13cab2bb3Spatrick //===-- wrappers_c_test.cpp -------------------------------------*- C++ -*-===//
23cab2bb3Spatrick //
33cab2bb3Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43cab2bb3Spatrick // See https://llvm.org/LICENSE.txt for license information.
53cab2bb3Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63cab2bb3Spatrick //
73cab2bb3Spatrick //===----------------------------------------------------------------------===//
83cab2bb3Spatrick 
9d89ec533Spatrick #include "memtag.h"
10d89ec533Spatrick #include "scudo/interface.h"
113cab2bb3Spatrick #include "tests/scudo_unit_test.h"
123cab2bb3Spatrick 
#include <errno.h>
#include <limits.h>
#include <malloc.h>
#include <stdlib.h>
#include <unistd.h>

#include <vector>
183cab2bb3Spatrick 
// __GLIBC_PREREQ is only provided by glibc; define a fallback that evaluates
// to false so the version-gated tests below still compile on other libcs.
#ifndef __GLIBC_PREREQ
#define __GLIBC_PREREQ(x, y) 0
#endif
22*810390e3Srobert 
// Declarations for allocator extensions implemented by Scudo that are not
// necessarily declared by the system headers on every platform.
extern "C" {
void malloc_enable(void);
void malloc_disable(void);
int malloc_iterate(uintptr_t base, size_t size,
                   void (*callback)(uintptr_t base, size_t size, void *arg),
                   void *arg);
void *valloc(size_t size);
void *pvalloc(size_t size);
}
323cab2bb3Spatrick 
333cab2bb3Spatrick // Note that every C allocation function in the test binary will be fulfilled
343cab2bb3Spatrick // by Scudo (this includes the gtest APIs, etc.), which is a test by itself.
353cab2bb3Spatrick // But this might also lead to unexpected side-effects, since the allocation and
363cab2bb3Spatrick // deallocation operations in the TEST functions will coexist with others (see
373cab2bb3Spatrick // the EXPECT_DEATH comment below).
383cab2bb3Spatrick 
393cab2bb3Spatrick // We have to use a small quarantine to make sure that our double-free tests
403cab2bb3Spatrick // trigger. Otherwise EXPECT_DEATH ends up reallocating the chunk that was just
413cab2bb3Spatrick // freed (this depends on the size obviously) and the following free succeeds.
423cab2bb3Spatrick 
433cab2bb3Spatrick static const size_t Size = 100U;
443cab2bb3Spatrick 
TEST(ScudoWrappersCDeathTest, Malloc) {
  // Basic allocation: usable size covers the request and the pointer honors
  // the platform's minimum alignment (8 bytes on 32-bit, 16 on 64-bit).
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % FIRST_32_SECOND_64(8U, 16U), 0U);

  // An update to this warning in Clang now triggers in this line, but it's ok
  // because the check is expecting a bad pointer and should fail.
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
#endif
  // Freeing a tampered (misaligned) pointer must abort.
  EXPECT_DEATH(
      free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(P) | 1U)), "");
#if defined(__has_warning) && __has_warning("-Wfree-nonheap-object")
#pragma GCC diagnostic pop
#endif

  free(P);
  // Double free must abort (relies on the small test quarantine, see the
  // comment at the top of the file).
  EXPECT_DEATH(free(P), "");

  // malloc(0) still returns a valid, freeable chunk.
  P = malloc(0U);
  EXPECT_NE(P, nullptr);
  free(P);

  // An impossible request fails and sets errno to ENOMEM.
  errno = 0;
  EXPECT_EQ(malloc(SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}
743cab2bb3Spatrick 
// Exercises calloc: zero-initialization of the returned chunk, zero-sized
// arguments, and the overflow / ENOMEM failure paths.
TEST(ScudoWrappersCTest, Calloc) {
  void *P = calloc(1U, Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  // Every byte of the chunk must be zeroed.
  for (size_t I = 0U; I != Size; ++I)
    EXPECT_EQ(reinterpret_cast<uint8_t *>(P)[I], 0U);
  free(P);

  // A zero count or zero size still yields a valid, freeable chunk.
  P = calloc(1U, 0U);
  EXPECT_NE(P, nullptr);
  free(P);
  P = calloc(0U, 1U);
  EXPECT_NE(P, nullptr);
  free(P);

  // Requests whose total size is impossible must fail with ENOMEM.
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, 1U), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(static_cast<size_t>(LONG_MAX) + 1U, 2U), nullptr);
  if (SCUDO_ANDROID)
    EXPECT_EQ(errno, ENOMEM);
  errno = 0;
  EXPECT_EQ(calloc(SIZE_MAX, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
}
1013cab2bb3Spatrick 
// Checks that memalign returns correctly aligned pointers for every
// power-of-2 (Size, Align) combination up to 64K, allocating each pair three
// times. The previous version of this test leaked every allocation and never
// null-checked the result; keep the pointers and free them at the end so the
// allocation pattern (several live chunks per size class) is preserved.
TEST(ScudoWrappersCTest, SmallAlign) {
  std::vector<void *> Ptrs;
  // 17 sizes x 17 alignments x 3 allocations each.
  Ptrs.reserve(17 * 17 * 3);
  for (size_t Size = 1; Size <= 0x10000; Size <<= 1) {
    for (size_t Align = 1; Align <= 0x10000; Align <<= 1) {
      for (size_t Count = 0; Count < 3; ++Count) {
        void *P = memalign(Align, Size);
        EXPECT_NE(P, nullptr);
        EXPECT_TRUE(reinterpret_cast<uintptr_t>(P) % Align == 0);
        Ptrs.push_back(P);
      }
    }
  }
  for (void *P : Ptrs)
    free(P);
}
113d89ec533Spatrick 
// Exercises memalign() and posix_memalign() across every supported power-of-2
// alignment, their failure modes, and Android's permissive extensions.
TEST(ScudoWrappersCTest, Memalign) {
  void *P;
  for (size_t Shift = FIRST_32_SECOND_64(2U, 3U); Shift <= 18U; Shift++) {
    const size_t Alignment = static_cast<size_t>(1U) << Shift;

    // memalign must honor both the alignment and the requested size.
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);

    // posix_memalign must do the same, reporting success via its return code.
    P = nullptr;
    EXPECT_EQ(posix_memalign(&P, Alignment, Size), 0);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    free(P);
  }

  // Failure modes: impossible sizes, and a non power-of-2 alignment for
  // posix_memalign (which must return EINVAL per POSIX).
  EXPECT_EQ(memalign(4096U, SIZE_MAX), nullptr);
  EXPECT_EQ(posix_memalign(&P, 15U, Size), EINVAL);
  EXPECT_EQ(posix_memalign(&P, 4096U, SIZE_MAX), ENOMEM);

  // Android's memalign accepts non power-of-2 alignments, and 0.
  if (SCUDO_ANDROID) {
    for (size_t Alignment = 0U; Alignment <= 128U; Alignment++) {
      P = memalign(Alignment, 1024U);
      EXPECT_NE(P, nullptr);
      free(P);
    }
  }
}
1463cab2bb3Spatrick 
// aligned_alloc requires the size to be a multiple of the alignment; check
// both the success path and the EINVAL failure path.
TEST(ScudoWrappersCTest, AlignedAlloc) {
  const size_t Alignment = 4096U;
  const size_t AllocSize = Alignment * 4U;

  void *P = aligned_alloc(Alignment, AllocSize);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(AllocSize, malloc_usable_size(P));
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
  free(P);

  // Size (100) is not a multiple of Alignment: the call must fail and set
  // errno to EINVAL.
  errno = 0;
  P = aligned_alloc(Alignment, Size);
  EXPECT_EQ(P, nullptr);
  EXPECT_EQ(errno, EINVAL);
}
1603cab2bb3Spatrick 
TEST(ScudoWrappersCDeathTest, Realloc) {
  // realloc(nullptr, N) is malloc(N)
  void *P = realloc(nullptr, 0U);
  EXPECT_NE(P, nullptr);
  free(P);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  // realloc(P, 0U) is free(P) and returns nullptr
  EXPECT_EQ(realloc(P, 0U), nullptr);

  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size, malloc_usable_size(P));
  memset(P, 0x42, Size);

  // Growing the chunk must preserve its original contents.
  P = realloc(P, Size * 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size * 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);

  // Shrinking must preserve the remaining prefix.
  P = realloc(P, Size / 2U);
  EXPECT_NE(P, nullptr);
  EXPECT_LE(Size / 2U, malloc_usable_size(P));
  for (size_t I = 0; I < Size / 2U; I++)
    EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
  free(P);

  // realloc of an already-freed pointer must abort.
  EXPECT_DEATH(P = realloc(P, Size), "");

  // Impossible sizes fail with ENOMEM, both from nullptr and from a live
  // chunk (which must remain valid and freeable afterwards).
  errno = 0;
  EXPECT_EQ(realloc(nullptr, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  errno = 0;
  EXPECT_EQ(realloc(P, SIZE_MAX), nullptr);
  EXPECT_EQ(errno, ENOMEM);
  free(P);

  // Android allows realloc of memalign pointers.
  if (SCUDO_ANDROID) {
    const size_t Alignment = 1024U;
    P = memalign(Alignment, Size);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size, malloc_usable_size(P));
    EXPECT_EQ(reinterpret_cast<uintptr_t>(P) % Alignment, 0U);
    memset(P, 0x42, Size);

    P = realloc(P, Size * 2U);
    EXPECT_NE(P, nullptr);
    EXPECT_LE(Size * 2U, malloc_usable_size(P));
    for (size_t I = 0; I < Size; I++)
      EXPECT_EQ(0x42, (reinterpret_cast<uint8_t *>(P))[I]);
    free(P);
  }
}
2193cab2bb3Spatrick 
2203cab2bb3Spatrick #if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallOpt) {
  // Unknown options are rejected (return 0).
  errno = 0;
  EXPECT_EQ(mallopt(-1000, 1), 0);
  // mallopt doesn't set errno.
  EXPECT_EQ(errno, 0);

  // M_PURGE (release free memory) is always accepted.
  EXPECT_EQ(mallopt(M_PURGE, 0), 1);

  // Toggling the decay time on and off must succeed every time.
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 1), 1);
  EXPECT_EQ(mallopt(M_DECAY_TIME, 0), 1);

  // Android-only cache and TSD tuning knobs.
  if (SCUDO_ANDROID) {
    EXPECT_EQ(mallopt(M_CACHE_COUNT_MAX, 100), 1);
    EXPECT_EQ(mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2), 1);
    EXPECT_EQ(mallopt(M_TSDS_COUNT_MAX, 10), 1);
  }
}
2403cab2bb3Spatrick #endif
2413cab2bb3Spatrick 
TEST(ScudoWrappersCTest, OtherAlloc) {
#if !SCUDO_FUCHSIA
  const size_t PageSize = sysconf(_SC_PAGESIZE);

  // pvalloc returns page-aligned memory with at least a page of usable size.
  void *P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  EXPECT_LE(PageSize, malloc_usable_size(P));
  free(P);

  // A size that cannot be rounded up to a page multiple must fail.
  EXPECT_EQ(pvalloc(SIZE_MAX), nullptr);

  // The allocator must still work after the failed request.
  P = pvalloc(Size);
  EXPECT_NE(P, nullptr);
  EXPECT_EQ(reinterpret_cast<uintptr_t>(P) & (PageSize - 1), 0U);
  free(P);
#endif

  EXPECT_EQ(valloc(SIZE_MAX), nullptr);
}
2623cab2bb3Spatrick 
2633cab2bb3Spatrick #if !SCUDO_FUCHSIA
TEST(ScudoWrappersCTest, MallInfo) {
  // mallinfo is deprecated.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
  // Use a size large enough to bypass the quarantine, so that freeing the
  // chunk immediately shows up in the "free" statistics.
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo MI = mallinfo();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo();
  // Allocated bytes must have grown by at least the request size.
  EXPECT_GE(static_cast<size_t>(MI.uordblks), Allocated + BypassQuarantineSize);
  EXPECT_GT(static_cast<size_t>(MI.hblkhd), 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo();
  // Free bytes must account for the released chunk.
  EXPECT_GE(static_cast<size_t>(MI.fordblks), Free + BypassQuarantineSize);
#pragma clang diagnostic pop
}
282*810390e3Srobert #endif
283*810390e3Srobert 
284*810390e3Srobert #if __GLIBC_PREREQ(2, 33)
// Same checks as MallInfo, using the non-deprecated mallinfo2 (glibc >= 2.33)
// whose fields are size_t instead of int.
TEST(ScudoWrappersCTest, MallInfo2) {
  // Large enough to bypass the quarantine (see MallInfo above for rationale).
  const size_t BypassQuarantineSize = 1024U;
  struct mallinfo2 MI = mallinfo2();
  size_t Allocated = MI.uordblks;
  void *P = malloc(BypassQuarantineSize);
  EXPECT_NE(P, nullptr);
  MI = mallinfo2();
  EXPECT_GE(MI.uordblks, Allocated + BypassQuarantineSize);
  EXPECT_GT(MI.hblkhd, 0U);
  size_t Free = MI.fordblks;
  free(P);
  MI = mallinfo2();
  EXPECT_GE(MI.fordblks, Free + BypassQuarantineSize);
}
2993cab2bb3Spatrick #endif
3003cab2bb3Spatrick 
// Address of the chunk expected on the iteration boundary, and the number of
// times the malloc_iterate callback has reported it.
static uintptr_t BoundaryP;
static size_t Count;
3033cab2bb3Spatrick 
// malloc_iterate callback: increments Count each time the boundary chunk is
// reported. Size and Arg are unused.
static void callback(uintptr_t Base, size_t Size, void *Arg) {
  if (scudo::archSupportsMemoryTagging()) {
    // Strip memory tags so the comparison is between raw addresses.
    Base = scudo::untagPointer(Base);
    BoundaryP = scudo::untagPointer(BoundaryP);
  }
  if (Base == BoundaryP)
    Count++;
}
3123cab2bb3Spatrick 
3133cab2bb3Spatrick // Verify that a block located on an iteration boundary is not mis-accounted.
3143cab2bb3Spatrick // To achieve this, we allocate a chunk for which the backing block will be
3153cab2bb3Spatrick // aligned on a page, then run the malloc_iterate on both the pages that the
3163cab2bb3Spatrick // block is a boundary for. It must only be seen once by the callback function.
TEST(ScudoWrappersCTest, MallocIterateBoundary) {
  const size_t PageSize = sysconf(_SC_PAGESIZE);
  // Offset between the user pointer and the start of its backing block.
  const size_t BlockDelta = FIRST_32_SECOND_64(8U, 16U);
  const size_t SpecialSize = PageSize - BlockDelta;

  // We aren't guaranteed that any size class is exactly a page wide. So we need
  // to keep making allocations until we succeed.
  //
  // With a 16-byte block alignment and 4096-byte page size, each allocation has
  // a probability of (1 - (16/4096)) of failing to meet the alignment
  // requirements, and the probability of failing 65536 times is
  // (1 - (16/4096))^65536 < 10^-112. So if we still haven't succeeded after
  // 65536 tries, give up.
  uintptr_t Block;
  void *P = nullptr;
  for (unsigned I = 0; I != 65536; ++I) {
    void *PrevP = P;
    P = malloc(SpecialSize);
    EXPECT_NE(P, nullptr);
    // Chain every attempt through an intrusive list (first word of each
    // chunk) so they can all be freed at the end.
    *reinterpret_cast<void **>(P) = PrevP;
    BoundaryP = reinterpret_cast<uintptr_t>(P);
    Block = BoundaryP - BlockDelta;
    if ((Block & (PageSize - 1)) == 0U)
      break;
  }
  EXPECT_EQ((Block & (PageSize - 1)), 0U);

  // Iterate over both pages the block borders; the chunk must be reported
  // exactly once. The allocator must be disabled around malloc_iterate.
  Count = 0U;
  malloc_disable();
  malloc_iterate(Block - PageSize, PageSize, callback, nullptr);
  malloc_iterate(Block, PageSize, callback, nullptr);
  malloc_enable();
  EXPECT_EQ(Count, 1U);

  // Walk the intrusive list and release every allocation made above.
  while (P) {
    void *NextP = *reinterpret_cast<void **>(P);
    free(P);
    P = NextP;
  }
}
3573cab2bb3Spatrick 
358d89ec533Spatrick // Fuchsia doesn't have alarm, fork or malloc_info.
359d89ec533Spatrick #if !SCUDO_FUCHSIA
TEST(ScudoWrappersCDeathTest, MallocDisableDeadlock) {
  // We expect heap operations within a disable/enable scope to deadlock.
  // alarm(1) delivers SIGALRM after one second, killing the deadlocked child
  // so that EXPECT_DEATH observes a death instead of hanging.
  EXPECT_DEATH(
      {
        void *P = malloc(Size);
        EXPECT_NE(P, nullptr);
        free(P);
        malloc_disable();
        alarm(1);
        P = malloc(Size);
        malloc_enable();
      },
      "");
}
3743cab2bb3Spatrick 
// Checks that malloc_info() writes Scudo's XML report and that it accounts
// for live allocations of specific sizes.
TEST(ScudoWrappersCTest, MallocInfo) {
  // Use volatile so that the allocations don't get optimized away.
  void *volatile P1 = malloc(1234);
  void *volatile P2 = malloc(4321);

  // Capture the report in memory rather than in a real file.
  char Buffer[16384];
  FILE *F = fmemopen(Buffer, sizeof(Buffer), "w+");
  EXPECT_NE(F, nullptr);
  errno = 0;
  EXPECT_EQ(malloc_info(0, F), 0);
  EXPECT_EQ(errno, 0);
  fclose(F);

  // The report must start with Scudo's XML header and mention both live
  // allocation sizes.
  const char Header[] = "<malloc version=\"scudo-";
  EXPECT_EQ(strncmp(Buffer, Header, sizeof(Header) - 1), 0);
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"1234\" count=\""));
  EXPECT_NE(nullptr, strstr(Buffer, "<alloc size=\"4321\" count=\""));

  free(P1);
  free(P2);
}
3943cab2bb3Spatrick 
TEST(ScudoWrappersCDeathTest, Fork) {
  void *P;
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0) << strerror(errno);
  if (Pid == 0) {
    // Child: the allocator must remain functional after fork().
    P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  // Parent: the allocator must also still work here.
  P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // fork should stall if the allocator has been disabled.
  // The alarm ensures the stalled process is killed instead of hanging.
  EXPECT_DEATH(
      {
        malloc_disable();
        alarm(1);
        Pid = fork();
        EXPECT_GE(Pid, 0);
      },
      "");
}
4223cab2bb3Spatrick 
// Synchronization state shared between DisableForkEnable() and the helper
// thread enableMalloc(). Initialize the mutex explicitly: relying on
// zero-initialization of a pthread_mutex_t is not portable, and this matches
// the explicit initializer already used for the condition variable.
static pthread_mutex_t Mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t Conditional = PTHREAD_COND_INITIALIZER;
// Set to true (under Mutex) once the helper thread has warmed up its heap.
static bool Ready;
4263cab2bb3Spatrick 
// Helper thread for DisableForkEnable: warms up its allocator state, signals
// readiness to the main thread, then re-enables the allocator after the main
// thread has called malloc_disable() and fork().
static void *enableMalloc(void *Unused) {
  // Initialize the allocator for this thread.
  void *P = malloc(Size);
  EXPECT_NE(P, nullptr);
  memset(P, 0x42, Size);
  free(P);

  // Signal the main thread we are ready.
  pthread_mutex_lock(&Mutex);
  Ready = true;
  pthread_cond_signal(&Conditional);
  pthread_mutex_unlock(&Mutex);

  // Wait for the malloc_disable & fork, then enable the allocator again.
  // NOTE(review): the sleep is a timing assumption — the main thread is
  // expected to have disabled the allocator and forked within one second.
  sleep(1);
  malloc_enable();

  return nullptr;
}
4463cab2bb3Spatrick 
TEST(ScudoWrappersCTest, DisableForkEnable) {
  pthread_t ThreadId;
  Ready = false;
  EXPECT_EQ(pthread_create(&ThreadId, nullptr, &enableMalloc, nullptr), 0);

  // Wait for the thread to be warmed up.
  pthread_mutex_lock(&Mutex);
  while (!Ready)
    pthread_cond_wait(&Conditional, &Mutex);
  pthread_mutex_unlock(&Mutex);

  // Disable the allocator and fork. fork should succeed after malloc_enable.
  malloc_disable();
  pid_t Pid = fork();
  EXPECT_GE(Pid, 0);
  if (Pid == 0) {
    // Child: allocate to prove the heap works once re-enabled post-fork.
    void *P = malloc(Size);
    EXPECT_NE(P, nullptr);
    memset(P, 0x42, Size);
    free(P);
    _exit(0);
  }
  waitpid(Pid, nullptr, 0);
  EXPECT_EQ(pthread_join(ThreadId, 0), 0);
}
4723cab2bb3Spatrick 
4733cab2bb3Spatrick #endif // SCUDO_FUCHSIA
474