//===-- asan_noinst_test.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This test file should be compiled w/o asan instrumentation.
//===----------------------------------------------------------------------===//

#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"
#include <sanitizer/allocator_interface.h>

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()
#include <algorithm>
#include <vector>
#include <limits>

using namespace __sanitizer;

// ATTENTION!
// Please don't call intercepted functions (including malloc() and friends)
// in this test. The static runtime library is linked explicitly (without
// -fsanitize=address), thus the interceptors do not work correctly on OS X.

// Make sure __asan_init is called before any test case is run.
struct AsanInitCaller {
  AsanInitCaller() {
    __asan_init();
  }
};
static AsanInitCaller asan_init_caller;

TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}

static void MallocStress(size_t n) {
  u32 seed = my_rand();
  BufferedStackTrace stack1;
  stack1.trace_buffer[0] = 0xa123;
  stack1.trace_buffer[1] = 0xa456;
  stack1.size = 2;

  BufferedStackTrace stack2;
  stack2.trace_buffer[0] = 0xb123;
  stack2.trace_buffer[1] = 0xb456;
  stack2.size = 2;

  BufferedStackTrace stack3;
  stack3.trace_buffer[0] = 0xc123;
  stack3.trace_buffer[1] = 0xc456;
  stack3.size = 2;

  std::vector<void *> vec;
  for (size_t i = 0; i < n; i++) {
    if ((i % 3) == 0) {
      if (vec.empty()) continue;
      size_t idx = my_rand_r(&seed) % vec.size();
      void *ptr = vec[idx];
      vec[idx] = vec.back();
      vec.pop_back();
      __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
    } else {
      size_t size = my_rand_r(&seed) % 1000 + 1;
      switch ((my_rand_r(&seed) % 128)) {
        case 0: size += 1024; break;
        case 1: size += 2048; break;
        case 2: size += 4096; break;
      }
      size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
      char *ptr = (char*)__asan::asan_memalign(alignment, size,
                                               &stack2, __asan::FROM_MALLOC);
      EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, 0, 0));
      vec.push_back(ptr);
      ptr[0] = 0;
      ptr[size-1] = 0;
      ptr[size/2] = 0;
    }
  }
  for (size_t i = 0; i < vec.size(); i++)
    __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
}


TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}

TEST(AddressSanitizer, ThreadedMallocStressTest) {
  const int kNumThreads = 4;
  const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
                   (void*)kNumIterations);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}

TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}

TEST(AddressSanitizer, QuarantineTest) {
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    if (p1 == p) break;
  }
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}

#if !defined(__NetBSD__)
void *ThreadedQuarantineTestWorker(void *unused) {
  (void)unused;
  u32 seed = my_rand();
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (size_t i = 0; i < 1000; i++) {
    void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  }
  return NULL;
}

// Check that the thread local allocators are flushed when threads are
// destroyed.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  // Run the routine once to warm up ASAN internal structures to get more
  // predictable incremental memory changes.
  pthread_t t;
  PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
  PTHREAD_JOIN(t, 0);

  const int n_threads = 3000;
  size_t mmaped1 = __sanitizer_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __sanitizer_get_heap_size();
    // Figure out why this much memory is required.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
#endif

void *ThreadedOneSizeMallocStress(void *unused) {
  (void)unused;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;
  const size_t kNumMallocs = 1000;
  for (int iter = 0; iter < 1000; iter++) {
    void *p[kNumMallocs];
    for (size_t i = 0; i < kNumMallocs; i++) {
      p[i] = __asan::asan_malloc(32, &stack);
    }
    for (size_t i = 0; i < kNumMallocs; i++) {
      __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    }
  }
  return NULL;
}

TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}

TEST(AddressSanitizer, ShadowRegionIsPoisonedTest) {
  using __asan::kHighMemEnd;
  // Check that __asan_region_is_poisoned works for shadow regions.
  uptr ptr = kLowShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kShadowGapBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
  ptr = kHighShadowBeg + 200;
  EXPECT_EQ(ptr, __asan_region_is_poisoned(ptr, 100));
}

// Test __asan_load1 & friends.
TEST(AddressSanitizer, LoadStoreCallbacks) {
  typedef void (*CB)(uptr p);
  CB cb[2][5] = {
      {
        __asan_load1, __asan_load2, __asan_load4, __asan_load8, __asan_load16,
      }, {
        __asan_store1, __asan_store2, __asan_store4, __asan_store8,
        __asan_store16,
      }
  };

  uptr buggy_ptr;

  __asan_test_only_reported_buggy_pointer = &buggy_ptr;
  BufferedStackTrace stack;
  stack.trace_buffer[0] = 0x890;
  stack.size = 1;

  for (uptr len = 16; len <= 32; len++) {
    char *ptr = (char*) __asan::asan_malloc(len, &stack);
    uptr p = reinterpret_cast<uptr>(ptr);
    for (uptr is_write = 0; is_write <= 1; is_write++) {
      for (uptr size_log = 0; size_log <= 4; size_log++) {
        uptr size = 1 << size_log;
        CB call = cb[is_write][size_log];
        // Iterate only size-aligned offsets.
        for (uptr offset = 0; offset <= len; offset += size) {
          buggy_ptr = 0;
          call(p + offset);
          if (offset + size <= len)
            EXPECT_EQ(buggy_ptr, 0U);
          else
            EXPECT_EQ(buggy_ptr, p + offset);
        }
      }
    }
    __asan::asan_free(ptr, &stack, __asan::FROM_MALLOC);
  }
  __asan_test_only_reported_buggy_pointer = 0;
}