1 //===-- sanitizer_common_test.cpp -----------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 //
11 //===----------------------------------------------------------------------===//
#include <algorithm>
#include <functional>
#include <vector>

#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"

#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"
24
25 namespace __sanitizer {
26
// Returns true if the first |n| elements of |array| are in non-decreasing
// order. Used to validate the results of Sort() in the tests below.
static bool IsSorted(const uptr *array, uptr n) {
  // std::is_sorted performs exactly the adjacent-pair comparison the old
  // hand-written loop did (no element smaller than its predecessor) and
  // trivially handles n == 0 and n == 1. <algorithm> is already included.
  return std::is_sorted(array, array + n);
}
33
// Exercises Sort() on several initial orderings and validates each result
// with IsSorted().
TEST(SanitizerCommon, SortTest) {
  uptr array[100];
  uptr n = 100;
  // Already sorted.
  for (uptr i = 0; i < n; i++) {
    array[i] = i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Reverse order.
  for (uptr i = 0; i < n; i++) {
    array[i] = n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Mixed order: evens ascending from the front, odds descending from the
  // back.
  for (uptr i = 0; i < n; i++) {
    array[i] = (i % 2 == 0) ? i : n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All equal.
  for (uptr i = 0; i < n; i++) {
    array[i] = 42;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All but one sorted.
  for (uptr i = 0; i < n - 1; i++) {
    array[i] = i;
  }
  array[n - 1] = 42;
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Minimal case - sort two elements.
  array[0] = 1;
  array[1] = 0;
  Sort(array, 2);
  EXPECT_TRUE(IsSorted(array, 2));
}
74
// Checks that MmapAlignedOrDieOnFatalError returns properly aligned,
// writable mappings for a range of size/alignment combinations.
TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
  uptr PageSize = GetPageSizeCached();
  for (uptr size = 1; size <= 32; size *= 2) {
    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
      for (int iter = 0; iter < 100; iter++) {
        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
        // The returned address must honor the requested alignment.
        EXPECT_EQ(0U, res % (alignment * PageSize));
        // Touch every byte to verify the memory is actually writable.
        internal_memset((void*)res, 1, size * PageSize);
        UnmapOrDie((void*)res, size * PageSize);
      }
    }
  }
}
89
// reserve() is expected to round the requested capacity up to a whole
// page worth of elements.
TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
  InternalMmapVector<uptr> v;
  v.reserve(1);
  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}
95
// Checks the size/capacity interplay of resize() and reserve(): capacity
// only grows, and shrinking the size never releases storage.
// (Renamed from "InternalMmapVectorReize" to fix the typo in the test name;
// nothing else references the registered name.)
TEST(SanitizerCommon, InternalMmapVectorResize) {
  InternalMmapVector<uptr> v;
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), v.size());

  // reserve() grows capacity but leaves size untouched.
  v.reserve(1000);
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), 1000U);

  // resize() beyond the reserved capacity grows both size and capacity.
  v.resize(10000);
  CHECK_EQ(10000U, v.size());
  CHECK_GE(v.capacity(), v.size());
  uptr cap = v.capacity();

  // Shrinking the size keeps the previously allocated capacity.
  v.resize(100);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);

  // reserve() below the current capacity is a no-op.
  v.reserve(10);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);
}
118
// Basic push_back/operator[]/back/pop_back coverage for
// InternalMmapVector, plus the state of a default-constructed vector.
TEST(SanitizerCommon, InternalMmapVector) {
  const uptr kCount = 100;
  InternalMmapVector<uptr> v;
  // When appending the i-th element the size must be exactly i.
  for (uptr i = 0; i < kCount; i++) {
    EXPECT_EQ(i, v.size());
    v.push_back(i);
  }
  // Elements are retrievable by index in insertion order.
  for (uptr i = 0; i < kCount; i++) EXPECT_EQ(i, v[i]);
  // Popping drains the vector back-to-front, one element at a time.
  int j = kCount - 1;
  while (j >= 0) {
    EXPECT_EQ((uptr)j, v.back());
    v.pop_back();
    EXPECT_EQ((uptr)j, v.size());
    j--;
  }
  // A never-touched vector holds no storage at all.
  InternalMmapVector<uptr> untouched;
  CHECK_EQ(untouched.capacity(), 0U);
  CHECK_EQ(0U, untouched.size());
}
137
// Covers operator== / operator!= for InternalMmapVector: equal contents,
// differing sizes, and equal sizes with one differing element.
TEST(SanitizerCommon, InternalMmapVectorEq) {
  InternalMmapVector<uptr> lhs;
  InternalMmapVector<uptr> rhs;
  for (uptr i = 0; i < 100; i++) {
    lhs.push_back(i);
    rhs.push_back(i);
  }
  // Identical contents compare equal.
  EXPECT_TRUE(lhs == rhs);
  EXPECT_FALSE(lhs != rhs);

  // Differing sizes compare unequal.
  lhs.push_back(1);
  EXPECT_FALSE(lhs == rhs);
  EXPECT_TRUE(lhs != rhs);

  // Equal again once the other vector catches up.
  rhs.push_back(1);
  EXPECT_TRUE(lhs == rhs);
  EXPECT_FALSE(lhs != rhs);

  // Same size but one differing element compares unequal.
  lhs[55] = 1;
  EXPECT_FALSE(lhs == rhs);
  EXPECT_TRUE(lhs != rhs);
}
160
// swap() must exchange the full contents of two vectors.
TEST(SanitizerCommon, InternalMmapVectorSwap) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  InternalMmapVector<uptr> vector3;
  InternalMmapVector<uptr> vector4;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
    // -i wraps around to a large uptr value; all that matters is that
    // vector3/vector4 hold contents distinct from vector1/vector2.
    vector3.push_back(-i);
    vector4.push_back(-i);
  }
  EXPECT_NE(vector2, vector3);
  EXPECT_NE(vector1, vector4);
  vector1.swap(vector3);
  // After the swap, vector1 holds the old vector3 contents and vice versa.
  EXPECT_EQ(vector2, vector3);
  EXPECT_EQ(vector1, vector4);
}
178
// Validates GetThreadStackAndTls for the current thread: the reported
// stack range must contain the address of a live local variable and, on
// Linux/x86_64, the reported TLS range must contain a __thread variable
// and must not overlap the stack.
void TestThreadInfo(bool main) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);

  // The address of a stack local must fall inside the reported range.
  int stack_var;
  EXPECT_NE(stk_addr, (uptr)0);
  EXPECT_NE(stk_size, (uptr)0);
  EXPECT_GT((uptr)&stack_var, stk_addr);
  EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);

#if SANITIZER_LINUX && defined(__x86_64__)
  // The address of a TLS variable must fall inside the reported TLS range.
  static __thread int thread_var;
  EXPECT_NE(tls_addr, (uptr)0);
  EXPECT_NE(tls_size, (uptr)0);
  EXPECT_GT((uptr)&thread_var, tls_addr);
  EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);

  // Ensure that tls and stack do not intersect: neither TLS endpoint may
  // fall inside the stack range, and both must lie on the same side of it.
  uptr tls_end = tls_addr + tls_size;
  EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
  EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
  EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
#endif
}
206
WorkerThread(void * arg)207 static void *WorkerThread(void *arg) {
208 TestThreadInfo(false);
209 return 0;
210 }
211
// Runs the stack/TLS sanity checks on the main thread.
TEST(SanitizerCommon, ThreadStackTlsMain) {
  InitTlsSize();
  TestThreadInfo(true);
}
216
// Same checks as ThreadStackTlsMain, but on a freshly created pthread
// (WorkerThread calls TestThreadInfo(false)).
TEST(SanitizerCommon, ThreadStackTlsWorker) {
  InitTlsSize();
  pthread_t t;
  PTHREAD_CREATE(&t, 0, WorkerThread, 0);
  PTHREAD_JOIN(t, 0);
}
223
// Strict-weak-ordering comparator for uptr values.
// NOTE(review): not referenced in this portion of the file; presumably
// used by tests elsewhere -- confirm before removing.
bool UptrLess(uptr a, uptr b) {
  return a < b;
}
227
// Spot-checks InternalLowerBound against a fixed sorted array: for every
// query value, the returned index is the first position whose element is
// not less than the query (matching std::lower_bound semantics).
TEST(SanitizerCommon, InternalLowerBound) {
  std::vector<int> arr = {1, 3, 5, 7, 11};

  // Parallel tables of query values and the index each should map to.
  const int queries[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
  const uptr expected[] = {0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5};
  for (uptr i = 0; i < ARRAY_SIZE(queries); i++)
    EXPECT_EQ(expected[i], InternalLowerBound(arr, queries[i]));
}
245
// Cross-checks InternalLowerBound against std::lower_bound on many
// pseudo-randomly generated sorted arrays, probing around each present
// value (val - 1, val, val + 1) to hit both "found" and "not found" paths.
TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
  std::vector<int> data;
  // Deterministic pseudo-random item in [0, 100) derived from (i, j);
  // 0x9e3779b9 is the 32-bit golden-ratio mixing constant.
  auto create_item = [] (size_t i, size_t j) {
    auto v = i * 10000 + j;
    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
  };
  for (size_t i = 0; i < 1000; ++i) {
    data.resize(i);
    for (size_t j = 0; j < i; ++j) {
      data[j] = create_item(i, j);
    }

    std::sort(data.begin(), data.end());

    for (size_t j = 0; j < i; ++j) {
      int val = create_item(i, j);
      // Probe the value itself and both neighbors.
      for (auto to_find : {val - 1, val, val + 1}) {
        uptr expected =
            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
        EXPECT_EQ(expected,
                  InternalLowerBound(data, to_find, std::less<int>()));
      }
    }
  }
}
271
// Value-parameterized fixture: each parameter is one input vector for the
// SortAndDedup test below.
class SortAndDedupTest : public ::testing::TestWithParam<std::vector<int>> {};
273
// Compares SortAndDedup against a reference result computed with
// std::sort + std::unique on the current parameter.
TEST_P(SortAndDedupTest, SortAndDedup) {
  // Reference: standard-library sort-then-dedup.
  std::vector<int> expected = GetParam();
  std::sort(expected.begin(), expected.end());
  expected.erase(std::unique(expected.begin(), expected.end()),
                 expected.end());

  // Result under test.
  std::vector<int> actual = GetParam();
  SortAndDedup(actual);

  EXPECT_EQ(expected, actual);
}
284
// Inputs for SortAndDedupTest: empty, single element, all-duplicates,
// sorted, reverse-sorted, and assorted mixed sequences.
const std::vector<int> kSortAndDedupTests[] = {
    {},
    {1},
    {1, 1},
    {1, 1, 1},
    {1, 2, 3},
    {3, 2, 1},
    {1, 2, 2, 3},
    {3, 3, 2, 1, 2},
    {3, 3, 2, 1, 2},
    {1, 2, 1, 1, 2, 1, 1, 1, 2, 2},
    {1, 3, 3, 2, 3, 1, 3, 1, 4, 4, 2, 1, 4, 1, 1, 2, 2},
};
// NOTE(review): INSTANTIATE_TEST_CASE_P is deprecated in newer googletest
// in favor of INSTANTIATE_TEST_SUITE_P; kept as-is for compatibility with
// the bundled gtest -- confirm the available version before migrating.
INSTANTIATE_TEST_CASE_P(SortAndDedupTest, SortAndDedupTest,
                        ::testing::ValuesIn(kSortAndDedupTests));
300
301 #if SANITIZER_LINUX && !SANITIZER_ANDROID
// FindPathToBinary must locate "true" somewhere under a .../bin/ directory
// via PATH, and return null for a non-existent binary.
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  // Guard against a null result before dereferencing: internal_strstr on a
  // null pointer would crash the test binary instead of failing the test.
  ASSERT_NE((char*)0, true_path);
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
308 #elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  // Guard against a null result before dereferencing: internal_strstr on a
  // null pointer would crash the test binary instead of failing the test.
  ASSERT_NE((char*)0, ntdll_path);
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
317 #endif
318
// StripPathPrefix: removes a leading prefix from a path; based on the
// expectations below it also matches the prefix mid-path and skips a "./"
// component that follows the prefix.
TEST(SanitizerCommon, StripPathPrefix) {
  // Null path yields null.
  EXPECT_EQ(0, StripPathPrefix(0, "prefix"));
  // Null prefix leaves the path untouched.
  EXPECT_STREQ("foo", StripPathPrefix("foo", 0));
  EXPECT_STREQ("dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/usr/lib/"));
  // The prefix is found inside the path, not only at the start.
  EXPECT_STREQ("/file.cc", StripPathPrefix("/usr/myroot/file.cc", "/myroot"));
  // A "./" right after the prefix is stripped as well.
  EXPECT_STREQ("file.h", StripPathPrefix("/usr/lib/./file.h", "/usr/lib/"));
}
327
// RemoveANSIEscapeSequencesFromString strips ANSI escape sequences in
// place and must tolerate a null pointer.
TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {
  RemoveANSIEscapeSequencesFromString(nullptr);
  // Pairs of {input, expected output}. The array bound is deduced (was a
  // hand-counted [22]) so adding or removing cases cannot get out of sync
  // with the ARRAY_SIZE-driven loop below.
  const char *buffs[] = {
    "Default", "Default",
    "\033[95mLight magenta", "Light magenta",
    "\033[30mBlack\033[32mGreen\033[90mGray", "BlackGreenGray",
    "\033[106mLight cyan \033[107mWhite ", "Light cyan White ",
    "\033[31mHello\033[0m World", "Hello World",
    "\033[38;5;82mHello \033[38;5;198mWorld", "Hello World",
    "123[653456789012", "123[653456789012",
    "Normal \033[5mBlink \033[25mNormal", "Normal Blink Normal",
    "\033[106m\033[107m", "",
    "", "",
    " ", " ",
  };

  for (size_t i = 0; i < ARRAY_SIZE(buffs); i+=2) {
    // The function rewrites the string in place, so run it on a heap copy
    // and free the copy afterwards.
    char *buffer_copy = internal_strdup(buffs[i]);
    RemoveANSIEscapeSequencesFromString(buffer_copy);
    EXPECT_STREQ(buffer_copy, buffs[i+1]);
    InternalFree(buffer_copy);
  }
}
351
// InternalScopedString with capacity 10: based on the expectations below
// it holds at most 9 characters plus the NUL terminator, and append()
// silently truncates anything beyond that.
TEST(SanitizerCommon, InternalScopedString) {
  InternalScopedString str(10);
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  // printf-style formatted append.
  int x = 1234;
  str.append("%d", x);
  EXPECT_EQ(7U, str.length());
  EXPECT_STREQ("foo1234", str.data());

  // Appending past capacity truncates: only "12" of "1234" fits.
  str.append("%d", x);
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("foo123412", str.data());

  // clear() resets to the empty string.
  str.clear();
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  // A 10-character append is cut to 9 characters (capacity minus NUL).
  str.append("0123456789");
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("012345678", str.data());
}
378
379 #if SANITIZER_LINUX || SANITIZER_FREEBSD || \
380 SANITIZER_MAC || SANITIZER_IOS
// GetRandom must reject bad arguments (null buffer, zero length, length
// larger than the 256-byte limit) and fill valid buffers with bytes that
// differ between independent draws, in both blocking and non-blocking
// modes.
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        // Two independent draws should differ; a collision of >= 4 random
        // bytes is astronomically unlikely (though not impossible).
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
397 #endif
398
// Init() must reserve the requested range, and after the range is
// unmapped a second ReservedAddressRange must be able to re-initialize at
// the same fixed address.
TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}
414
// Map() over an initialized range must succeed at the reserved base
// address and produce readable memory.
TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*) -1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable: copy the whole range out.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void *>(res), init_size);

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}
431
// Unmap() supports releasing the whole range and (except on Windows)
// partial unmaps at either end; unmapping a chunk from the middle of the
// range must abort the process (checked with EXPECT_DEATH).
TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 8;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping the entire range should succeed.
  address_range.Unmap(base_addr, init_size);

  // Map a new range.
  base_addr = address_range.Init(init_size);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Windows doesn't allow partial unmappings.
#if !SANITIZER_WINDOWS

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);

  // Unmapping at the end should succeed.
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);

#endif

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), ".*");
}
463
464 // Windows has no working ReadBinaryName.
465 #if !SANITIZER_WINDOWS
// ReadBinaryNameCached must return a non-zero length for the name of the
// currently running binary.
TEST(SanitizerCommon, ReadBinaryNameCached) {
  char buf[256];
  EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf)));
}
470 #endif
471
472 } // namespace __sanitizer
473