//===-- sanitizer_common_test.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <vector>

#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"

#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

namespace __sanitizer {

static bool IsSorted(const uptr *array, uptr n) {
  for (uptr i = 1; i < n; i++) {
    if (array[i] < array[i - 1]) return false;
  }
  return true;
}

TEST(SanitizerCommon, SortTest) {
  uptr array[100];
  uptr n = 100;
  // Already sorted.
  for (uptr i = 0; i < n; i++) {
    array[i] = i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Reverse order.
  for (uptr i = 0; i < n; i++) {
    array[i] = n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Mixed order.
  for (uptr i = 0; i < n; i++) {
    array[i] = (i % 2 == 0) ? i : n - 1 - i;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All equal.
  for (uptr i = 0; i < n; i++) {
    array[i] = 42;
  }
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // All but one sorted.
  for (uptr i = 0; i < n - 1; i++) {
    array[i] = i;
  }
  array[n - 1] = 42;
  Sort(array, n);
  EXPECT_TRUE(IsSorted(array, n));
  // Minimal case - sort two elements.
  array[0] = 1;
  array[1] = 0;
  Sort(array, 2);
  EXPECT_TRUE(IsSorted(array, 2));
}

TEST(SanitizerCommon, MmapAlignedOrDieOnFatalError) {
  uptr PageSize = GetPageSizeCached();
  for (uptr size = 1; size <= 32; size *= 2) {
    for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
      for (int iter = 0; iter < 100; iter++) {
        uptr res = (uptr)MmapAlignedOrDieOnFatalError(
            size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
        EXPECT_EQ(0U, res % (alignment * PageSize));
        internal_memset((void*)res, 1, size * PageSize);
        UnmapOrDie((void*)res, size * PageSize);
      }
    }
  }
}

TEST(SanitizerCommon, InternalMmapVectorRoundUpCapacity) {
  InternalMmapVector<uptr> v;
  v.reserve(1);
  CHECK_EQ(v.capacity(), GetPageSizeCached() / sizeof(uptr));
}

TEST(SanitizerCommon, InternalMmapVectorResize) {
  InternalMmapVector<uptr> v;
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), v.size());

  v.reserve(1000);
  CHECK_EQ(0U, v.size());
  CHECK_GE(v.capacity(), 1000U);

  v.resize(10000);
  CHECK_EQ(10000U, v.size());
  CHECK_GE(v.capacity(), v.size());
  uptr cap = v.capacity();

  v.resize(100);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);

  v.reserve(10);
  CHECK_EQ(100U, v.size());
  CHECK_EQ(v.capacity(), cap);
}

TEST(SanitizerCommon, InternalMmapVector) {
  InternalMmapVector<uptr> vector;
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector.size());
    vector.push_back(i);
  }
  for (uptr i = 0; i < 100; i++) {
    EXPECT_EQ(i, vector[i]);
  }
  for (int i = 99; i >= 0; i--) {
    EXPECT_EQ((uptr)i, vector.back());
    vector.pop_back();
    EXPECT_EQ((uptr)i, vector.size());
  }
  InternalMmapVector<uptr> empty_vector;
  CHECK_EQ(empty_vector.capacity(), 0U);
  CHECK_EQ(0U, empty_vector.size());
}
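
// An extra sketch, not part of the original suite: it assumes
// InternalMmapVector::clear() drops all elements (size() becomes 0) while
// leaving the already-reserved mapping in place, as the in-tree
// implementation does.
TEST(SanitizerCommon, InternalMmapVectorClear) {
  InternalMmapVector<uptr> v;
  for (uptr i = 0; i < 10; i++) v.push_back(i);
  EXPECT_EQ(10U, v.size());
  uptr cap = v.capacity();
  v.clear();
  EXPECT_EQ(0U, v.size());
  // The capacity is expected to survive clear(); only the size is reset.
  EXPECT_EQ(cap, v.capacity());
}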

TEST(SanitizerCommon, InternalMmapVectorEq) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
  }
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1.push_back(1);
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);

  vector2.push_back(1);
  EXPECT_TRUE(vector1 == vector2);
  EXPECT_FALSE(vector1 != vector2);

  vector1[55] = 1;
  EXPECT_FALSE(vector1 == vector2);
  EXPECT_TRUE(vector1 != vector2);
}

TEST(SanitizerCommon, InternalMmapVectorSwap) {
  InternalMmapVector<uptr> vector1;
  InternalMmapVector<uptr> vector2;
  InternalMmapVector<uptr> vector3;
  InternalMmapVector<uptr> vector4;
  for (uptr i = 0; i < 100; i++) {
    vector1.push_back(i);
    vector2.push_back(i);
    vector3.push_back(-i);
    vector4.push_back(-i);
  }
  EXPECT_NE(vector2, vector3);
  EXPECT_NE(vector1, vector4);
  vector1.swap(vector3);
  EXPECT_EQ(vector2, vector3);
  EXPECT_EQ(vector1, vector4);
}

void TestThreadInfo(bool main) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(main, &stk_addr, &stk_size, &tls_addr, &tls_size);

  int stack_var;
  EXPECT_NE(stk_addr, (uptr)0);
  EXPECT_NE(stk_size, (uptr)0);
  EXPECT_GT((uptr)&stack_var, stk_addr);
  EXPECT_LT((uptr)&stack_var, stk_addr + stk_size);

#if SANITIZER_LINUX && defined(__x86_64__)
  static __thread int thread_var;
  EXPECT_NE(tls_addr, (uptr)0);
  EXPECT_NE(tls_size, (uptr)0);
  EXPECT_GT((uptr)&thread_var, tls_addr);
  EXPECT_LT((uptr)&thread_var, tls_addr + tls_size);

  // Ensure that tls and stack do not intersect.
  uptr tls_end = tls_addr + tls_size;
  EXPECT_TRUE(tls_addr < stk_addr || tls_addr >= stk_addr + stk_size);
  EXPECT_TRUE(tls_end < stk_addr || tls_end >= stk_addr + stk_size);
  EXPECT_TRUE((tls_addr < stk_addr) == (tls_end < stk_addr));
#endif
}

static void *WorkerThread(void *arg) {
  TestThreadInfo(false);
  return 0;
}

TEST(SanitizerCommon, ThreadStackTlsMain) {
  InitTlsSize();
  TestThreadInfo(true);
}

TEST(SanitizerCommon, ThreadStackTlsWorker) {
  InitTlsSize();
  pthread_t t;
  PTHREAD_CREATE(&t, 0, WorkerThread, 0);
  PTHREAD_JOIN(t, 0);
}

bool UptrLess(uptr a, uptr b) {
  return a < b;
}

TEST(SanitizerCommon, InternalLowerBound) {
  static const uptr kSize = 5;
  int arr[kSize];
  arr[0] = 1;
  arr[1] = 3;
  arr[2] = 5;
  arr[3] = 7;
  arr[4] = 11;

  EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 0, UptrLess));
  EXPECT_EQ(0u, InternalLowerBound(arr, 0, kSize, 1, UptrLess));
  EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 2, UptrLess));
  EXPECT_EQ(1u, InternalLowerBound(arr, 0, kSize, 3, UptrLess));
  EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 4, UptrLess));
  EXPECT_EQ(2u, InternalLowerBound(arr, 0, kSize, 5, UptrLess));
  EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 6, UptrLess));
  EXPECT_EQ(3u, InternalLowerBound(arr, 0, kSize, 7, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 8, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 9, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 10, UptrLess));
  EXPECT_EQ(4u, InternalLowerBound(arr, 0, kSize, 11, UptrLess));
  EXPECT_EQ(5u, InternalLowerBound(arr, 0, kSize, 12, UptrLess));
}
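
// A boundary-condition sketch added here for illustration (assumption: with an
// empty range, i.e. first == last, InternalLowerBound returns `first` and never
// touches the array, mirroring std::lower_bound on an empty sequence).
TEST(SanitizerCommon, InternalLowerBoundEmptyRange) {
  int arr[1] = {42};
  EXPECT_EQ(0u, InternalLowerBound(arr, 0, 0, 7, UptrLess));
  EXPECT_EQ(1u, InternalLowerBound(arr, 1, 1, 7, UptrLess));
}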

TEST(SanitizerCommon, InternalLowerBoundVsStdLowerBound) {
  std::vector<int> data;
  auto create_item = [] (size_t i, size_t j) {
    auto v = i * 10000 + j;
    return ((v << 6) + (v >> 6) + 0x9e3779b9) % 100;
  };
  for (size_t i = 0; i < 1000; ++i) {
    data.resize(i);
    for (size_t j = 0; j < i; ++j) {
      data[j] = create_item(i, j);
    }

    std::sort(data.begin(), data.end());

    for (size_t j = 0; j < i; ++j) {
      int val = create_item(i, j);
      for (auto to_find : {val - 1, val, val + 1}) {
        uptr expected =
            std::lower_bound(data.begin(), data.end(), to_find) - data.begin();
        EXPECT_EQ(expected, InternalLowerBound(data.data(), 0, data.size(),
                                               to_find, std::less<int>()));
      }
    }
  }
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TEST(SanitizerCommon, FindPathToBinary) {
  char *true_path = FindPathToBinary("true");
  EXPECT_NE((char*)0, internal_strstr(true_path, "/bin/true"));
  InternalFree(true_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#elif SANITIZER_WINDOWS
TEST(SanitizerCommon, FindPathToBinary) {
  // ntdll.dll should be on PATH in all supported test environments on all
  // supported Windows versions.
  char *ntdll_path = FindPathToBinary("ntdll.dll");
  EXPECT_NE((char*)0, internal_strstr(ntdll_path, "ntdll.dll"));
  InternalFree(ntdll_path);
  EXPECT_EQ(0, FindPathToBinary("unexisting_binary.ergjeorj"));
}
#endif

TEST(SanitizerCommon, StripPathPrefix) {
  EXPECT_EQ(0, StripPathPrefix(0, "prefix"));
  EXPECT_STREQ("foo", StripPathPrefix("foo", 0));
  EXPECT_STREQ("dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/usr/lib/"));
  EXPECT_STREQ("/file.cc", StripPathPrefix("/usr/myroot/file.cc", "/myroot"));
  EXPECT_STREQ("file.h", StripPathPrefix("/usr/lib/./file.h", "/usr/lib/"));
}
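
// One more hedged check, added as a sketch (assumption: when the prefix does
// not occur in the path, StripPathPrefix returns the path unchanged rather
// than null or an empty string).
TEST(SanitizerCommon, StripPathPrefixNoMatch) {
  EXPECT_STREQ("/usr/lib/dir/file.cc",
               StripPathPrefix("/usr/lib/dir/file.cc", "/no/such/prefix"));
}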

TEST(SanitizerCommon, RemoveANSIEscapeSequencesFromString) {
  RemoveANSIEscapeSequencesFromString(nullptr);
  const char *buffs[22] = {
    "Default",                                "Default",
    "\033[95mLight magenta",                  "Light magenta",
    "\033[30mBlack\033[32mGreen\033[90mGray", "BlackGreenGray",
    "\033[106mLight cyan \033[107mWhite ",    "Light cyan White ",
    "\033[31mHello\033[0m World",             "Hello World",
    "\033[38;5;82mHello \033[38;5;198mWorld", "Hello World",
    "123[653456789012",                       "123[653456789012",
    "Normal \033[5mBlink \033[25mNormal",     "Normal Blink Normal",
    "\033[106m\033[107m",                     "",
    "",                                       "",
    " ",                                      " ",
  };

  for (size_t i = 0; i < ARRAY_SIZE(buffs); i += 2) {
    char *buffer_copy = internal_strdup(buffs[i]);
    RemoveANSIEscapeSequencesFromString(buffer_copy);
    EXPECT_STREQ(buffer_copy, buffs[i + 1]);
    InternalFree(buffer_copy);
  }
}

TEST(SanitizerCommon, InternalScopedString) {
  InternalScopedString str(10);
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("foo");
  EXPECT_EQ(3U, str.length());
  EXPECT_STREQ("foo", str.data());

  int x = 1234;
  str.append("%d", x);
  EXPECT_EQ(7U, str.length());
  EXPECT_STREQ("foo1234", str.data());

  str.append("%d", x);
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("foo123412", str.data());

  str.clear();
  EXPECT_EQ(0U, str.length());
  EXPECT_STREQ("", str.data());

  str.append("0123456789");
  EXPECT_EQ(9U, str.length());
  EXPECT_STREQ("012345678", str.data());
}

#if SANITIZER_LINUX || SANITIZER_FREEBSD || \
  SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_IOS
TEST(SanitizerCommon, GetRandom) {
  u8 buffer_1[32], buffer_2[32];
  for (bool blocking : { false, true }) {
    EXPECT_FALSE(GetRandom(nullptr, 32, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 0, blocking));
    EXPECT_FALSE(GetRandom(buffer_1, 512, blocking));
    EXPECT_EQ(ARRAY_SIZE(buffer_1), ARRAY_SIZE(buffer_2));
    for (uptr size = 4; size <= ARRAY_SIZE(buffer_1); size += 4) {
      for (uptr i = 0; i < 100; i++) {
        EXPECT_TRUE(GetRandom(buffer_1, size, blocking));
        EXPECT_TRUE(GetRandom(buffer_2, size, blocking));
        EXPECT_NE(internal_memcmp(buffer_1, buffer_2, size), 0);
      }
    }
  }
}
#endif

TEST(SanitizerCommon, ReservedAddressRangeInit) {
  uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);
  UnmapOrDie((void*)res, init_size);
  // Should be able to map into the same space now.
  ReservedAddressRange address_range2;
  uptr res2 = address_range2.Init(init_size, nullptr, res);
  CHECK_EQ(res, res2);

  // TODO(flowerhack): Once this is switched to the "real" implementation
  // (rather than passing through to MmapNoAccess*), enforce and test "no
  // double initializations allowed"
}

TEST(SanitizerCommon, ReservedAddressRangeMap) {
  constexpr uptr init_size = 0xffff;
  ReservedAddressRange address_range;
  uptr res = address_range.Init(init_size);
  CHECK_NE(res, (void*)-1);

  // Valid mappings should succeed.
  CHECK_EQ(res, address_range.Map(res, init_size));

  // Valid mappings should be readable.
  unsigned char buffer[init_size];
  memcpy(buffer, reinterpret_cast<void *>(res), init_size);

  // TODO(flowerhack): Once this is switched to the "real" implementation, make
  // sure you can only mmap into offsets in the Init range.
}

TEST(SanitizerCommon, ReservedAddressRangeUnmap) {
  uptr PageSize = GetPageSizeCached();
  uptr init_size = PageSize * 8;
  ReservedAddressRange address_range;
  uptr base_addr = address_range.Init(init_size);
  CHECK_NE(base_addr, (void*)-1);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Unmapping the entire range should succeed.
  address_range.Unmap(base_addr, init_size);

  // Map a new range.
  base_addr = address_range.Init(init_size);
  CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));

  // Windows doesn't allow partial unmappings.
  #if !SANITIZER_WINDOWS

  // Unmapping at the beginning should succeed.
  address_range.Unmap(base_addr, PageSize);

  // Unmapping at the end should succeed.
  uptr new_start = reinterpret_cast<uptr>(address_range.base()) +
                   address_range.size() - PageSize;
  address_range.Unmap(new_start, PageSize);

  #endif

  // Unmapping in the middle of the ReservedAddressRange should fail.
  EXPECT_DEATH(address_range.Unmap(base_addr + (PageSize * 2), PageSize), ".*");
}

// Windows has no working ReadBinaryName.
#if !SANITIZER_WINDOWS
TEST(SanitizerCommon, ReadBinaryNameCached) {
  char buf[256];
  EXPECT_NE((uptr)0, ReadBinaryNameCached(buf, sizeof(buf)));
}
#endif

}  // namespace __sanitizer