//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
    SANITIZER_OPENBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_openbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
// Ask glibc's resolv.h to expose only the struct __res_state definition,
// which ExtractResolvFDs() below pokes into.
#define __need_res_state
#include <resolv.h>
#endif

// Some libcs define sa_handler/sa_sigaction as macros over the union members
// of struct sigaction; undefine them so we can use the names freely.
#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

// On Linux/aarch64 (non-Go), glibc XORs the SP stored in jmp_buf with a
// process-global key that we must recover at startup; see
// InitializeLongjmpXorKey() below.
#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif

#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {

#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
#endif

// Buckets for the RSS memory profile; indices into the mem[] array filled by
// FillProfileCallback() and printed by WriteMemoryProfile().
enum {
  MemTotal = 0,
  MemShadow = 1,
  MemMeta = 2,
  MemFile = 3,
  MemMmap = 4,
  MemTrace = 5,
  MemHeap = 6,
  MemOther = 7,
  MemCount = 8,
};

// Memory-profile callback: attributes the resident size 'rss' of the mapping
// at address 'p' to one of the Mem* buckets in 'mem', based on which TSan
// address-space region contains 'p'. 'file' tells whether the mapping is
// file-backed (splits MemFile vs MemMmap for app memory).
void FillProfileCallback(uptr p, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
#if !SANITIZER_GO
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#else
  else if (p >= AppMemBeg() && p < AppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#endif
  else if (p >= TraceMemBeg() && p < TraceMemEnd())
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}

// Formats a one-line memory/thread usage summary (sizes in MB) into 'buf'.
// 'nthread'/'nlive' are total and live thread counts supplied by the caller.
void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
      stacks->allocated >> 20, stacks->n_uniq_ids,
      nlive, nthread);
}

#if SANITIZER_LINUX
// Runs with all other threads suspended; returns shadow pages to the OS.
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
}
#endif

void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}

#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  // Best effort: without a usable tmp dir we simply skip the optimization.
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalMmapVector<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(u64));
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    // Heuristic: read-only executable file-backed mappings (not special
    // entries like "[vdso]") are treated as .rodata.
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      // Overlay the marker file (MAP_FIXED, read-only) over the whole shadow
      // range, one marker-sized chunk at a time.
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(u64)) {
        internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  MapRodata();
}

#endif  // #if !SANITIZER_GO

void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
  // Detect the VMA size from a current stack address: the highest set bit of
  // a stack frame address (+1) gives the number of usable address bits.
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
#else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
#endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
    Die();
  }
# endif
#endif
#endif
}

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After patch "arm64: mm: support ARCH_MMAP_RND_BITS." is introduced in
    // linux kernel, the random gap between stack and mapped area is increased
    // from 128M to 36G on 39-bit aarch64. As it is almost impossible to cover
    // this big range, we should disable randomized virtual space on aarch64.
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the xor key used in {sig}{set,long}jump.
    InitializeLongjmpXorKey();
#endif
    if (reexec)
      ReExec();
  }

  CheckAndProtect();
  InitTlsSize();
#endif  // !SANITIZER_GO
}

#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state*)state;
  // Collect up to nfd open nameserver sockets from the resolver state.
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is requried to properly handle "open" of these fds.
// see 'man recvmsg' and 'man 3 cmsg'.
// Walks the control messages of the msghdr '*msgp' and copies every file
// descriptor passed via SCM_RIGHTS into 'fds', up to 'nfd' entries.
// Returns the number of descriptors extracted.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    // Payload length minus header gives the number of packed ints (fds).
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Reverse operation of libc stack pointer mangling
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //     xor  %fs:0x30, %rsi
  //     rol  $0x11, %rsi
  uptr sp;
  asm("ror  $0x11, %0 \n"
      "xor  %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  // Key recovered at startup by InitializeLongjmpXorKey().
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld   r4, -28696(r13)
  //   xor  r4, r3, r4
  uptr xor_key;
  asm("ld  %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#else
  #error "Unknown platform"
#endif
}

// Index of the (possibly mangled) saved stack pointer within a jmp_buf,
// which differs per libc/architecture.
#ifdef __powerpc__
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# define LONG_JMP_SP_ENV_SLOT 2
#elif SANITIZER_NETBSD
# define LONG_JMP_SP_ENV_SLOT 6
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

// Returns the demangled stack pointer stored in the given jmp_buf ('env').
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}

#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
// issuing a setjmp and XORing the SP pointer values to derive the key.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
  asm("mov  %0, sp" : "=r" (sp));
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. xor SPs to obtain key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif

// Marks the thread's TLS range [tls_addr, tls_addr+tls_size) as written by
// this thread, skipping the portion occupied by the ThreadState object
// itself (which must lie entirely within the TLS range).
void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in tls;
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
                          tls_addr + tls_size - thr_end);
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are hardcore macros mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
// On Linux/BSD the allocator is already interposed; nothing to replace.
void ReplaceSystemMalloc() { }
#endif

#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

// Returns the calling thread's ThreadState, lazily allocating it (and the
// shared read-only dead_thread_state) on first use. All signals are blocked
// during initialization so a signal handler cannot observe a half-set-up
// TLS slot.
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    // Re-check under blocked signals: a signal handler may have initialized
    // the slot between the first read and the mask change.
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<int*>(&dead_thread_state->tid) = -1;
        // Make the shared dead state read-only so no thread can corrupt it.
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

// Releases the thread's ThreadState and points its TLS slot at the shared
// read-only dead_thread_state, again with all signals blocked.
void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
        // SANITIZER_OPENBSD