//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif

#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {

#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

// Runtime detected VMA size.
uptr vmaSize;

enum {
  MemTotal,
  MemShadow,
  MemMeta,
  MemFile,
  MemMmap,
  MemHeap,
  MemOther,
  MemCount,
};

void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
  else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
           (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
           (p >= HiAppMemBeg() && p < HiAppMemEnd()))
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else
    mem[MemOther] += rss;
}

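// Renders a single memory-profile line: total RSS broken down into shadow,
// metadata, file-backed and anonymous app mappings, heap and "other" (all in
// MB, hence the ">> 20" shifts below), followed by internal-allocator,
// metamap, trace and stack-depot usage, and live/total thread counts.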
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem));
  GetMemoryProfile(FillProfileCallback, mem);
  auto meta = ctx->metamap.GetMemoryStats();
  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread, nlive;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  uptr trace_mem;
  {
    Lock l(&ctx->slot_mtx);
    trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
  }
  uptr internal_stats[AllocatorStatCount];
  internal_allocator()->GetStats(internal_stats);
  // All these are allocated from the common mmap region.
  mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
                  stacks.allocated + internal_stats[AllocatorStatMapped];
  if (s64(mem[MemMmap]) < 0)
    mem[MemMmap] = 0;
  internal_snprintf(
      buf, buf_size,
      "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
      " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
      " trace:%zu stacks=%zd threads=%zu/%zu\n",
      internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
      mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
      meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
      stacks.allocated >> 20, nlive, nthread);
}

#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with Shadow::kRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
  InternalMmapVector<RawShadow> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
       p++)
    *p = Shadow::kRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  // Reusing the buffer 'name'.
  MemoryMappedSegment segment(name, ARRAY_SIZE(name));
  while (proc_maps.Next(&segment)) {
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(RawShadow)) {
        internal_mmap(
            p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  MapRodata();
}

#endif  // #if !SANITIZER_GO

void InitializePlatformEarly() {
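  // The initial thread's stack is mapped near the top of the usable userspace
  // address range, so the index of the most significant set bit of a current
  // stack address, plus one, gives the number of significant virtual-address
  // bits, i.e. the VMA size.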
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
# endif
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46 and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46 and 47\n", vmaSize);
    Die();
  }
# endif
#elif defined(__mips64)
# if !SANITIZER_GO
  if (vmaSize != 40) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
# endif
#endif
}

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem either, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
  {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
#if SANITIZER_LINUX && defined(__aarch64__)
    // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
    // in the Linux kernel, the random gap between the stack and the mapped
    // area grew from 128M to 36G on 39-bit aarch64. As it is almost
    // impossible to cover such a big range, we disable randomized virtual
    // space on aarch64.
    int old_personality = personality(0xffffffff);
    if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
      VReport(1, "WARNING: Program is run with randomized virtual address "
                 "space, which wouldn't work with ThreadSanitizer.\n"
                 "Re-execing with fixed virtual address space.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    }
    // Initialize the xor key used in {sig}{set,long}jmp.
    InitializeLongjmpXorKey();
#endif
    if (reexec)
      ReExec();
  }

  CheckAndProtect();
  InitTlsSize();
#endif  // !SANITIZER_GO
}

#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state *)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr *)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int *)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Reverse operation of libc stack pointer mangling.
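// glibc (and some other libcs) mangle the stack pointer saved in a jmp_buf
// by XORing it with a secret per-process guard value (on x86_64 additionally
// rotating it), which makes setjmp/longjmp buffers harder to forge. To
// unwind across a longjmp we need the raw stack pointer back, hence the
// per-architecture reversals below.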
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //   xor %fs:0x30, %rsi
  //   rol $0x11, %rsi
  uptr sp;
  asm("ror $0x11, %0 \n"
      "xor %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld  r4, -28696(r13)
  //   xor r4, r3, r4
  uptr xor_key;
  asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#elif defined(__s390x__)
  // tcbhead_t.stack_guard
  uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
  return mangled_sp ^ xor_key;
#else
#error "Unknown platform"
#endif
}

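// Index, in uptr-sized words, of the saved (mangled) stack pointer within a
// jmp_buf. These constants mirror each libc's private jmp_buf layout and must
// be kept in sync with it.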
#if SANITIZER_NETBSD
# ifdef __x86_64__
#  define LONG_JMP_SP_ENV_SLOT 6
# else
#  error unsupported
# endif
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  define LONG_JMP_SP_ENV_SLOT 2
# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__s390x__)
#  define LONG_JMP_SP_ENV_SLOT 9
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}

#if INIT_LONGJMP_XOR_KEY
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random key. For AArch64 it is a global
// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key
// by issuing a setjmp and XORing the mangled and vanilla SP values.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
  asm("mov %0, sp" : "=r" (sp));
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. XOR SPs to obtain the key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif

extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in TLS.
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore macro mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif

#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

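// Returns the ThreadState for the current thread, lazily (re)creating it if
// the TLS slot is empty. All signals are blocked while the slot is set up so
// that a signal handler cannot observe a half-initialized ThreadState. The
// fake dead_thread_state is kept read-only: with the ignore bits set,
// interceptors running on a dead thread should never need to write to it.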
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t emptyset;
    internal_sigfillset(&emptyset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<u32*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

void cur_thread_finalize() {
  __sanitizer_sigset_t emptyset;
  internal_sigfillset(&emptyset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // if !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD