//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

// Strips the bottom-most frame (the caller of main or the thread start
// routine) from a symbolized stack, so reports end at user code.
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry.FindThreadContextLocked(IsInStackOrTls,
                                                   (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack = SymbolizeStackId(creation_stack_id);
  return rm->id;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByTidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (ThreadContext *tctx = FindThreadByTidLocked(b->tid))
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
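    // That is why the replay below stops one element short of the part's
    // capacity: an event never starts in events[kSize - 1].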
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}

// Records the current replayed stack and mutex set as the result for the
// matched event.
static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2(" MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2(" journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
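  // The snapshot is taken under trace->mtx below so the parts list cannot
  // change while we read it.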
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part;
            // if the stack was truncated we can have more func exits than
            // entries.
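            // So only pop when the restored stack is non-empty.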
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of trace
            // part as initial mutex set (are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here; we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

static bool SpuriousRace(Shadow old) {
  Shadow last(LoadShadow(&ctx->last_spurious_race));
  return last.sid() == old.sid() && last.epoch() == old.epoch();
}

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;
  if (SpuriousRace(old))
    return;

  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  if (SpuriousRace(old))
    return;
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
    StoreShadow(&ctx->last_spurious_race, old.raw());
    return;
  }

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

  if (flags()->print_full_thread_history) {
    const ReportDesc *rep_desc = rep.GetReport();
    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
      Tid parent_tid = rep_desc->threads[i]->parent_tid;
      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
        continue;
      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
          ctx->thread_registry.GetThreadLocked(parent_tid));
      rep.AddThread(parent_tctx);
    }
  }

#if !SANITIZER_GO
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif
  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but
// a tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough; please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);

  // Reverse the frame order before printing.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"