//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);
// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
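
// A minimal sketch of how an application might intercept reports in the
// default (non-TSAN_EXTERNAL_HOOKS) build: provide a strong definition that
// overrides the weak hook above. Returning true suppresses the report. This
// is an illustration, not part of this file:
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     return true;  // swallow every report
//   }
//   }  // namespace __tsan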

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // actually happen if we do not instrument some code, so this is only a
    // debug print. However, we must try hard not to miss it due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
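    // PCs with kExternalPCBit set are opaque tags handled by the
    // __tsan_symbolize_external machinery rather than real code addresses,
    // so they must not be adjusted.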
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

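// A ScopedReportBase locks ctx->report_mtx in its constructor and releases it
// in its destructor, so at most one report is built and printed at a time.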
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new (mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
  rep_ = nullptr;
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
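  // The shadow value encodes the access offset within its 8-byte granule
  // (s.addr0()) and the access size; addr points at the granule start, so
  // their sum recovers the precise access address.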
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new (mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new (mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void *)addr));
  if (!tctx)
    return nullptr;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new (mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReportBase::AddMutex(u64 id) NO_THREAD_SAFETY_ANALYSIS {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new (mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = nullptr;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = nullptr;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    void *block_begin = a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != nullptr) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace till the given epoch.
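  // A sketch of the assumed trace event encoding (see tsan_trace.h): each
  // event is a u64 with the event type stored in the bits above kEventPCBits
  // and the PC (or address) in the low kEventPCBits bits, which is exactly
  // how the replay loop below decodes it:
  //   EventType typ = (EventType)(ev >> kEventPCBits);
  //   uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));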
  Trace *trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event *)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2(" #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
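  // Double-checked pattern: probe under the cheap read lock first; only if
  // the hash is not found, re-check and insert under the exclusive lock, so
  // the common duplicate-report case does not contend on the write lock.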
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

static bool FindRacyAddress(const RacyAddress &ra0) {
  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
    RacyAddress ra2 = ctx->racy_addresses[i];
    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
    uptr minend = min(ra0.addr_max, ra2.addr_max);
    if (maxbeg < minend) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
  if (!flags()->suppress_equal_addresses)
    return false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyAddress(ra0))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyAddress(ra0))
    return true;
  ctx->racy_addresses.PushBack(ra0);
  return false;
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here, we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = nullptr;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

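// Returns true if this pair of accesses should be reported even when
// report_atomic_races=0: either neither access is atomic (it is a plain data
// race), or an atomic access races with a free.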
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!ShouldReport(thr, ReportTypeRace))
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }
  if (HandleRacyAddress(thr, addr_min, addr_max))
    return;

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback, a
    // __tsan_symbolize_external callback, and a race during a memory range
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it (in
    // GetPreviousInstructionPc) and the resulting PC has kExternalPCBit set,
    // so we pass it to __tsan_symbolize_external_ex, which is within its
    // rights to crash since the PC is completely bogus.
    // test/tsan/double_race.cpp contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new (&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  BufferedStackTrace *ptrace =
      new (internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(pc, bp, nullptr, false);

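  // SymbolizeStack() prepends each symbolized frame and thus reverses its
  // input; the unwound trace is innermost-frame-first, so reverse it here so
  // that the report still prints the innermost frame first.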
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"