//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no point in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
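
// A minimal sketch of what an application-side override could look like
// (illustrative only; because the default above is weak, a strong definition
// in the program takes precedence):
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     // e.g. forward rep to a custom logger, then keep the default decision.
//     return suppressed;
//   }
//   }  // namespace __tsan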

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip the frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so this is only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}
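
// Illustrative effect of StackStripMain on a non-Go stack, innermost frame
// first (the exact bottom frame depends on the platform's C library):
//   before: race() <- do_work() <- main() <- __libc_start_main()
//   after:  race() <- do_work() <- main()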

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}
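
// Note: SymbolizeStack expects its input with the outermost frame first and
// prepends symbolized frames as it walks the buffer, so the resulting report
// lists the innermost frame first (the order in which reports are printed).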

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}
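
// Typical usage (see ReportRace below): lock the thread registry, construct
// a ScopedReport, populate it via the Add*() methods, and hand it to
// OutputReport(); the report mutex is held for the object's entire lifetime.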

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->workerthread = tctx->workerthread;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check the uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

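// Tries to classify addr as a file descriptor, a heap block, or a thread's
// stack/TLS region (in that order), and finally as a symbolized data
// (global) location; attaches the matching record, and the owning thread
// where known, to the report.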
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace up to the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
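  // Each event is a u64 that packs the event type into the top bits and the
  // PC (or mutex address) into the low kEventPCBits bits; the decoding below
  // undoes that. Sketch of how the replay rebuilds a call stack, assuming
  // stack0 = [main] and hypothetical PCs f, g, pc:
  //   FuncEnter f  -> call stack [main, f]
  //   FuncEnter g  -> call stack [main, f, g]
  //   Mop pc       -> restored trace at this point: [main, f, g, pc]
  //   FuncExit (g) -> call stack [main, f]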
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

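// Returns true if an equivalent race was already reported: either the same
// pair of stack hashes or an overlapping address range, depending on the
// suppress_equal_stacks/suppress_equal_addresses flags.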
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

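// Runs the report through suppressions and the OnReport hook, prints it if
// nothing suppressed it, and returns true iff the report was actually
// emitted.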
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

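// Decides whether a race should still be reported when report_atomic_races
// is off: races between two plain accesses always qualify, and so does an
// atomic access racing with a free (in either direction).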
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // The symbolizer makes lots of intercepted calls. If we try to process
  // them, at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race, and we get here and call
    // TraceTopPC to get the current PC; however, it now contains some
    // unrelated events from the callback. Most likely, TraceTopPC will
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external_ex, which is within
    // its rights to crash since the PC is completely bogus.
    // test/tsan/double_race.cc contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on the stack.
  Vector<u64> mset_buffer;
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes
// that __sanitizer_print_stack_trace exists in the actual unwound stack,
// and a tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough; please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
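  // The unwinder produces the stack innermost-frame-first, while
  // SymbolizeStack expects the opposite order (it prepends frames as it
  // walks the buffer, see above), so reverse the buffer in place first.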
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"