//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
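
// For illustration only: since the default above is weak, an embedding
// application or test can provide a strong definition to intercept reports.
// A hypothetical sketch (the return value tells the runtime whether to treat
// the report as suppressed):
//
//   bool __tsan::OnReport(const ReportDesc *rep, bool suppressed) {
//     if (rep->typ == ReportTypeThreadLeak)
//       return true;  // swallow thread leak reports
//     return suppressed;
//   }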

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
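
// Similarly, a program can observe (but not suppress) each finished report by
// providing its own strong definition of this weak hook. Hypothetical sketch
// (ReportDesc is an internal type, so real user code would typically treat
// the pointer as opaque):
//
//   extern "C" void __tsan_on_report(const __tsan::ReportDesc *rep) {
//     fprintf(stderr, "tsan produced a report of type %d\n", (int)rep->typ);
//   }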
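// Strips the bottom-most frames so that reports end at main() or at the
// thread entry routine, e.g. [..., foo, main, __libc_start_main] is trimmed
// to [..., foo, main].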
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are null, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack can
    // actually happen if we do not instrument some code, so this is only a
    // debug print. However, we must try hard not to miss it due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
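    // (The return address points just past the call instruction; stepping
    // back ensures symbolization attributes the frame to the call site
    // rather than to the next source line.)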
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       StackTrace stack, const MutexSet *mset) {
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(Tid unique_id) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
          IsInStackOrTls, (void *)addr));
  if (!tctx)
    return nullptr;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReportBase::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReportBase::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    Lock l(&s->mtx);
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  return mid;
}

void ScopedReportBase::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = nullptr;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = nullptr;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != nullptr) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

void RestoreStack(Tid tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  Trace *trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack;
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event *)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
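    // Each event is one 64-bit word: the event type in the top bits and a
    // payload (pc, or mutex id for lock/unlock events) in the low
    // kEventPCBits bits.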
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

namespace v3 {

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}

static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2("    MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
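// E.g., a traced 8-byte access may be represented in the shadow as two
// 4-byte accesses, each of which is fully contained within the traced one.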
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of thread tid up to the target event identified
// by sid/epoch/addr/size/typ and restores and returns stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(Tid tid, EventType type, Sid sid, Epoch epoch, uptr addr,
                  uptr size, AccessType typ, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: tid=%u sid=%u@%u addr=0x%zx/%zu typ=%x\n", tid,
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part)
      return false;
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2("  Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2("  FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2("  FuncExit\n");
            CHECK(stack.Size());
            stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2("  AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2("  Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2("  Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of a trace
            // part as the initial mutex set (they are not real events).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2("  Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

}  // namespace v3

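// Two racy stack pairs are considered equal regardless of order, since the
// two accesses of a race are unordered.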
bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

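// Double-checked insertion: first look for a duplicate under the cheap read
// lock (the common case), then re-check under the write lock before
// inserting, since another thread may have added the same entry in between.
// HandleRacyAddress below follows the same pattern.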
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

static bool FindRacyAddress(const RacyAddress &ra0) {
  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
    RacyAddress ra2 = ctx->racy_addresses[i];
    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
    uptr minend = min(ra0.addr_max, ra2.addr_max);
    if (maxbeg < minend) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
  if (!flags()->suppress_equal_addresses)
    return false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyAddress(ra0))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyAddress(ra0))
    return true;
  ctx->racy_addresses.PushBack(ra0);
  return false;
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here: we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = nullptr;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

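// Returns true if the race should be reported even when report_atomic_races
// is off: either neither access is atomic, or an atomic access races with a
// memory free.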
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!ShouldReport(thr, ReportTypeRace))
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem(thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }
  if (HandleRacyAddress(thr, addr_min, addr_max))
    return;

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external_ex.
    // __tsan_symbolize_external_ex is within its rights to crash since the
    // PC is completely bogus. test/tsan/double_race.cpp contains a test case
    // for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  DynamicMutexSet mset2;
  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(&ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes that
// __sanitizer_print_stack_trace is present in the actual unwound stack; a
// tail call to PrintCurrentStackSlow would break this assumption because
// __sanitizer_print_stack_trace would disappear from the stack.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);

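  // Unwind() produces frames innermost-first; reverse them in place into the
  // outermost-first order that SymbolizeStack and StackStripMain expect.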
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"