//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(int tid)
  : ThreadContextBase(tid)
  , thr()
  , sync()
  , epoch0()
  , epoch1() {
}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

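// Called by the thread registry when the context is retired for reuse;
// the per-thread sync clock must have been released by this point.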
void ThreadContext::OnDead() {
  CHECK_EQ(sync.size(), 0);
}

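// pthread_join() is a synchronization point: acquire the terminated thread's
// release clock into the joiner, then recycle the clock storage.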
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

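// Thread creation is a release operation: whatever the parent did before
// spawning the child must be visible to the child. tid 0 is the main thread,
// which has no creator to synchronize with.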
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == 0)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
  if (reuse_count == 0)
    StatInc(args->thr, StatThreadMaxTid);
}

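// The context slot is being recycled; return its trace region, which can be
// large, back to the OS.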
void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  uptr trace_p = GetThreadTrace(tid);
  ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}

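// A detached thread will never be joined, so its release clock can be
// recycled right away.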
void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState*>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

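// Runs when the thread begins execution, before any user code:
// (re)construct ThreadState in place, set up the shadow call stack and
// acquire the creator's release clock (stored into sync by OnCreated).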
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
      args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
#if !SANITIZER_GO
  thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
#else
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  StatInc(thr, StatSyncAcquire);
  sync.Reset(&thr->proc()->clock_cache);
  thr->is_inited = true;
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)epoch0, args->stk_addr, args->stk_size,
          args->tls_addr, args->tls_size);
}

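// Runs when the thread terminates: publish its final clock for a future
// join (unless detached) and tear down all per-thread state.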
void ThreadContext::OnFinished() {
#if SANITIZER_GO
  internal_free(thr->shadow_stack);
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->clock.ResetCached(&thr->proc()->clock_cache);
#if !SANITIZER_GO
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
#endif
  thr->~ThreadState();
#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
#endif
  thr = 0;
}

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

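// Thread registry callback: deduplicate finished-but-unjoined threads by
// creation stack so that identical leaks are reported once with a count.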
static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
  Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  ThreadLeak leak = {tctx, 1};
  leaks.PushBack(leak);
}
#endif

#if !SANITIZER_GO
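// A thread that finishes with ThreadIgnoreBegin/ThreadIgnoreSyncBegin still
// active indicates unbalanced ignore annotations; print the enable sites and
// abort.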
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == 0) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
      " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

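// Called once at process shutdown: verify that the calling thread's ignores
// are balanced and report threads that finished without being joined.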
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!flags()->report_thread_leaks)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      MaybeReportThreadLeak, &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

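// Registers the new thread in the registry. uid is the user-level identifier
// (e.g. pthread_t) used later by ThreadTid() to map back to a tid.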
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  StatInc(thr, StatThreadCreate);
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  int tid =
      ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
  return tid;
}

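// Executed on the new thread itself. Imitating writes to the stack and TLS
// ranges resets any stale shadow left at these addresses by a previous
// thread, preventing false races with the new owner.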
void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size)
      MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);

    if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif

  ThreadRegistry *tr = ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, workerthread, &args);

  tr->Lock();
  thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
  tr->Unlock();

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif
}

void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  StatInc(thr, StatThreadFinish);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry->FinishThread(thr->tid);
}

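// Registry callback for ThreadTid(): match a thread by user id and consume
// (zero) the id, so that a pthread_t reused by the OS does not resolve to
// the old, already-finished thread.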
static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
  uptr uid = (uptr)arg;
  if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
    tctx->user_id = 0;
    return true;
  }
  return false;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry->JoinThread(tid, thr);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry->DetachThread(tid, thr);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry->SetThreadName(thr->tid, name);
}

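// Instruments a range access (memset/memcpy and similar): one trace event
// covers the whole range, then the shadow is updated cell by cell, with the
// unaligned head and tail handled byte by byte.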
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
               shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  if (*shadow_mem == kShadowRodata) {
    DCHECK(!is_write);
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMopRangeRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

}  // namespace __tsan