//===-- tsan_rtl_thread.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

#ifndef TSAN_GO
const int kThreadQuarantineSize = 16;
#else
const int kThreadQuarantineSize = 64;
#endif

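// Reports a thread leak for a non-detached thread that was never joined,
// i.e. one still in Created, Running or Finished state at process exit.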
static void MaybeReportThreadLeak(ThreadContext *tctx) {
  if (tctx->detached)
    return;
  if (tctx->status != ThreadStatusCreated
      && tctx->status != ThreadStatusRunning
      && tctx->status != ThreadStatusFinished)
    return;
  ScopedReport rep(ReportTypeThreadLeak);
  rep.AddThread(tctx);
  OutputReport(CTX(), rep);
}

void ThreadFinalize(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  if (!flags()->report_thread_leaks)
    return;
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    MaybeReportThreadLeak(tctx);
  }
}

int ThreadCount(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  int cnt = 0;
  for (unsigned i = 0; i < kMaxTid; i++) {
    ThreadContext *tctx = ctx->threads[i];
    if (tctx == 0)
      continue;
    if (tctx->status != ThreadStatusCreated
        && tctx->status != ThreadStatusRunning)
      continue;
    cnt++;
  }
  return cnt;
}

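// Marks the thread context as dead and appends it to the dead list,
// from which descriptors are later reused by ThreadCreate.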
static void ThreadDead(ThreadState *thr, ThreadContext *tctx) {
  Context *ctx = CTX();
  CHECK_GT(thr->in_rtl, 0);
  CHECK(tctx->status == ThreadStatusRunning
      || tctx->status == ThreadStatusFinished);
  DPrintf("#%d: ThreadDead uid=%zu\n", thr->tid, tctx->user_id);
  tctx->status = ThreadStatusDead;
  tctx->user_id = 0;
  tctx->sync.Reset();

  // Put to dead list.
  tctx->dead_next = 0;
  if (ctx->dead_list_size == 0)
    ctx->dead_list_head = tctx;
  else
    ctx->dead_list_tail->dead_next = tctx;
  ctx->dead_list_tail = tctx;
  ctx->dead_list_size++;
}

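// Allocates a tid and thread descriptor for a new thread. A quarantined
// descriptor and tid are reused once the dead list grows past
// kThreadQuarantineSize (or tids are exhausted); otherwise fresh ones
// are allocated and the thread's trace region is mapped.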
int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  StatInc(thr, StatThreadCreate);
  int tid = -1;
  ThreadContext *tctx = 0;
  if (ctx->dead_list_size > kThreadQuarantineSize
      || ctx->thread_seq >= kMaxTid) {
    // Reusing old thread descriptor and tid.
    if (ctx->dead_list_size == 0) {
      Printf("ThreadSanitizer: %d thread limit exceeded. Dying.\n",
             kMaxTid);
      Die();
    }
    StatInc(thr, StatThreadReuse);
    tctx = ctx->dead_list_head;
    ctx->dead_list_head = tctx->dead_next;
    ctx->dead_list_size--;
    if (ctx->dead_list_size == 0) {
      CHECK_EQ(tctx->dead_next, 0);
      ctx->dead_list_head = 0;
    }
    CHECK_EQ(tctx->status, ThreadStatusDead);
    tctx->status = ThreadStatusInvalid;
    tctx->reuse_count++;
    tctx->sync.Reset();
    tid = tctx->tid;
    DestroyAndFree(tctx->dead_info);
    if (tctx->name) {
      internal_free(tctx->name);
      tctx->name = 0;
    }
  } else {
    // Allocating new thread descriptor and tid.
    StatInc(thr, StatThreadMaxTid);
    tid = ctx->thread_seq++;
    void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
    tctx = new(mem) ThreadContext(tid);
    ctx->threads[tid] = tctx;
    MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event));
  }
  CHECK_NE(tctx, 0);
  CHECK_GE(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", thr->tid, tid, uid);
  CHECK_EQ(tctx->status, ThreadStatusInvalid);
  ctx->alive_threads++;
  if (ctx->max_alive_threads < ctx->alive_threads) {
    ctx->max_alive_threads++;
    CHECK_EQ(ctx->max_alive_threads, ctx->alive_threads);
    StatInc(thr, StatThreadMaxAlive);
  }
  tctx->status = ThreadStatusCreated;
  tctx->thr = 0;
  tctx->user_id = uid;
  tctx->unique_id = ctx->unique_thread_seq++;
  tctx->detached = detached;
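  // For non-main threads, synchronize the child with its creator:
  // the creator's clock is released into tctx->sync and acquired
  // by the new thread in ThreadStart.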
  if (tid) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);
    tctx->creation_stack.ObtainCurrent(thr, pc);
    tctx->creation_tid = thr->tid;
  }
  return tid;
}

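// Initializes the started thread's state: resets shadow for its stack and
// TLS (for non-main threads), constructs the ThreadState object, and
// acquires the synchronization released by the creator in ThreadCreate.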
void ThreadStart(ThreadState *thr, int tid, uptr os_id) {
  CHECK_GT(thr->in_rtl, 0);
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);

  if (tid) {
    if (stk_addr && stk_size) {
      MemoryResetRange(thr, /*pc=*/ 1, stk_addr, stk_size);
    }

    if (tls_addr && tls_size) {
      // Check that the thr object is in tls.
      const uptr thr_beg = (uptr)thr;
      const uptr thr_end = (uptr)thr + sizeof(*thr);
      CHECK_GE(thr_beg, tls_addr);
      CHECK_LE(thr_beg, tls_addr + tls_size);
      CHECK_GE(thr_end, tls_addr);
      CHECK_LE(thr_end, tls_addr + tls_size);
      // Since the thr object is huge, skip it.
      MemoryResetRange(thr, /*pc=*/ 2, tls_addr, thr_beg - tls_addr);
      MemoryResetRange(thr, /*pc=*/ 2, thr_end, tls_addr + tls_size - thr_end);
    }
  }

  Lock l(&CTX()->thread_mtx);
  ThreadContext *tctx = CTX()->threads[tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusCreated);
  tctx->status = ThreadStatusRunning;
  tctx->os_id = os_id;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  tctx->epoch0 = RoundUp(tctx->epoch1 + 1, kTracePartSize);
  tctx->epoch1 = (u64)-1;
  new(thr) ThreadState(CTX(), tid, tctx->unique_id,
      tctx->epoch0, stk_addr, stk_size,
      tls_addr, tls_size);
#ifdef TSAN_GO
  // Setup dynamic shadow stack.
  const int kInitStackSize = 8;
  thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
      kInitStackSize * sizeof(uptr));
  thr->shadow_stack_pos = thr->shadow_stack;
  thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
#endif
#ifndef TSAN_GO
  AllocatorThreadStart(thr);
#endif
  tctx->thr = thr;
  thr->fast_synch_epoch = tctx->epoch0;
  thr->clock.set(tid, tctx->epoch0);
  thr->clock.acquire(&tctx->sync);
  thr->fast_state.SetHistorySize(flags()->history_size);
  const uptr trace = (tctx->epoch0 / kTracePartSize) % TraceParts();
  thr->trace.headers[trace].epoch0 = tctx->epoch0;
  StatInc(thr, StatSyncAcquire);
  DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
          "tls_addr=%zx tls_size=%zx\n",
          tid, (uptr)tctx->epoch0, stk_addr, stk_size, tls_addr, tls_size);
  thr->is_alive = true;
}

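// Finalizes the finishing thread: clears shadow for its stack and TLS,
// releases its clock into tctx->sync for a later join (unless detached),
// and preserves the trace headers in dead_info for future reports.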
void ThreadFinish(ThreadState *thr) {
  CHECK_GT(thr->in_rtl, 0);
  StatInc(thr, StatThreadFinish);
  // FIXME: Treat it as write.
  if (thr->stk_addr && thr->stk_size)
    MemoryResetRange(thr, /*pc=*/ 3, thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size) {
    const uptr thr_beg = (uptr)thr;
    const uptr thr_end = (uptr)thr + sizeof(*thr);
    // Since the thr object is huge, skip it.
    MemoryResetRange(thr, /*pc=*/ 4, thr->tls_addr, thr_beg - thr->tls_addr);
    MemoryResetRange(thr, /*pc=*/ 5,
        thr_end, thr->tls_addr + thr->tls_size - thr_end);
  }
  thr->is_alive = false;
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  CHECK_GT(ctx->alive_threads, 0);
  ctx->alive_threads--;
  if (tctx->detached) {
    ThreadDead(thr, tctx);
  } else {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    thr->clock.set(thr->tid, thr->fast_state.epoch());
    thr->fast_synch_epoch = thr->fast_state.epoch();
    thr->clock.release(&tctx->sync);
    StatInc(thr, StatSyncRelease);
    tctx->status = ThreadStatusFinished;
  }

  // Save info about the thread.
  tctx->dead_info = new(internal_alloc(MBlockDeadInfo, sizeof(ThreadDeadInfo)))
      ThreadDeadInfo();
  for (uptr i = 0; i < TraceParts(); i++) {
    tctx->dead_info->trace.headers[i].epoch0 = thr->trace.headers[i].epoch0;
    tctx->dead_info->trace.headers[i].stack0.CopyFrom(
        thr->trace.headers[i].stack0);
  }
  tctx->epoch1 = thr->fast_state.epoch();

#ifndef TSAN_GO
  AllocatorThreadFinish(thr);
#endif
  thr->~ThreadState();
  StatAggregate(ctx->stat, thr->stat);
  tctx->thr = 0;
}

int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
  CHECK_GT(thr->in_rtl, 0);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  int res = -1;
  for (unsigned tid = 0; tid < kMaxTid; tid++) {
    ThreadContext *tctx = ctx->threads[tid];
    if (tctx != 0 && tctx->user_id == uid
        && tctx->status != ThreadStatusInvalid) {
      tctx->user_id = 0;
      res = tid;
      break;
    }
  }
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
  return res;
}

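// The joining thread acquires the clock released by the finished thread
// in ThreadFinish, then moves the context to the dead list.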
void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: join of non-existent thread\n");
    return;
  }
  // FIXME(dvyukov): print message and continue (it's user error).
  CHECK_EQ(tctx->detached, false);
  CHECK_EQ(tctx->status, ThreadStatusFinished);
  thr->clock.acquire(&tctx->sync);
  StatInc(thr, StatSyncAcquire);
  ThreadDead(thr, tctx);
}

void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
  CHECK_GT(thr->in_rtl, 0);
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[tid];
  if (tctx->status == ThreadStatusInvalid) {
    Printf("ThreadSanitizer: detach of non-existent thread\n");
    return;
  }
  if (tctx->status == ThreadStatusFinished) {
    ThreadDead(thr, tctx);
  } else {
    tctx->detached = true;
  }
}

void ThreadSetName(ThreadState *thr, const char *name) {
  Context *ctx = CTX();
  Lock l(&ctx->thread_mtx);
  ThreadContext *tctx = ctx->threads[thr->tid];
  CHECK_NE(tctx, 0);
  CHECK_EQ(tctx->status, ThreadStatusRunning);
  if (tctx->name) {
    internal_free(tctx->name);
    tctx->name = 0;
  }
  if (name)
    tctx->name = internal_strdup(name);
}

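// Processes a range access as a sequence of shadow-cell sized accesses:
// an unaligned head byte-by-byte, then whole kShadowCell-sized cells,
// then the remaining tail bytes.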
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
                       uptr size, bool is_write) {
  if (size == 0)
    return;

  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
      thr->tid, (void*)pc, (void*)addr,
      (int)size, is_write);

#if TSAN_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsAppMem(addr + size - 1)) {
    Printf("Access to non app mem %zx\n", addr + size - 1);
    DCHECK(IsAppMem(addr + size - 1));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
  if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
    Printf("Bad shadow addr %p (%zx)\n",
           shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
    DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
  }
#endif

  StatInc(thr, StatMopRange);

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;

  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

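  // If the start is unaligned, the first shadow cell is handled
  // byte-by-byte below, and shadow_mem is advanced past it afterwards.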
  bool unaligned = (addr % kShadowCell) != 0;

  // Handle unaligned beginning, if any.
  for (; addr % kShadowCell && size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
  if (unaligned)
    shadow_mem += kShadowCnt;
  // Handle middle part, if any.
  for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
    int const kAccessSizeLog = 3;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
    shadow_mem += kShadowCnt;
  }
  // Handle ending, if any.
  for (; size; addr++, size--) {
    int const kAccessSizeLog = 0;
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
    MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
        shadow_mem, cur);
  }
}

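// Like MemoryAccessRange, but records a 1-byte access at every step-th
// address in [addr, addr + size).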
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write) {
  if (size == 0)
    return;
  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit())
    return;
  StatInc(thr, StatMopRange);
  fast_state.IncrementEpoch();
  thr->fast_state = fast_state;
  TraceAddEvent(thr, fast_state, EventTypeMop, pc);

  for (uptr addr_end = addr + size; addr < addr_end; addr += step) {
    u64 *shadow_mem = (u64*)MemToShadow(addr);
    Shadow cur(fast_state);
    cur.SetWrite(is_write);
    cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kSizeLog1);
    MemoryAccessImpl(thr, addr, kSizeLog1, is_write, false,
        shadow_mem, cur);
  }
}
}  // namespace __tsan