//===-- tsan_rtl.cc -------------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !defined(SANITIZER_GO) && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_INTERFACE_ATTRIBUTE
bool WEAK OnFinalize(bool failed) {
  return failed;
}
SANITIZER_INTERFACE_ATTRIBUTE
void WEAK OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#ifndef SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks(MBlockRacyStacks)
  , racy_addresses(MBlockRacyAddresses)
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , fired_suppressions(8) {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#ifndef SANITIZER_GO
  , jmp_bufs(MBlockJmpBuf)
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#ifndef SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}

#ifndef SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalScopedBuffer<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        SpinMutexLock l2(&CommonSanitizerReportMutex);
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  uptr shadow_beg = MemToShadow(addr);
  uptr shadow_end = MemToShadow(addr + size);
  FlushUnneededShadowMemory(shadow_beg, shadow_end - shadow_beg);
}

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  MmapFixedNoReserve(MemToShadow(addr), size * kShadowMultiplier, "shadow");

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow");
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, kTraceMemBeg);
  CHECK_LE(addr + size, kTraceMemEnd);
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  uptr addr1 = (uptr)MmapFixedNoReserve(addr, size, name);
  if (addr1 != addr) {
    Printf("FATAL: ThreadSanitizer cannot mmap thread trace (%p/%p->%p)\n",
        addr, size, addr1);
    Die();
  }
}

static void CheckShadowMapping() {
  for (uptr i = 0; i < ARRAY_SIZE(UserRegions); i += 2) {
    const uptr beg = UserRegions[i];
    const uptr end = UserRegions[i + 1];
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -1; x <= 1; x++) {
        const uptr p = p0 + x;
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p & ~(kShadowCell - 1), ShadowToMem(s));
        CHECK(IsMetaMem(m));
      }
    }
  }
}

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *options = GetEnv(kTsanOptionsEnv);
  CacheBinaryName();
  InitializeFlags(&ctx->flags, options);
  CheckVMASize();
#ifndef SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#ifndef SANITIZER_GO
  InitializeShadowMemory();
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#ifndef SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#ifndef __mips__
  StartBackgroundThread();
  SetSandboxingCallback(StopBackgroundThread);
#endif
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, internal_getpid());
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();

#ifndef SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#ifndef SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#ifndef SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#ifndef SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

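// Note: in C/C++ mode the shadow stack has a fixed maximum size, so FuncEntry
// and CurrentStackId only DCHECK against overflow. In Go mode the stack is
// grown on demand by GrowShadowStack below.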
#ifdef SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#ifndef SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

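// Starts a new part of the thread's event trace. The part header records the
// starting epoch, the current call stack and the current mutex set, so that a
// later race report can reconstruct the stack and held mutexes of this
// thread's older accesses from the trace alone.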
void TraceSwitch(ThreadState *thr) {
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

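// Number of events in the per-thread trace. Each increment of the
// history_size flag doubles the trace (and, roughly, how far back in time a
// race report can look for the second stack), at the cost of extra memory
// per thread.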
uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#ifndef SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

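// Shadow cells are read and written with relaxed atomics: the shadow is
// accessed racily by design, and the individual 64-bit loads/stores only
// need to be indivisible, not ordered.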
ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

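// Used by tsan_update_shadow_word_inl.h: writes the pending shadow value *s
// into the given slot and zeroes *s, so that the final "store_word == 0"
// check in MemoryAccessImpl1 knows the current access has already been
// recorded and no replacement store is needed.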
ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#ifndef SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

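// Returns true if the access recorded in the old shadow value is ordered
// before the current thread's state by synchronization, i.e. the current
// thread's vector clock has already reached the old access's epoch for the
// thread that performed it.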
static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, which allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
#include "tsan_update_shadow_word_inl.h"
  idx = 2;
#include "tsan_update_shadow_word_inl.h"
  idx = 3;
#include "tsan_update_shadow_word_inl.h"
#endif

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(store_word == 0))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

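// Splits an access of arbitrary size and alignment into 1/2/4/8-byte accesses
// that never cross an 8-byte shadow cell boundary. As an illustration (not a
// case taken from this file): a 7-byte access starting at offset 6 within a
// cell is handled as a 2-byte access at offset 6, then a 4-byte access at the
// start of the next cell, then a 1-byte access after that.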
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

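// Checks whether one of the shadow slots already holds an access that makes
// the current one redundant: same address range and size, same thread, newer
// than the last synchronization point (epoch > sync_epoch), same atomicity,
// and at least as strong (if the current access is a write, the recorded one
// must be a write too). If so, MemoryAccess can skip the full shadow update.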
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect >= sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

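// Hot path for every instrumented memory access. Cheap checks come first:
// accesses to .rodata cannot race, accesses made while the ignore bit is set
// are skipped, and an access already covered by an identical recent access in
// the shadow (ContainsSameAccess) needs no further work. Only the remaining
// accesses fall through to the full shadow update in MemoryAccessImpl1.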
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (kCppMode && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (fast_state.GetIgnoreBit()) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cc
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
  // so we do it only for C/C++.
  if (kGoMode || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least the first kPageSize/2 of shadow, then continue up to a
    // page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1);
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

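// Marks a freed range in shadow memory. A later access to the range from
// another thread will conflict with this "freed" shadow value and be reported
// as a race with the free.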
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and is most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#ifndef SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  thr->ignore_reads_and_writes--;
  CHECK_GE(thr->ignore_reads_and_writes, 0);
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#ifndef SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (!ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  thr->ignore_sync--;
  CHECK_GE(thr->ignore_sync, 0);
#ifndef SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#ifndef SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif