//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_report.h"
#include "tsan_platform.h"
#include "tsan_mutexset.h"
#include "tsan_ignoreset.h"
#include "tsan_stack_trace.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
static const uptr kAllocatorRegionSizeLog = 20;
static const uptr kAllocatorNumRegions =
    SANITIZER_MMAP_RANGE_SIZE >> kAllocatorRegionSizeLog;
using ByteMap = TwoLevelByteMap<(kAllocatorNumRegions >> 12), 1 << 12,
                                LocalAddressSpaceView, MapUnmapCallback>;
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = kAllocatorRegionSizeLog;
  using AddressSpaceView = LocalAddressSpaceView;
  using ByteMap = __tsan::ByteMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
typedef LargeMmapAllocator<MapUnmapCallback> SecondaryAllocator;
typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
    SecondaryAllocator> Allocator;
Allocator *allocator();
#endif
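
// Usage sketch (illustrative, not a declaration from this header): user heap
// requests go through the combined allocator via the per-Processor cache,
// roughly as done in tsan_mman:
//   void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
//   ...
//   allocator()->Deallocate(&thr->proc()->alloc_cache, p);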

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2);

const u64 kShadowRodata = (u64)-1;  // .rodata shadow marker

// FastState (from most significant bit):
//   ignore          : 1
//   tid             : kTidBits
//   unused          : -
//   history_size    : 3
//   epoch           : kClkBits
class FastState {
 public:
  FastState(u64 tid, u64 epoch) {
    x_ = tid << kTidShift;
    x_ |= epoch;
    DCHECK_EQ(tid, this->tid());
    DCHECK_EQ(epoch, this->epoch());
    DCHECK_EQ(GetIgnoreBit(), false);
  }

  explicit FastState(u64 x)
      : x_(x) {
  }

  u64 raw() const {
    return x_;
  }

  u64 tid() const {
    u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
    return res;
  }

  u64 TidWithIgnore() const {
    u64 res = x_ >> kTidShift;
    return res;
  }

  u64 epoch() const {
    u64 res = x_ & ((1ull << kClkBits) - 1);
    return res;
  }

  void IncrementEpoch() {
    u64 old_epoch = epoch();
    x_ += 1;
    DCHECK_EQ(old_epoch + 1, epoch());
    (void)old_epoch;
  }

  void SetIgnoreBit() { x_ |= kIgnoreBit; }
  void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
  bool GetIgnoreBit() const { return (s64)x_ < 0; }

  void SetHistorySize(int hs) {
    CHECK_GE(hs, 0);
    CHECK_LE(hs, 7);
    x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
  }

  ALWAYS_INLINE
  int GetHistorySize() const {
    return (int)((x_ >> kHistoryShift) & kHistoryMask);
  }

  void ClearHistorySize() {
    SetHistorySize(0);
  }

  ALWAYS_INLINE
  u64 GetTracePos() const {
    const int hs = GetHistorySize();
    // When hs == 0, the trace consists of 2 parts.
    const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
    return epoch() & mask;
  }

 private:
  friend class Shadow;
  static const int kTidShift = 64 - kTidBits - 1;
  static const u64 kIgnoreBit = 1ull << 63;
  static const u64 kFreedBit = 1ull << 63;
  static const u64 kHistoryShift = kClkBits;
  static const u64 kHistoryMask = 7;
  u64 x_;
};
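
// Worked example (illustrative, not part of the runtime): for tid=5, epoch=42
// the constructor stores (5 << kTidShift) | 42. Since the epoch occupies the
// low kClkBits bits, IncrementEpoch() is a single addition:
//   FastState fs(/*tid=*/5, /*epoch=*/42);
//   fs.IncrementEpoch();  // fs.epoch() == 43, fs.tid() == 5
//   fs.SetIgnoreBit();    // sets the msb; tid() masks it back out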

// Shadow (from most significant bit):
//   freed           : 1
//   tid             : kTidBits
//   is_atomic       : 1
//   is_read         : 1
//   size_log        : 2
//   addr0           : 3
//   epoch           : kClkBits
class Shadow : public FastState {
 public:
  explicit Shadow(u64 x)
      : FastState(x) {
  }

  explicit Shadow(const FastState &s)
      : FastState(s.x_) {
    ClearHistorySize();
  }

  void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
    DCHECK_EQ((x_ >> kClkBits) & 31, 0);
    DCHECK_LE(addr0, 7);
    DCHECK_LE(kAccessSizeLog, 3);
    x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
    DCHECK_EQ(kAccessSizeLog, size_log());
    DCHECK_EQ(addr0, this->addr0());
  }

  void SetWrite(unsigned kAccessIsWrite) {
    DCHECK_EQ(x_ & kReadBit, 0);
    if (!kAccessIsWrite)
      x_ |= kReadBit;
    DCHECK_EQ(kAccessIsWrite, IsWrite());
  }

  void SetAtomic(bool kIsAtomic) {
    DCHECK(!IsAtomic());
    if (kIsAtomic)
      x_ |= kAtomicBit;
    DCHECK_EQ(IsAtomic(), kIsAtomic);
  }

  bool IsAtomic() const {
    return x_ & kAtomicBit;
  }

  bool IsZero() const {
    return x_ == 0;
  }

  static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
    u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
    DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
    return shifted_xor == 0;
  }

  static ALWAYS_INLINE
  bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
    u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
    return masked_xor == 0;
  }

  static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
      unsigned kS2AccessSize) {
    bool res = false;
    u64 diff = s1.addr0() - s2.addr0();
    if ((s64)diff < 0) {  // s1.addr0 < s2.addr0  // NOLINT
      // if (s1.addr0() + size1 > s2.addr0()) return true;
      if (s1.size() > -diff)
        res = true;
    } else {
      // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
      if (kS2AccessSize > diff)
        res = true;
    }
    DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
    DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
    return res;
  }

  u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
  u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
  bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
  bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }

  // The idea behind the freed bit is as follows.
  // When the memory is freed (or otherwise made inaccessible) we write to the
  // shadow values with tid/epoch related to the free and the freed bit set.
  // During memory access processing the freed bit is considered
  // as the msb of tid. So any access races with a shadow value that has the
  // freed bit set (as if it were a write from a thread with which we have
  // never synchronized before). This allows us to detect accesses to freed
  // memory w/o additional overheads in memory access processing and at the
  // same time restore the tid/epoch of the free.
  void MarkAsFreed() {
    x_ |= kFreedBit;
  }

  bool IsFreed() const {
    return x_ & kFreedBit;
  }

  bool GetFreedAndReset() {
    bool res = x_ & kFreedBit;
    x_ &= ~kFreedBit;
    return res;
  }

  bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
    bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
        | (u64(kIsAtomic) << kAtomicShift));
    DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
    return v;
  }

  bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
    return v;
  }

  bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
    bool v = ((x_ >> kReadShift) & 3)
        >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
    DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
        (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
    return v;
  }

 private:
  static const u64 kReadShift = 5 + kClkBits;
  static const u64 kReadBit = 1ull << kReadShift;
  static const u64 kAtomicShift = 6 + kClkBits;
  static const u64 kAtomicBit = 1ull << kAtomicShift;

  u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }

  static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
    if (s1.addr0() == s2.addr0()) return true;
    if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
      return true;
    if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
      return true;
    return false;
  }
};
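
// Worked example (illustrative): a 4-byte non-atomic write covering bytes
// [2,6) of an 8-byte shadow cell could be encoded as:
//   Shadow cur(thr->fast_state);
//   cur.SetAddr0AndSizeLog(/*addr0=*/2, /*kAccessSizeLog=*/2);  // size 4
//   cur.SetWrite(true);
// Shadow::TwoRangesIntersect(old, cur, 4) then reports whether this access
// overlaps a previously stored shadow value 'old' in the same cell.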

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  uptr mangled_sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources like the allocator cache, and does
// not participate in race-detection logic (invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState, however ideally
// it should be tied to a CPU (this way we will have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DenseSlabAllocCache clock_cache;
  DDPhysicalThread *dd_pt;
};
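
// Lifecycle sketch (illustrative), using the Proc* functions declared below:
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);   // thr->proc() becomes valid, thr can process events
//   ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);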

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run
// at the very end of a thread, when we have already destroyed the thread's
// processor.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  // Synch epoch represents the thread's epoch before the last synchronization
  // action. It allows us to reduce the number of shadow state updates.
  // For example, fast_synch_epoch=100, last write to addr X was at epoch=150,
  // if we are processing write to X from the same thread at epoch=200,
  // we do nothing, because both writes happen in the same 'synch epoch'.
  // That is, if another memory access does not race with the former write,
  // it does not race with the latter as well.
  // QUESTION: can we squeeze this into ThreadState::Fast?
  // E.g. ThreadState::Fast is a 44-bit value: 32 bits are taken by synch_epoch
  // and 12 are taken by the epoch between synchs.
  // This way we can save one load from tls.
  u64 fast_synch_epoch;
  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int ignore_sync;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // C/C++ uses a fixed size shadow stack embedded into Trace.
  // Go uses a malloc-allocated shadow stack with dynamic size.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
  uptr *shadow_stack_pos;
  u64 *racy_shadow_addr;
  u64 racy_state[2];
  MutexSet mset;
  ThreadClock clock;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int ignore_interceptors;
#endif
#if TSAN_COLLECT_STATS
  u64 stat[StatCnt];
#endif
  const int tid;
  const int unique_id;
  bool in_symbolizer;
  bool in_ignored_lib;
  bool is_inited;
  bool is_dead;
  bool is_freeing;
  bool is_vptr_access;
  const uptr stk_addr;
  const uptr stk_size;
  const uptr tls_addr;
  const uptr tls_size;
  ThreadContext *tctx;

#if SANITIZER_DEBUG && !SANITIZER_GO
  InternalDeadlockDetector internal_deadlock_detector;
#endif
  DDLogicalThread *dd_lt;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  ThreadSignalContext *signal_ctx;

#if !SANITIZER_GO
  u32 last_sleep_stack_id;
  ThreadClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                       unsigned reuse_count,
                       uptr stk_addr, uptr stk_size,
                       uptr tls_addr, uptr tls_size);
};

#if !SANITIZER_GO
#if SANITIZER_MAC || SANITIZER_ANDROID
ThreadState *cur_thread();
void cur_thread_finalize();
#else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
INLINE ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(&cur_thread_placeholder);
}
INLINE void cur_thread_finalize() { }
#endif  // SANITIZER_MAC || SANITIZER_ANDROID
#endif  // SANITIZER_GO
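
// Typical use (illustrative): runtime entry points first load the calling
// thread's state from TLS and then operate on it, e.g.:
//   ThreadState *thr = cur_thread();
//   MemoryRead(thr, pc, addr, kSizeLog8);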

class ThreadContext : public ThreadContextBase {
 public:
  explicit ThreadContext(int tid);
  ~ThreadContext();
  ThreadState *thr;
  u32 creation_stack_id;
  SyncClock sync;
  // Epoch at which the thread had started.
  // If we see an event from the thread stamped by an older epoch,
  // the event is from a dead thread that shared tid with this thread.
  u64 epoch0;
  u64 epoch1;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const {
    if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
      return true;
    if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
      return true;
    return false;
  }
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  int nmissed_expected;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry *thread_registry;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  Vector<RacyAddress> racy_addresses;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  ClockAlloc clock_alloc;

  Flags flags;

  u64 stat[StatCnt];
  u64 int_alloc_cnt[MBlockTypeCount];
  u64 int_alloc_siz[MBlockTypeCount];
};

extern Context *ctx;  // The one and only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
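
// Usage sketch (illustrative): instantiate on the stack around code whose
// intercepted calls must be passed through, e.g. calls into the symbolizer:
//   {
//     ScopedIgnoreInterceptors ignore;
//     ReportStack *stack = SymbolizeStackId(stack_id);
//   }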

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
                       const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(int unique_tid, bool suppressable = false);
  void AddUniqueTid(int unique_tid);
  void AddMutex(const SyncVar *s);
  u64 AddMutex(u64 id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(u32 stack_id);
  void SetCount(int count);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  void AddDeadMutex(u64 id);

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag = nullptr);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

#if TSAN_COLLECT_STATS
void StatAggregate(u64 *dst, u64 *src);
void StatOutput(u64 *stat);
#endif

void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] += n;
#endif
}
void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
#if TSAN_COLLECT_STATS
  thr->stat[typ] = n;
#endif
}

void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void InitializeShadowMemory();
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc);

void ReportRace(ThreadState *thr);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
void PrintMatchedBenignRaces();

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(u32 stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur);
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
    uptr size, bool is_write);
void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
    uptr size, uptr step, bool is_write);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic);

const int kSizeLog1 = 0;
const int kSizeLog2 = 1;
const int kSizeLog4 = 2;
const int kSizeLog8 = 3;

void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
                              uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
}

void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
                               uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
}

void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
                                    uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
}

void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
                                     uptr addr, int kAccessSizeLog) {
  MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
}
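
// Sketch (illustrative): the __tsan_read*/__tsan_write* interface functions
// reduce to these wrappers, with the access size passed as a log2; e.g.
// __tsan_write8(addr) becomes roughly:
//   MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
// where CALLERPC stands for the caller's return address.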

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);

void FuncEntry(ThreadState *thr, uptr pc);
void FuncExit(ThreadState *thr);

int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, int tid, tid_t os_id, bool workerthread);
void ThreadFinish(ThreadState *thr);
int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, int tid);
void ThreadDetach(ThreadState *thr, uptr pc, int tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignals(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
    int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, the finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr, uptr pc);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
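
// Sketch (illustrative): a release/acquire pair on the same address is what
// creates a happens-before edge between two threads, e.g. for a value handed
// off through address a:
//   Release(thr1, pc, (uptr)a);  // in the publishing thread
//   Acquire(thr2, pc, (uptr)a);  // in the consuming thread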

// The hacky call uses custom calling convention and an assembly thunk.
// It is considerably faster than a normal call for the caller
// if it is not executed (it is intended for slow paths from hot functions).
// The trick is that the call preserves all registers and the compiler
// does not treat it as a call.
// If it does not work for you, use a normal call.
#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
// The caller may not create the stack frame for itself at all,
// so we create a reserve stack frame for it (1024 bytes must be enough).
#define HACKY_CALL(f) \
  __asm__ __volatile__("sub $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(1024) \
                       ".hidden " #f "_thunk;" \
                       "call " #f "_thunk;" \
                       "add $1024, %%rsp;" \
                       CFI_INL_ADJUST_CFA_OFFSET(-1024) \
                       ::: "memory", "cc");
#else
#define HACKY_CALL(f) f()
#endif

void TraceSwitch(ThreadState *thr);
uptr TraceTopPC(ThreadState *thr);
uptr TraceSize();
uptr TraceParts();
Trace *ThreadTrace(int tid);

extern "C" void __tsan_trace_switch();
void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
                                 EventType typ, u64 addr) {
  if (!kCollectHistory)
    return;
  DCHECK_GE((int)typ, 0);
  DCHECK_LE((int)typ, 7);
  DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
  StatInc(thr, StatEvents);
  u64 pos = fs.GetTracePos();
  if (UNLIKELY((pos % kTracePartSize) == 0)) {
#if !SANITIZER_GO
    HACKY_CALL(__tsan_trace_switch);
#else
    TraceSwitch(thr);
#endif
  }
  Event *trace = (Event*)GetThreadTrace(fs.tid());
  Event *evp = &trace[pos];
  Event ev = (u64)addr | ((u64)typ << kEventPCBits);
  *evp = ev;
}
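
// Example (illustrative): a function entry is recorded by packing the event
// type into the bits above kEventPCBits and the pc into the low bits:
//   TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);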

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

}  // namespace __tsan

#endif  // TSAN_RTL_H