1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14 
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_tls_get_addr.h"
25 #include "interception/interception.h"
26 #include "tsan_interceptors.h"
27 #include "tsan_interface.h"
28 #include "tsan_platform.h"
29 #include "tsan_suppressions.h"
30 #include "tsan_rtl.h"
31 #include "tsan_mman.h"
32 #include "tsan_fd.h"
33 
34 #include <stdarg.h>
35 
36 using namespace __tsan;
37 
38 #if SANITIZER_FREEBSD || SANITIZER_APPLE
39 #define stdout __stdoutp
40 #define stderr __stderrp
41 #endif
42 
43 #if SANITIZER_NETBSD
44 #define dirfd(dirp) (*(int *)(dirp))
45 #define fileno_unlocked(fp)              \
46   (((__sanitizer_FILE *)fp)->_file == -1 \
47        ? -1                              \
48        : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
49 
50 #define stdout ((__sanitizer_FILE*)&__sF[1])
51 #define stderr ((__sanitizer_FILE*)&__sF[2])
52 
53 #define nanosleep __nanosleep50
54 #define vfork __vfork14
55 #endif
56 
57 #ifdef __mips__
58 const int kSigCount = 129;
59 #else
60 const int kSigCount = 65;
61 #endif
62 
63 #ifdef __mips__
64 struct ucontext_t {
65   u64 opaque[768 / sizeof(u64) + 1];
66 };
67 #else
68 struct ucontext_t {
  // The size is determined by looking at the sizeof of the real ucontext_t
  // on Linux.
70   u64 opaque[936 / sizeof(u64) + 1];
71 };
72 #endif
73 
74 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
75     defined(__s390x__)
76 #define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
77 #elif defined(__aarch64__) || SANITIZER_PPC64V2
78 #define PTHREAD_ABI_BASE  "GLIBC_2.17"
79 #elif SANITIZER_LOONGARCH64
80 #define PTHREAD_ABI_BASE  "GLIBC_2.36"
81 #endif
82 
83 extern "C" int pthread_attr_init(void *attr);
84 extern "C" int pthread_attr_destroy(void *attr);
85 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
86 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
87 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
88                               void (*child)(void));
89 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
90 extern "C" int pthread_setspecific(unsigned key, const void *v);
91 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
92 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
93 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
94 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
95 extern "C" int pthread_equal(void *t1, void *t2);
96 extern "C" void *pthread_self();
97 extern "C" void _exit(int status);
98 #if !SANITIZER_NETBSD
99 extern "C" int fileno_unlocked(void *stream);
100 extern "C" int dirfd(void *dirp);
101 #endif
102 #if SANITIZER_NETBSD
103 extern __sanitizer_FILE __sF[];
104 #else
105 extern __sanitizer_FILE *stdout, *stderr;
106 #endif
107 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
108 const int PTHREAD_MUTEX_RECURSIVE = 1;
109 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
110 #else
111 const int PTHREAD_MUTEX_RECURSIVE = 2;
112 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
113 #endif
114 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
115 const int EPOLL_CTL_ADD = 1;
116 #endif
117 const int SIGILL = 4;
118 const int SIGTRAP = 5;
119 const int SIGABRT = 6;
120 const int SIGFPE = 8;
121 const int SIGSEGV = 11;
122 const int SIGPIPE = 13;
123 const int SIGTERM = 15;
124 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
125 const int SIGBUS = 10;
126 const int SIGSYS = 12;
127 #else
128 const int SIGBUS = 7;
129 const int SIGSYS = 31;
130 #endif
131 const int SI_TIMER = -2;
132 void *const MAP_FAILED = (void*)-1;
133 #if SANITIZER_NETBSD
134 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
135 #elif !SANITIZER_APPLE
136 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
137 #endif
138 const int MAP_FIXED = 0x10;
139 typedef long long_t;
140 typedef __sanitizer::u16 mode_t;
141 
142 // From /usr/include/unistd.h
143 # define F_ULOCK 0      /* Unlock a previously locked region.  */
144 # define F_LOCK  1      /* Lock a region for exclusive use.  */
145 # define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
146 # define F_TEST  3      /* Test a region for other processes locks.  */
147 
148 #if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
149 const int SA_SIGINFO = 0x40;
150 const int SIG_SETMASK = 3;
151 #elif defined(__mips__)
152 const int SA_SIGINFO = 8;
153 const int SIG_SETMASK = 3;
154 #else
155 const int SA_SIGINFO = 4;
156 const int SIG_SETMASK = 2;
157 #endif
158 
159 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
160   (!cur_thread_init()->is_inited)
161 
162 namespace __tsan {
163 struct SignalDesc {
164   bool armed;
165   __sanitizer_siginfo siginfo;
166   ucontext_t ctx;
167 };
168 
169 struct ThreadSignalContext {
170   int int_signal_send;
171   SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for the stack.
173   __sanitizer_sigset_t emptyset;
174   __sanitizer_sigset_t oldset;
175 };
176 
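// Marks the current thread as being inside a blocking libc call. Any already
// pending signals are processed first so that they are not delayed for the
// whole duration of the blocking call.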
177 void EnterBlockingFunc(ThreadState *thr) {
178   for (;;) {
    // The order is important so as not to delay a signal indefinitely if it
    // is delivered right before we set in_blocking_func. Note: we can't call
    // ProcessPendingSignals when in_blocking_func is set, or we could handle
    // a signal synchronously while we are already handling a signal.
183     atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
184     if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
185       break;
186     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
187     ProcessPendingSignals(thr);
188   }
189 }
190 
191 // The sole reason tsan wraps atexit callbacks is to establish synchronization
192 // between callback setup and callback execution.
193 struct AtExitCtx {
194   void (*f)();
195   void *arg;
196   uptr pc;
197 };
198 
199 // InterceptorContext holds all global data required for interceptors.
200 // It's explicitly constructed in InitializeInterceptors with placement new
201 // and is never destroyed. This allows usage of members with non-trivial
202 // constructors and destructors.
203 struct InterceptorContext {
204   // The object is 64-byte aligned, because we want hot data to be located
205   // in a single cache line if possible (it's accessed in every interceptor).
206   ALIGNED(64) LibIgnore libignore;
207   __sanitizer_sigaction sigactions[kSigCount];
208 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
209   unsigned finalize_key;
210 #endif
211 
212   Mutex atexit_mu;
213   Vector<struct AtExitCtx *> AtExitStack;
214 
215   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
216 };
217 
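// Backing storage for the InterceptorContext singleton; it is constructed
// with placement new in InitializeInterceptors (see the comment above) and
// never destroyed.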
218 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
219 InterceptorContext *interceptor_ctx() {
220   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
221 }
222 
223 LibIgnore *libignore() {
224   return &interceptor_ctx()->libignore;
225 }
226 
227 void InitializeLibIgnore() {
228   const SuppressionContext &supp = *Suppressions();
229   const uptr n = supp.SuppressionCount();
230   for (uptr i = 0; i < n; i++) {
231     const Suppression *s = supp.SuppressionAt(i);
232     if (0 == internal_strcmp(s->type, kSuppressionLib))
233       libignore()->AddIgnoredLibrary(s->templ);
234   }
235   if (flags()->ignore_noninstrumented_modules)
236     libignore()->IgnoreNoninstrumentedModules(true);
237   libignore()->OnLibraryLoaded(0);
238 }
239 
// The following two hooks can be used for cooperative scheduling when
// locking.
242 #ifdef TSAN_EXTERNAL_HOOKS
243 void OnPotentiallyBlockingRegionBegin();
244 void OnPotentiallyBlockingRegionEnd();
245 #else
246 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
247 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
248 #endif
249 
250 }  // namespace __tsan
251 
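// Returns the per-thread signal context, lazily allocating it on first use.
// Returns null for threads that are already dead.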
252 static ThreadSignalContext *SigCtx(ThreadState *thr) {
253   // This function may be called reentrantly if it is interrupted by a signal
254   // handler. Use CAS to handle the race.
255   uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
256   if (ctx == 0 && !thr->is_dead) {
257     uptr pctx =
258         (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
259     MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
260     if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
261                                        memory_order_relaxed)) {
262       ctx = pctx;
263     } else {
264       UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
265     }
266   }
267   return (ThreadSignalContext *)ctx;
268 }
269 
270 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
271                                      uptr pc)
272     : thr_(thr) {
273   LazyInitialize(thr);
274   if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
275     // pthread_join is marked as blocking, but it's also known to call other
276     // intercepted functions (mmap, free). If we don't reset in_blocking_func
277     // we can get deadlocks and memory corruptions if we deliver a synchronous
278     // signal inside of an mmap/free interceptor.
279     // So reset it and restore it back in the destructor.
280     // See https://github.com/google/sanitizers/issues/1540
281     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
282     in_blocking_func_ = true;
283   }
284   if (!thr_->is_inited) return;
285   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
286   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
287   ignoring_ =
288       !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
289                                 libignore()->IsIgnored(pc, &in_ignored_lib_));
290   EnableIgnores();
291 }
292 
293 ScopedInterceptor::~ScopedInterceptor() {
294   if (!thr_->is_inited) return;
295   DisableIgnores();
296   if (UNLIKELY(in_blocking_func_))
297     EnterBlockingFunc(thr_);
298   if (!thr_->ignore_interceptors) {
299     ProcessPendingSignals(thr_);
300     FuncExit(thr_);
301     CheckedMutex::CheckNoLocks();
302   }
303 }
304 
305 NOINLINE
306 void ScopedInterceptor::EnableIgnoresImpl() {
307   ThreadIgnoreBegin(thr_, 0);
308   if (flags()->ignore_noninstrumented_modules)
309     thr_->suppress_reports++;
310   if (in_ignored_lib_) {
311     DCHECK(!thr_->in_ignored_lib);
312     thr_->in_ignored_lib = true;
313   }
314 }
315 
316 NOINLINE
317 void ScopedInterceptor::DisableIgnoresImpl() {
318   ThreadIgnoreEnd(thr_);
319   if (flags()->ignore_noninstrumented_modules)
320     thr_->suppress_reports--;
321   if (in_ignored_lib_) {
322     DCHECK(thr_->in_ignored_lib);
323     thr_->in_ignored_lib = false;
324   }
325 }
326 
327 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
328 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
329 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
330 #else
331 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
332 #endif
333 #if SANITIZER_FREEBSD
334 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
335     INTERCEPT_FUNCTION(_pthread_##func)
336 #else
337 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
338 #endif
339 #if SANITIZER_NETBSD
340 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
341     INTERCEPT_FUNCTION(__libc_##func)
342 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
343     INTERCEPT_FUNCTION(__libc_thr_##func)
344 #else
345 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
346 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
347 #endif
348 
349 #define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
350   MemoryAccessRange((thr), (pc), (uptr)(s),                         \
351     common_flags()->strict_string_checks ? (len) + 1 : (n), false)
352 
353 #define READ_STRING(thr, pc, s, n)                             \
354     READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
355 
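// Invokes the real function with the thread marked as blocked (via a
// temporary BlockingCall) so that signals arriving during the call are
// handled immediately instead of being deferred until the call returns.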
356 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
357 
358 struct BlockingCall {
359   explicit BlockingCall(ThreadState *thr)
360       : thr(thr) {
361     EnterBlockingFunc(thr);
362     // When we are in a "blocking call", we process signals asynchronously
363     // (right when they arrive). In this context we do not expect to be
364     // executing any user/runtime code. The known interceptor sequence when
365     // this is not true is: pthread_join -> munmap(stack). It's fine
366     // to ignore munmap in this case -- we handle stack shadow separately.
367     thr->ignore_interceptors++;
368   }
369 
370   ~BlockingCall() {
371     thr->ignore_interceptors--;
372     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
373   }
374 
375   ThreadState *thr;
376 };
377 
378 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
379   SCOPED_TSAN_INTERCEPTOR(sleep, sec);
380   unsigned res = BLOCK_REAL(sleep)(sec);
381   AfterSleep(thr, pc);
382   return res;
383 }
384 
385 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
386   SCOPED_TSAN_INTERCEPTOR(usleep, usec);
387   int res = BLOCK_REAL(usleep)(usec);
388   AfterSleep(thr, pc);
389   return res;
390 }
391 
392 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
393   SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
394   int res = BLOCK_REAL(nanosleep)(req, rem);
395   AfterSleep(thr, pc);
396   return res;
397 }
398 
399 TSAN_INTERCEPTOR(int, pause, int fake) {
400   SCOPED_TSAN_INTERCEPTOR(pause, fake);
401   return BLOCK_REAL(pause)(fake);
402 }
403 
// Note: we deliberately give the function the "installed_at" name because in
// reports it will appear between the callback frames and the frame that
// installed the callback.
407 static void at_exit_callback_installed_at() {
408   AtExitCtx *ctx;
409   {
410     // Ensure thread-safety.
411     Lock l(&interceptor_ctx()->atexit_mu);
412 
413     // Pop AtExitCtx from the top of the stack of callback functions
414     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
415     ctx = interceptor_ctx()->AtExitStack[element];
416     interceptor_ctx()->AtExitStack.PopBack();
417   }
418 
419   ThreadState *thr = cur_thread();
420   Acquire(thr, ctx->pc, (uptr)ctx);
421   FuncEntry(thr, ctx->pc);
422   ((void(*)())ctx->f)();
423   FuncExit(thr);
424   Free(ctx);
425 }
426 
427 static void cxa_at_exit_callback_installed_at(void *arg) {
428   ThreadState *thr = cur_thread();
429   AtExitCtx *ctx = (AtExitCtx*)arg;
430   Acquire(thr, ctx->pc, (uptr)arg);
431   FuncEntry(thr, ctx->pc);
432   ((void(*)(void *arg))ctx->f)(ctx->arg);
433   FuncExit(thr);
434   Free(ctx);
435 }
436 
437 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
438       void *arg, void *dso);
439 
440 #if !SANITIZER_ANDROID
441 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
442   if (in_symbolizer())
443     return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
446   SCOPED_INTERCEPTOR_RAW(atexit, f);
447   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
448 }
449 #endif
450 
451 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
452   if (in_symbolizer())
453     return 0;
454   SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
455   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
456 }
457 
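// Registers one of the *_installed_at wrappers via the real __cxa_atexit and
// establishes release/acquire synchronization between callback registration
// and callback execution.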
458 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
459       void *arg, void *dso) {
460   auto *ctx = New<AtExitCtx>();
461   ctx->f = f;
462   ctx->arg = arg;
463   ctx->pc = pc;
464   Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around the atexit callback list.
467   ThreadIgnoreBegin(thr, pc);
468   int res;
469   if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0,
    // so store ctx in a local stack-like structure instead.
472 
473     // Ensure thread-safety.
474     Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // due to atexit_mu being held on exit from the calloc interceptor.
477     ScopedIgnoreInterceptors ignore;
478 
479     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
480                              0, 0);
481     // Push AtExitCtx on the top of the stack of callback functions
482     if (!res) {
483       interceptor_ctx()->AtExitStack.PushBack(ctx);
484     }
485   } else {
486     res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
487   }
488   ThreadIgnoreEnd(thr);
489   return res;
490 }
491 
492 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
493 static void on_exit_callback_installed_at(int status, void *arg) {
494   ThreadState *thr = cur_thread();
495   AtExitCtx *ctx = (AtExitCtx*)arg;
496   Acquire(thr, ctx->pc, (uptr)arg);
497   FuncEntry(thr, ctx->pc);
498   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
499   FuncExit(thr);
500   Free(ctx);
501 }
502 
503 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
504   if (in_symbolizer())
505     return 0;
506   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
507   auto *ctx = New<AtExitCtx>();
508   ctx->f = (void(*)())f;
509   ctx->arg = arg;
510   ctx->pc = GET_CALLER_PC();
511   Release(thr, pc, (uptr)ctx);
  // Memory allocation inside the callback registration will race with free
  // during exit, because we do not see synchronization around the atexit
  // callback list.
514   ThreadIgnoreBegin(thr, pc);
515   int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
516   ThreadIgnoreEnd(thr);
517   return res;
518 }
519 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
520 #else
521 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
522 #endif
523 
// Clean up old bufs.
525 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
526   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
527     JmpBuf *buf = &thr->jmp_bufs[i];
528     if (buf->sp <= sp) {
529       uptr sz = thr->jmp_bufs.Size();
530       internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
531       thr->jmp_bufs.PopBack();
532       i--;
533     }
534   }
535 }
536 
537 static void SetJmp(ThreadState *thr, uptr sp) {
538   if (!thr->is_inited)  // called from libc guts during bootstrap
539     return;
  // Clean up old bufs.
541   JmpBufGarbageCollect(thr, sp);
542   // Remember the buf.
543   JmpBuf *buf = thr->jmp_bufs.PushBack();
544   buf->sp = sp;
545   buf->shadow_stack_pos = thr->shadow_stack_pos;
546   ThreadSignalContext *sctx = SigCtx(thr);
547   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
548   buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
549   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
550       memory_order_relaxed);
551 }
552 
553 static void LongJmp(ThreadState *thr, uptr *env) {
554   uptr sp = ExtractLongJmpSp(env);
555   // Find the saved buf with matching sp.
556   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
557     JmpBuf *buf = &thr->jmp_bufs[i];
558     if (buf->sp == sp) {
559       CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
560       // Unwind the stack.
561       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
562         FuncExit(thr);
563       ThreadSignalContext *sctx = SigCtx(thr);
564       if (sctx)
565         sctx->int_signal_send = buf->int_signal_send;
566       atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
567           memory_order_relaxed);
568       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
569           memory_order_relaxed);
570       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
571       return;
572     }
573   }
574   Printf("ThreadSanitizer: can't find longjmp buf\n");
575   CHECK(0);
576 }
577 
578 // FIXME: put everything below into a common extern "C" block?
579 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
580 
581 #if SANITIZER_APPLE
582 TSAN_INTERCEPTOR(int, setjmp, void *env);
583 TSAN_INTERCEPTOR(int, _setjmp, void *env);
584 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
585 #else  // SANITIZER_APPLE
586 
587 #if SANITIZER_NETBSD
588 #define setjmp_symname __setjmp14
589 #define sigsetjmp_symname __sigsetjmp14
590 #else
591 #define setjmp_symname setjmp
592 #define sigsetjmp_symname sigsetjmp
593 #endif
594 
595 #define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
596 #define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
597 #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
598 #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
599 
600 #define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
601 #define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
602 
603 // Not called.  Merely to satisfy TSAN_INTERCEPT().
604 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
605 int TSAN_INTERCEPTOR_SETJMP(void *env);
606 extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
607   CHECK(0);
608   return 0;
609 }
610 
611 // FIXME: any reason to have a separate declaration?
612 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
613 int __interceptor__setjmp(void *env);
614 extern "C" int __interceptor__setjmp(void *env) {
615   CHECK(0);
616   return 0;
617 }
618 
619 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
620 int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
621 extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
622   CHECK(0);
623   return 0;
624 }
625 
626 #if !SANITIZER_NETBSD
627 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
628 int __interceptor___sigsetjmp(void *env);
629 extern "C" int __interceptor___sigsetjmp(void *env) {
630   CHECK(0);
631   return 0;
632 }
633 #endif
634 
635 extern "C" int setjmp_symname(void *env);
636 extern "C" int _setjmp(void *env);
637 extern "C" int sigsetjmp_symname(void *env);
638 #if !SANITIZER_NETBSD
639 extern "C" int __sigsetjmp(void *env);
640 #endif
641 DEFINE_REAL(int, setjmp_symname, void *env)
642 DEFINE_REAL(int, _setjmp, void *env)
643 DEFINE_REAL(int, sigsetjmp_symname, void *env)
644 #if !SANITIZER_NETBSD
645 DEFINE_REAL(int, __sigsetjmp, void *env)
646 #endif
647 #endif  // SANITIZER_APPLE
648 
649 #if SANITIZER_NETBSD
650 #define longjmp_symname __longjmp14
651 #define siglongjmp_symname __siglongjmp14
652 #else
653 #define longjmp_symname longjmp
654 #define siglongjmp_symname siglongjmp
655 #endif
656 
657 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
658   // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
659   // bad things will happen. We will jump over ScopedInterceptor dtor and can
660   // leave thr->in_ignored_lib set.
661   {
662     SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
663   }
664   LongJmp(cur_thread(), env);
665   REAL(longjmp_symname)(env, val);
666 }
667 
668 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
669   {
670     SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
671   }
672   LongJmp(cur_thread(), env);
673   REAL(siglongjmp_symname)(env, val);
674 }
675 
676 #if SANITIZER_NETBSD
677 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
678   {
679     SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
680   }
681   LongJmp(cur_thread(), env);
682   REAL(_longjmp)(env, val);
683 }
684 #endif
685 
686 #if !SANITIZER_APPLE
687 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
688   if (in_symbolizer())
689     return InternalAlloc(size);
690   void *p = 0;
691   {
692     SCOPED_INTERCEPTOR_RAW(malloc, size);
693     p = user_alloc(thr, pc, size);
694   }
695   invoke_malloc_hook(p, size);
696   return p;
697 }
698 
// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races and (2) free will not be
// called on blocks allocated internally by libc.
702 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
703   SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
704   return user_memalign(thr, pc, align, sz);
705 }
706 
707 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
708   if (in_symbolizer())
709     return InternalCalloc(size, n);
710   void *p = 0;
711   {
712     SCOPED_INTERCEPTOR_RAW(calloc, size, n);
713     p = user_calloc(thr, pc, size, n);
714   }
715   invoke_malloc_hook(p, n * size);
716   return p;
717 }
718 
719 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
720   if (in_symbolizer())
721     return InternalRealloc(p, size);
722   if (p)
723     invoke_free_hook(p);
724   {
725     SCOPED_INTERCEPTOR_RAW(realloc, p, size);
726     p = user_realloc(thr, pc, p, size);
727   }
728   invoke_malloc_hook(p, size);
729   return p;
730 }
731 
732 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
733   if (in_symbolizer())
734     return InternalReallocArray(p, size, n);
735   if (p)
736     invoke_free_hook(p);
737   {
738     SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
739     p = user_reallocarray(thr, pc, p, size, n);
740   }
741   invoke_malloc_hook(p, size);
742   return p;
743 }
744 
745 TSAN_INTERCEPTOR(void, free, void *p) {
746   if (p == 0)
747     return;
748   if (in_symbolizer())
749     return InternalFree(p);
750   invoke_free_hook(p);
751   SCOPED_INTERCEPTOR_RAW(free, p);
752   user_free(thr, pc, p);
753 }
754 
755 TSAN_INTERCEPTOR(void, cfree, void *p) {
756   if (p == 0)
757     return;
758   if (in_symbolizer())
759     return InternalFree(p);
760   invoke_free_hook(p);
761   SCOPED_INTERCEPTOR_RAW(cfree, p);
762   user_free(thr, pc, p);
763 }
764 
765 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
766   SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
767   return user_alloc_usable_size(p);
768 }
769 #endif
770 
771 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
772   SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
773   uptr srclen = internal_strlen(src);
774   MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
775   MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
776   return REAL(strcpy)(dst, src);
777 }
778 
779 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
780   SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
781   uptr srclen = internal_strnlen(src, n);
782   MemoryAccessRange(thr, pc, (uptr)dst, n, true);
783   MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
784   return REAL(strncpy)(dst, src, n);
785 }
786 
787 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
788   SCOPED_TSAN_INTERCEPTOR(strdup, str);
789   // strdup will call malloc, so no instrumentation is required here.
790   return REAL(strdup)(str);
791 }
792 
793 // Zero out addr if it points into shadow memory and was provided as a hint
794 // only, i.e., MAP_FIXED is not set.
795 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
796   if (*addr) {
797     if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
798       if (flags & MAP_FIXED) {
799         errno = errno_EINVAL;
800         return false;
801       } else {
802         *addr = 0;
803       }
804     }
805   }
806   return true;
807 }
808 
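// Shared implementation for the mmap/mmap64 interceptors: sanitizes the hint
// address, calls the real mmap, records the fd access and imitates writes to
// the newly mapped range.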
809 template <class Mmap>
810 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
811                               void *addr, SIZE_T sz, int prot, int flags,
812                               int fd, OFF64_T off) {
813   if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
814   void *res = real_mmap(addr, sz, prot, flags, fd, off);
815   if (res != MAP_FAILED) {
816     if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
817       Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
818              addr, (void*)sz, res);
819       Die();
820     }
821     if (fd > 0) FdAccess(thr, pc, fd);
822     MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
823   }
824   return res;
825 }
826 
827 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
828   SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
829   UnmapShadow(thr, (uptr)addr, sz);
830   int res = REAL(munmap)(addr, sz);
831   return res;
832 }
833 
834 #if SANITIZER_LINUX
835 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
836   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
837   return user_memalign(thr, pc, align, sz);
838 }
839 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
840 #else
841 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
842 #endif
843 
844 #if !SANITIZER_APPLE
845 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
846   if (in_symbolizer())
847     return InternalAlloc(sz, nullptr, align);
848   SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
849   return user_aligned_alloc(thr, pc, align, sz);
850 }
851 
852 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
853   if (in_symbolizer())
854     return InternalAlloc(sz, nullptr, GetPageSizeCached());
855   SCOPED_INTERCEPTOR_RAW(valloc, sz);
856   return user_valloc(thr, pc, sz);
857 }
858 #endif
859 
860 #if SANITIZER_LINUX
861 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
862   if (in_symbolizer()) {
863     uptr PageSize = GetPageSizeCached();
864     sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
865     return InternalAlloc(sz, nullptr, PageSize);
866   }
867   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
868   return user_pvalloc(thr, pc, sz);
869 }
870 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
871 #else
872 #define TSAN_MAYBE_INTERCEPT_PVALLOC
873 #endif
874 
875 #if !SANITIZER_APPLE
876 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
877   if (in_symbolizer()) {
878     void *p = InternalAlloc(sz, nullptr, align);
879     if (!p)
880       return errno_ENOMEM;
881     *memptr = p;
882     return 0;
883   }
884   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
885   return user_posix_memalign(thr, pc, memptr, align, sz);
886 }
887 #endif
888 
// Both __cxa_guard_acquire and pthread_once start from a 0-initialized
// object. pthread_once does not have any other ABI requirements.
// __cxa_guard_acquire assumes that any non-0 value in the first byte means
// that initialization is completed. Contents of the remaining bytes are up
// to us.
895 constexpr u32 kGuardInit = 0;
896 constexpr u32 kGuardDone = 1;
897 constexpr u32 kGuardRunning = 1 << 16;
898 constexpr u32 kGuardWaiter = 1 << 17;
899 
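// Returns 1 if the caller should run the guarded initialization (the guard
// was still in the initial state), or 0 if initialization has already
// completed. Blocks while another thread is running the initializer.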
900 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
901                          bool blocking_hooks = true) {
902   if (blocking_hooks)
903     OnPotentiallyBlockingRegionBegin();
904   auto on_exit = at_scope_exit([blocking_hooks] {
905     if (blocking_hooks)
906       OnPotentiallyBlockingRegionEnd();
907   });
908 
909   for (;;) {
910     u32 cmp = atomic_load(g, memory_order_acquire);
911     if (cmp == kGuardInit) {
912       if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
913                                          memory_order_relaxed))
914         return 1;
915     } else if (cmp == kGuardDone) {
916       if (!thr->in_ignored_lib)
917         Acquire(thr, pc, (uptr)g);
918       return 0;
919     } else {
920       if ((cmp & kGuardWaiter) ||
921           atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
922                                          memory_order_relaxed))
923         FutexWait(g, cmp | kGuardWaiter);
924     }
925   }
926 }
927 
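// Publishes the final guard state (with a release annotation) and wakes any
// threads blocked in guard_acquire.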
928 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
929                           u32 v) {
930   if (!thr->in_ignored_lib)
931     Release(thr, pc, (uptr)g);
932   u32 old = atomic_exchange(g, v, memory_order_release);
933   if (old & kGuardWaiter)
934     FutexWake(g, 1 << 30);
935 }
936 
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are defined as weak functions (so that they don't cause link
// errors when the user defines them as well), and they silently auto-disable
// themselves when such a symbol is already present in the binary. If we link
// libstdc++ statically, it will bring its own __cxa_guard_acquire, which will
// silently replace our interceptor. That's why on Linux we simply export
// these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
947 #if SANITIZER_APPLE
948 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
949 #else
950 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
951   extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
952 #endif
953 
954 // Used in thread-safe function static initialization.
955 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
956   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
957   return guard_acquire(thr, pc, g);
958 }
959 
960 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
961   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
962   guard_release(thr, pc, g, kGuardDone);
963 }
964 
965 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
966   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
967   guard_release(thr, pc, g, kGuardInit);
968 }
969 
970 namespace __tsan {
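// Finishes the current thread and releases its Processor and DTLS resources.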
971 void DestroyThreadState() {
972   ThreadState *thr = cur_thread();
973   Processor *proc = thr->proc();
974   ThreadFinish(thr);
975   ProcUnwire(proc, thr);
976   ProcDestroy(proc);
977   DTLS_Destroy();
978   cur_thread_finalize();
979 }
980 
981 void PlatformCleanUpThreadState(ThreadState *thr) {
982   ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
983       &thr->signal_ctx, memory_order_relaxed);
984   if (sctx) {
985     atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
986     UnmapOrDie(sctx, sizeof(*sctx));
987   }
988 }
989 }  // namespace __tsan
990 
991 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
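// Pthread TSD destructor: keeps re-arming the key until the last destructor
// iteration so that DestroyThreadState runs after user TSD destructors.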
992 static void thread_finalize(void *v) {
993   uptr iter = (uptr)v;
994   if (iter > 1) {
995     if (pthread_setspecific(interceptor_ctx()->finalize_key,
996         (void*)(iter - 1))) {
997       Printf("ThreadSanitizer: failed to set thread key\n");
998       Die();
999     }
1000     return;
1001   }
1002   DestroyThreadState();
1003 }
1004 #endif
1005 
1006 
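// Arguments passed from the pthread_create interceptor to
// __tsan_thread_start_func. 'created' is posted once the parent has
// registered the thread via ThreadCreate; 'started' once the child has run
// ThreadStart.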
1007 struct ThreadParam {
1008   void* (*callback)(void *arg);
1009   void *param;
1010   Tid tid;
1011   Semaphore created;
1012   Semaphore started;
1013 };
1014 
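// Thread entry trampoline passed to the real pthread_create: sets up the
// TSan thread state before invoking the user callback.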
1015 extern "C" void *__tsan_thread_start_func(void *arg) {
1016   ThreadParam *p = (ThreadParam*)arg;
1017   void* (*callback)(void *arg) = p->callback;
1018   void *param = p->param;
1019   {
1020     ThreadState *thr = cur_thread_init();
1021     // Thread-local state is not initialized yet.
1022     ScopedIgnoreInterceptors ignore;
1023 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
1024     ThreadIgnoreBegin(thr, 0);
1025     if (pthread_setspecific(interceptor_ctx()->finalize_key,
1026                             (void *)GetPthreadDestructorIterations())) {
1027       Printf("ThreadSanitizer: failed to set thread key\n");
1028       Die();
1029     }
1030     ThreadIgnoreEnd(thr);
1031 #endif
1032     p->created.Wait();
1033     Processor *proc = ProcCreate();
1034     ProcWire(proc, thr);
1035     ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1036     p->started.Post();
1037   }
1038   void *res = callback(param);
  // Prevent the callback from being tail called;
  // that would mix up stack traces.
1041   volatile int foo = 42;
1042   foo++;
1043   return res;
1044 }
1045 
1046 TSAN_INTERCEPTOR(int, pthread_create,
1047     void *th, void *attr, void *(*callback)(void*), void * param) {
1048   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1049 
1050   MaybeSpawnBackgroundThread();
1051 
1052   if (ctx->after_multithreaded_fork) {
1053     if (flags()->die_after_fork) {
1054       Report("ThreadSanitizer: starting new threads after multi-threaded "
1055           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1056       Die();
1057     } else {
1058       VPrintf(1,
1059               "ThreadSanitizer: starting new threads after multi-threaded "
1060               "fork is not supported (pid %lu). Continuing because of "
1061               "die_after_fork=0, but you are on your own\n",
1062               internal_getpid());
1063     }
1064   }
1065   __sanitizer_pthread_attr_t myattr;
1066   if (attr == 0) {
1067     pthread_attr_init(&myattr);
1068     attr = &myattr;
1069   }
1070   int detached = 0;
1071   REAL(pthread_attr_getdetachstate)(attr, &detached);
1072   AdjustStackSize(attr);
1073 
1074   ThreadParam p;
1075   p.callback = callback;
1076   p.param = param;
1077   p.tid = kMainTid;
1078   int res = -1;
1079   {
1080     // Otherwise we see false positives in pthread stack manipulation.
1081     ScopedIgnoreInterceptors ignore;
1082     ThreadIgnoreBegin(thr, pc);
1083     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1084     ThreadIgnoreEnd(thr);
1085   }
1086   if (res == 0) {
1087     p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1088     CHECK_NE(p.tid, kMainTid);
1089     // Synchronization on p.tid serves two purposes:
1090     // 1. ThreadCreate must finish before the new thread starts.
1091     //    Otherwise the new thread can call pthread_detach, but the pthread_t
1092     //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1093     // 2. ThreadStart must finish before this thread continues.
1094     //    Otherwise, this thread can call pthread_detach and reset thr->sync
1095     //    before the new thread got a chance to acquire from it in ThreadStart.
1096     p.created.Post();
1097     p.started.Wait();
1098   }
1099   if (attr == &myattr)
1100     pthread_attr_destroy(&myattr);
1101   return res;
1102 }
1103 
1104 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1105   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1106   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1107   ThreadIgnoreBegin(thr, pc);
1108   int res = BLOCK_REAL(pthread_join)(th, ret);
1109   ThreadIgnoreEnd(thr);
1110   if (res == 0) {
1111     ThreadJoin(thr, pc, tid);
1112   }
1113   return res;
1114 }
1115 
1116 DEFINE_REAL_PTHREAD_FUNCTIONS
1117 
1118 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1119   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1120   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1121   int res = REAL(pthread_detach)(th);
1122   if (res == 0) {
1123     ThreadDetach(thr, pc, tid);
1124   }
1125   return res;
1126 }
1127 
1128 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1129   {
1130     SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1131 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
1132     CHECK_EQ(thr, &cur_thread_placeholder);
1133 #endif
1134   }
1135   REAL(pthread_exit)(retval);
1136 }
1137 
1138 #if SANITIZER_LINUX
1139 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1140   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1141   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1142   ThreadIgnoreBegin(thr, pc);
1143   int res = REAL(pthread_tryjoin_np)(th, ret);
1144   ThreadIgnoreEnd(thr);
1145   if (res == 0)
1146     ThreadJoin(thr, pc, tid);
1147   else
1148     ThreadNotJoined(thr, pc, tid, (uptr)th);
1149   return res;
1150 }
1151 
1152 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1153                  const struct timespec *abstime) {
1154   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1155   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1156   ThreadIgnoreBegin(thr, pc);
1157   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1158   ThreadIgnoreEnd(thr);
1159   if (res == 0)
1160     ThreadJoin(thr, pc, tid);
1161   else
1162     ThreadNotJoined(thr, pc, tid, (uptr)th);
1163   return res;
1164 }
1165 #endif
1166 
// Problem:
// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has a different size in the different versions.
// If we call the new REAL functions for an old pthread_cond_t, they will
// corrupt memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions for a new pthread_cond_t, we will lose
// some functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of the interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002), and provide the legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support some
// features, e.g. pthread_condattr_getpshared).
1182 static void *init_cond(void *c, bool force = false) {
1183   // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1184   // So we allocate additional memory on the side large enough to hold
1185   // any pthread_cond_t object. Always call new REAL functions, but pass
1186   // the aux object to them.
1187   // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1188   // first word of pthread_cond_t to zero.
  // It's all relevant only for Linux.
1190   if (!common_flags()->legacy_pthread_cond)
1191     return c;
1192   atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1193   uptr cond = atomic_load(p, memory_order_acquire);
1194   if (!force && cond != 0)
1195     return (void*)cond;
1196   void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1197   internal_memset(newcond, 0, pthread_cond_t_sz);
1198   if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1199       memory_order_acq_rel))
1200     return newcond;
1201   WRAP(free)(newcond);
1202   return (void*)cond;
1203 }
1204 
1205 namespace {
1206 
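// State passed to the cancellation cleanup handler installed around the real
// pthread_cond_*wait call; Unlock() runs if the thread is cancelled during
// the wait.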
1207 template <class Fn>
1208 struct CondMutexUnlockCtx {
1209   ScopedInterceptor *si;
1210   ThreadState *thr;
1211   uptr pc;
1212   void *m;
1213   void *c;
1214   const Fn &fn;
1215 
1216   int Cancel() const { return fn(); }
1217   void Unlock() const;
1218 };
1219 
1220 template <class Fn>
1221 void CondMutexUnlockCtx<Fn>::Unlock() const {
1222   // pthread_cond_wait interceptor has enabled async signal delivery
1223   // (see BlockingCall below). Disable async signals since we are running
1224   // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1225   // since the thread is cancelled, so we have to manually execute them
1226   // (the thread still can run some user code due to pthread_cleanup_push).
1227   CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1228   atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1229   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1230   // Undo BlockingCall ctor effects.
1231   thr->ignore_interceptors--;
1232   si->~ScopedInterceptor();
1233 }
1234 }  // namespace
1235 
1236 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1237   void *cond = init_cond(c, true);
1238   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1239   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1240   return REAL(pthread_cond_init)(cond, a);
1241 }
1242 
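// Common implementation for the pthread_cond_*wait interceptors: annotates
// the mutex release, runs the real wait (fn) under a cancellation cleanup
// handler, and annotates re-acquisition of the mutex afterwards.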
1243 template <class Fn>
1244 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1245               void *c, void *m) {
1246   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1247   MutexUnlock(thr, pc, (uptr)m);
1248   int res = 0;
1249   // This ensures that we handle mutex lock even in case of pthread_cancel.
1250   // See test/tsan/cond_cancel.cpp.
1251   {
1252     // Enable signal delivery while the thread is blocked.
1253     BlockingCall bc(thr);
1254     CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1255     res = call_pthread_cancel_with_cleanup(
1256         [](void *arg) -> int {
1257           return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1258         },
1259         [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1260         &arg);
1261   }
1262   if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1263   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1264   return res;
1265 }
1266 
1267 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1268   void *cond = init_cond(c);
1269   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1270   return cond_wait(
1271       thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1272       m);
1273 }
1274 
1275 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1276   void *cond = init_cond(c);
1277   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1278   return cond_wait(
1279       thr, pc, &si,
1280       [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1281       m);
1282 }
1283 
1284 #if SANITIZER_LINUX
1285 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1286             __sanitizer_clockid_t clock, void *abstime) {
1287   void *cond = init_cond(c);
1288   SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1289   return cond_wait(
1290       thr, pc, &si,
1291       [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1292       cond, m);
1293 }
1294 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1295 #else
1296 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1297 #endif
1298 
1299 #if SANITIZER_APPLE
1300 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1301             void *reltime) {
1302   void *cond = init_cond(c);
1303   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1304   return cond_wait(
1305       thr, pc, &si,
1306       [=]() {
1307         return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1308       },
1309       cond, m);
1310 }
1311 #endif
1312 
1313 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1314   void *cond = init_cond(c);
1315   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1316   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1317   return REAL(pthread_cond_signal)(cond);
1318 }
1319 
1320 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1321   void *cond = init_cond(c);
1322   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1323   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1324   return REAL(pthread_cond_broadcast)(cond);
1325 }
1326 
1327 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1328   void *cond = init_cond(c);
1329   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1330   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1331   int res = REAL(pthread_cond_destroy)(cond);
1332   if (common_flags()->legacy_pthread_cond) {
1333     // Free our aux cond and zero the pointer to not leave dangling pointers.
1334     WRAP(free)(cond);
1335     atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1336   }
1337   return res;
1338 }
1339 
1340 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1341   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1342   int res = REAL(pthread_mutex_init)(m, a);
1343   if (res == 0) {
1344     u32 flagz = 0;
1345     if (a) {
1346       int type = 0;
1347       if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1348         if (type == PTHREAD_MUTEX_RECURSIVE ||
1349             type == PTHREAD_MUTEX_RECURSIVE_NP)
1350           flagz |= MutexFlagWriteReentrant;
1351     }
1352     MutexCreate(thr, pc, (uptr)m, flagz);
1353   }
1354   return res;
1355 }
1356 
1357 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1358   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1359   int res = REAL(pthread_mutex_destroy)(m);
1360   if (res == 0 || res == errno_EBUSY) {
1361     MutexDestroy(thr, pc, (uptr)m);
1362   }
1363   return res;
1364 }
1365 
1366 TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1367   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1368   MutexPreLock(thr, pc, (uptr)m);
1369   int res = REAL(pthread_mutex_lock)(m);
1370   if (res == errno_EOWNERDEAD)
1371     MutexRepair(thr, pc, (uptr)m);
1372   if (res == 0 || res == errno_EOWNERDEAD)
1373     MutexPostLock(thr, pc, (uptr)m);
1374   if (res == errno_EINVAL)
1375     MutexInvalidAccess(thr, pc, (uptr)m);
1376   return res;
1377 }
1378 
1379 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1380   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1381   int res = REAL(pthread_mutex_trylock)(m);
1382   if (res == errno_EOWNERDEAD)
1383     MutexRepair(thr, pc, (uptr)m);
1384   if (res == 0 || res == errno_EOWNERDEAD)
1385     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1386   return res;
1387 }
1388 
1389 #if !SANITIZER_APPLE
1390 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1391   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1392   int res = REAL(pthread_mutex_timedlock)(m, abstime);
1393   if (res == 0) {
1394     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1395   }
1396   return res;
1397 }
1398 #endif
1399 
1400 TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1401   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1402   MutexUnlock(thr, pc, (uptr)m);
1403   int res = REAL(pthread_mutex_unlock)(m);
1404   if (res == errno_EINVAL)
1405     MutexInvalidAccess(thr, pc, (uptr)m);
1406   return res;
1407 }
1408 
1409 #if SANITIZER_GLIBC
1410 #  if !__GLIBC_PREREQ(2, 34)
// glibc 2.34 gives these two functions a non-default symbol version; they are
// no longer expected to be intercepted by programs.
1413 TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1414   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1415   MutexPreLock(thr, pc, (uptr)m);
1416   int res = REAL(__pthread_mutex_lock)(m);
1417   if (res == errno_EOWNERDEAD)
1418     MutexRepair(thr, pc, (uptr)m);
1419   if (res == 0 || res == errno_EOWNERDEAD)
1420     MutexPostLock(thr, pc, (uptr)m);
1421   if (res == errno_EINVAL)
1422     MutexInvalidAccess(thr, pc, (uptr)m);
1423   return res;
1424 }
1425 
1426 TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1427   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1428   MutexUnlock(thr, pc, (uptr)m);
1429   int res = REAL(__pthread_mutex_unlock)(m);
1430   if (res == errno_EINVAL)
1431     MutexInvalidAccess(thr, pc, (uptr)m);
1432   return res;
1433 }
1434 #  endif
1435 #endif
1436 
1437 #if !SANITIZER_APPLE
1438 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1439   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1440   int res = REAL(pthread_spin_init)(m, pshared);
1441   if (res == 0) {
1442     MutexCreate(thr, pc, (uptr)m);
1443   }
1444   return res;
1445 }
1446 
1447 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1448   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1449   int res = REAL(pthread_spin_destroy)(m);
1450   if (res == 0) {
1451     MutexDestroy(thr, pc, (uptr)m);
1452   }
1453   return res;
1454 }
1455 
1456 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1457   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1458   MutexPreLock(thr, pc, (uptr)m);
1459   int res = REAL(pthread_spin_lock)(m);
1460   if (res == 0) {
1461     MutexPostLock(thr, pc, (uptr)m);
1462   }
1463   return res;
1464 }
1465 
1466 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1467   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1468   int res = REAL(pthread_spin_trylock)(m);
1469   if (res == 0) {
1470     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1471   }
1472   return res;
1473 }
1474 
1475 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1476   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1477   MutexUnlock(thr, pc, (uptr)m);
1478   int res = REAL(pthread_spin_unlock)(m);
1479   return res;
1480 }
1481 #endif
1482 
1483 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1484   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1485   int res = REAL(pthread_rwlock_init)(m, a);
1486   if (res == 0) {
1487     MutexCreate(thr, pc, (uptr)m);
1488   }
1489   return res;
1490 }
1491 
1492 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1493   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1494   int res = REAL(pthread_rwlock_destroy)(m);
1495   if (res == 0) {
1496     MutexDestroy(thr, pc, (uptr)m);
1497   }
1498   return res;
1499 }
1500 
1501 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1502   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1503   MutexPreReadLock(thr, pc, (uptr)m);
1504   int res = REAL(pthread_rwlock_rdlock)(m);
1505   if (res == 0) {
1506     MutexPostReadLock(thr, pc, (uptr)m);
1507   }
1508   return res;
1509 }
1510 
1511 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1512   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1513   int res = REAL(pthread_rwlock_tryrdlock)(m);
1514   if (res == 0) {
1515     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1516   }
1517   return res;
1518 }
1519 
1520 #if !SANITIZER_APPLE
1521 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1522   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1523   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1524   if (res == 0) {
1525     MutexPostReadLock(thr, pc, (uptr)m);
1526   }
1527   return res;
1528 }
1529 #endif
1530 
1531 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1532   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1533   MutexPreLock(thr, pc, (uptr)m);
1534   int res = REAL(pthread_rwlock_wrlock)(m);
1535   if (res == 0) {
1536     MutexPostLock(thr, pc, (uptr)m);
1537   }
1538   return res;
1539 }
1540 
1541 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1542   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1543   int res = REAL(pthread_rwlock_trywrlock)(m);
1544   if (res == 0) {
1545     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1546   }
1547   return res;
1548 }
1549 
1550 #if !SANITIZER_APPLE
1551 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1552   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1553   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1554   if (res == 0) {
1555     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1556   }
1557   return res;
1558 }
1559 #endif
1560 
1561 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1562   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1563   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1564   int res = REAL(pthread_rwlock_unlock)(m);
1565   return res;
1566 }
1567 
1568 #if !SANITIZER_APPLE
1569 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1570   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1571   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1572   int res = REAL(pthread_barrier_init)(b, a, count);
1573   return res;
1574 }
1575 
1576 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1577   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1578   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1579   int res = REAL(pthread_barrier_destroy)(b);
1580   return res;
1581 }
1582 
1583 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1584   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1585   Release(thr, pc, (uptr)b);
1586   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1587   int res = REAL(pthread_barrier_wait)(b);
1588   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1589   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1590     Acquire(thr, pc, (uptr)b);
1591   }
1592   return res;
1593 }
1594 #endif
1595 
1596 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1597   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1598   if (o == 0 || f == 0)
1599     return errno_EINVAL;
1600   atomic_uint32_t *a;
1601 
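  // The guard word is not at offset 0 of pthread_once_t on all platforms:
  // on Apple it follows a leading long, and on NetBSD it follows the embedded
  // pthread_mutex_t.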
1602   if (SANITIZER_APPLE)
1603     a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1604   else if (SANITIZER_NETBSD)
1605     a = static_cast<atomic_uint32_t*>
1606           ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1607   else
1608     a = static_cast<atomic_uint32_t*>(o);
1609 
  // Mac OS X appears to use pthread_once() in contexts where calling the
  // BlockingRegion hooks results in crashes due to too little stack space.
1612   if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1613     (*f)();
1614     guard_release(thr, pc, a, kGuardDone);
1615   }
1616   return 0;
1617 }
1618 
1619 #if SANITIZER_GLIBC
1620 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1621   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1622   if (fd > 0)
1623     FdAccess(thr, pc, fd);
1624   return REAL(__fxstat)(version, fd, buf);
1625 }
1626 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1627 #else
1628 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1629 #endif
1630 
1631 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
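  // On glibc, fstat has historically been a static wrapper around __fxstat
  // rather than a symbol exported from libc.so, so forward to the versioned
  // entry point directly.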
1632 #if SANITIZER_GLIBC
1633   SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1634   if (fd > 0)
1635     FdAccess(thr, pc, fd);
1636   return REAL(__fxstat)(0, fd, buf);
1637 #else
1638   SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1639   if (fd > 0)
1640     FdAccess(thr, pc, fd);
1641   return REAL(fstat)(fd, buf);
1642 #endif
1643 }
1644 
1645 #if SANITIZER_GLIBC
1646 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1647   SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1648   if (fd > 0)
1649     FdAccess(thr, pc, fd);
1650   return REAL(__fxstat64)(version, fd, buf);
1651 }
1652 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1653 #else
1654 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1655 #endif
1656 
1657 #if SANITIZER_GLIBC
1658 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1659   SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1660   if (fd > 0)
1661     FdAccess(thr, pc, fd);
1662   return REAL(__fxstat64)(0, fd, buf);
1663 }
1664 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1665 #else
1666 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1667 #endif
1668 
1669 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1670   va_list ap;
1671   va_start(ap, oflag);
1672   mode_t mode = va_arg(ap, int);
1673   va_end(ap);
1674   SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1675   READ_STRING(thr, pc, name, 0);
1676   int fd = REAL(open)(name, oflag, mode);
1677   if (fd >= 0)
1678     FdFileCreate(thr, pc, fd);
1679   return fd;
1680 }
1681 
1682 #if SANITIZER_LINUX
1683 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1684   va_list ap;
1685   va_start(ap, oflag);
1686   mode_t mode = va_arg(ap, int);
1687   va_end(ap);
1688   SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1689   READ_STRING(thr, pc, name, 0);
1690   int fd = REAL(open64)(name, oflag, mode);
1691   if (fd >= 0)
1692     FdFileCreate(thr, pc, fd);
1693   return fd;
1694 }
1695 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1696 #else
1697 #define TSAN_MAYBE_INTERCEPT_OPEN64
1698 #endif
1699 
1700 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1701   SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1702   READ_STRING(thr, pc, name, 0);
1703   int fd = REAL(creat)(name, mode);
1704   if (fd >= 0)
1705     FdFileCreate(thr, pc, fd);
1706   return fd;
1707 }
1708 
1709 #if SANITIZER_LINUX
1710 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1711   SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1712   READ_STRING(thr, pc, name, 0);
1713   int fd = REAL(creat64)(name, mode);
1714   if (fd >= 0)
1715     FdFileCreate(thr, pc, fd);
1716   return fd;
1717 }
1718 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1719 #else
1720 #define TSAN_MAYBE_INTERCEPT_CREAT64
1721 #endif
1722 
1723 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1724   SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1725   int newfd = REAL(dup)(oldfd);
1726   if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1727     FdDup(thr, pc, oldfd, newfd, true);
1728   return newfd;
1729 }
1730 
1731 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1732   SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1733   int newfd2 = REAL(dup2)(oldfd, newfd);
1734   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1735     FdDup(thr, pc, oldfd, newfd2, false);
1736   return newfd2;
1737 }
1738 
1739 #if !SANITIZER_APPLE
1740 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1741   SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1742   int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1743   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1744     FdDup(thr, pc, oldfd, newfd2, false);
1745   return newfd2;
1746 }
1747 #endif
1748 
1749 #if SANITIZER_LINUX
1750 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1751   SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1752   int fd = REAL(eventfd)(initval, flags);
1753   if (fd >= 0)
1754     FdEventCreate(thr, pc, fd);
1755   return fd;
1756 }
1757 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1758 #else
1759 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1760 #endif
1761 
1762 #if SANITIZER_LINUX
1763 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1764   SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
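  // signalfd() may reuse the descriptor passed in, so drop the state of the
  // old descriptor first and register the returned one as a signal fd.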
1765   FdClose(thr, pc, fd);
1766   fd = REAL(signalfd)(fd, mask, flags);
1767   if (!MustIgnoreInterceptor(thr))
1768     FdSignalCreate(thr, pc, fd);
1769   return fd;
1770 }
1771 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1772 #else
1773 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1774 #endif
1775 
1776 #if SANITIZER_LINUX
1777 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1778   SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1779   int fd = REAL(inotify_init)(fake);
1780   if (fd >= 0)
1781     FdInotifyCreate(thr, pc, fd);
1782   return fd;
1783 }
1784 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1785 #else
1786 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1787 #endif
1788 
1789 #if SANITIZER_LINUX
1790 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1791   SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1792   int fd = REAL(inotify_init1)(flags);
1793   if (fd >= 0)
1794     FdInotifyCreate(thr, pc, fd);
1795   return fd;
1796 }
1797 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1798 #else
1799 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1800 #endif
1801 
1802 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1803   SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1804   int fd = REAL(socket)(domain, type, protocol);
1805   if (fd >= 0)
1806     FdSocketCreate(thr, pc, fd);
1807   return fd;
1808 }
1809 
1810 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1811   SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1812   int res = REAL(socketpair)(domain, type, protocol, fd);
1813   if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1814     FdPipeCreate(thr, pc, fd[0], fd[1]);
1815   return res;
1816 }
1817 
1818 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1819   SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1820   FdSocketConnecting(thr, pc, fd);
1821   int res = REAL(connect)(fd, addr, addrlen);
1822   if (res == 0 && fd >= 0)
1823     FdSocketConnect(thr, pc, fd);
1824   return res;
1825 }
1826 
1827 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1828   SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1829   int res = REAL(bind)(fd, addr, addrlen);
1830   if (fd > 0 && res == 0)
1831     FdAccess(thr, pc, fd);
1832   return res;
1833 }
1834 
1835 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1836   SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1837   int res = REAL(listen)(fd, backlog);
1838   if (fd > 0 && res == 0)
1839     FdAccess(thr, pc, fd);
1840   return res;
1841 }
1842 
1843 TSAN_INTERCEPTOR(int, close, int fd) {
1844   SCOPED_INTERCEPTOR_RAW(close, fd);
1845   if (!in_symbolizer())
1846     FdClose(thr, pc, fd);
1847   return REAL(close)(fd);
1848 }
1849 
1850 #if SANITIZER_LINUX
1851 TSAN_INTERCEPTOR(int, __close, int fd) {
1852   SCOPED_INTERCEPTOR_RAW(__close, fd);
1853   FdClose(thr, pc, fd);
1854   return REAL(__close)(fd);
1855 }
1856 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1857 #else
1858 #define TSAN_MAYBE_INTERCEPT___CLOSE
1859 #endif
1860 
1861 // glibc guts
1862 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1863 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1864   SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1865   int fds[64];
1866   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1867   for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1868   REAL(__res_iclose)(state, free_addr);
1869 }
1870 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1871 #else
1872 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1873 #endif
1874 
1875 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1876   SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1877   int res = REAL(pipe)(pipefd);
1878   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1879     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1880   return res;
1881 }
1882 
1883 #if !SANITIZER_APPLE
1884 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1885   SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1886   int res = REAL(pipe2)(pipefd, flags);
1887   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1888     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1889   return res;
1890 }
1891 #endif
1892 
1893 TSAN_INTERCEPTOR(int, unlink, char *path) {
1894   SCOPED_TSAN_INTERCEPTOR(unlink, path);
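  // Pair the release on the file address with the acquire performed when the
  // file is subsequently opened (see COMMON_INTERCEPTOR_FILE_OPEN below).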
1895   Release(thr, pc, File2addr(path));
1896   int res = REAL(unlink)(path);
1897   return res;
1898 }
1899 
1900 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1901   SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1902   void *res = REAL(tmpfile)(fake);
1903   if (res) {
1904     int fd = fileno_unlocked(res);
1905     if (fd >= 0)
1906       FdFileCreate(thr, pc, fd);
1907   }
1908   return res;
1909 }
1910 
1911 #if SANITIZER_LINUX
1912 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1913   SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1914   void *res = REAL(tmpfile64)(fake);
1915   if (res) {
1916     int fd = fileno_unlocked(res);
1917     if (fd >= 0)
1918       FdFileCreate(thr, pc, fd);
1919   }
1920   return res;
1921 }
1922 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1923 #else
1924 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1925 #endif
1926 
1927 static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time, so flush only stdout
  // and stderr.
1930   REAL(fflush)(stdout);
1931   REAL(fflush)(stderr);
1932 }
1933 
1934 TSAN_INTERCEPTOR(void, abort, int fake) {
1935   SCOPED_TSAN_INTERCEPTOR(abort, fake);
1936   FlushStreams();
1937   REAL(abort)(fake);
1938 }
1939 
1940 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1941   SCOPED_TSAN_INTERCEPTOR(rmdir, path);
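  // Pair the release on the directory address with the acquire performed by
  // COMMON_INTERCEPTOR_DIR_ACQUIRE for subsequent directory operations.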
1942   Release(thr, pc, Dir2addr(path));
1943   int res = REAL(rmdir)(path);
1944   return res;
1945 }
1946 
1947 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1948   SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1949   if (dirp) {
1950     int fd = dirfd(dirp);
1951     FdClose(thr, pc, fd);
1952   }
1953   return REAL(closedir)(dirp);
1954 }
1955 
1956 #if SANITIZER_LINUX
1957 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1958   SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1959   int fd = REAL(epoll_create)(size);
1960   if (fd >= 0)
1961     FdPollCreate(thr, pc, fd);
1962   return fd;
1963 }
1964 
1965 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1966   SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1967   int fd = REAL(epoll_create1)(flags);
1968   if (fd >= 0)
1969     FdPollCreate(thr, pc, fd);
1970   return fd;
1971 }
1972 
1973 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1974   SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1975   if (epfd >= 0)
1976     FdAccess(thr, pc, epfd);
1977   if (epfd >= 0 && fd >= 0)
1978     FdAccess(thr, pc, fd);
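  // Adding an fd to the epoll set is modeled as a release on the epoll fd;
  // the matching acquire is performed in epoll_wait/epoll_pwait when events
  // are returned.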
1979   if (op == EPOLL_CTL_ADD && epfd >= 0) {
1980     FdPollAdd(thr, pc, epfd, fd);
1981     FdRelease(thr, pc, epfd);
1982   }
1983   int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1984   return res;
1985 }
1986 
1987 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1988   SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1989   if (epfd >= 0)
1990     FdAccess(thr, pc, epfd);
1991   int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1992   if (res > 0 && epfd >= 0)
1993     FdAcquire(thr, pc, epfd);
1994   return res;
1995 }
1996 
1997 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1998                  void *sigmask) {
1999   SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
2000   if (epfd >= 0)
2001     FdAccess(thr, pc, epfd);
2002   int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
2003   if (res > 0 && epfd >= 0)
2004     FdAcquire(thr, pc, epfd);
2005   return res;
2006 }
2007 
2008 TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
2009                  void *sigmask) {
2010   SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
  // This function is new and may not be present in libc and/or kernel.
  // Since we effectively add it to libc (as it will be probed by the program
  // using dlsym or a weak function pointer), we need to handle the case
  // when it's not present in the actual libc.
  if (!REAL(epoll_pwait2)) {
    errno = errno_ENOSYS;
    return -1;
  }
  if (MustIgnoreInterceptor(thr))
    return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2021   if (epfd >= 0)
2022     FdAccess(thr, pc, epfd);
2023   int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2024   if (res > 0 && epfd >= 0)
2025     FdAcquire(thr, pc, epfd);
2026   return res;
2027 }
2028 
2029 #  define TSAN_MAYBE_INTERCEPT_EPOLL \
2030     TSAN_INTERCEPT(epoll_create);    \
2031     TSAN_INTERCEPT(epoll_create1);   \
2032     TSAN_INTERCEPT(epoll_ctl);       \
2033     TSAN_INTERCEPT(epoll_wait);      \
2034     TSAN_INTERCEPT(epoll_pwait);     \
2035     TSAN_INTERCEPT(epoll_pwait2)
2036 #else
2037 #define TSAN_MAYBE_INTERCEPT_EPOLL
2038 #endif
2039 
// The following functions are intercepted merely to process pending signals.
// If the program blocks signal X, we must deliver the signal before the
// function returns. Similarly, if the program unblocks a signal (or returns
// from sigsuspend), it's better to deliver the signal straight away.
2044 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2045   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2046   return REAL(sigsuspend)(mask);
2047 }
2048 
2049 TSAN_INTERCEPTOR(int, sigblock, int mask) {
2050   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2051   return REAL(sigblock)(mask);
2052 }
2053 
2054 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2055   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2056   return REAL(sigsetmask)(mask);
2057 }
2058 
2059 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2060     __sanitizer_sigset_t *oldset) {
2061   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2062   return REAL(pthread_sigmask)(how, set, oldset);
2063 }
2064 
2065 namespace __tsan {
2066 
2067 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2068   VarSizeStackTrace stack;
  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
  // expected; OutputReport() will undo this.
2071   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2072   ThreadRegistryLock l(&ctx->thread_registry);
2073   ScopedReport rep(ReportTypeErrnoInSignal);
2074   rep.SetSigNum(sig);
2075   if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2076     rep.AddStack(stack, true);
2077     OutputReport(thr, rep);
2078   }
2079 }
2080 
2081 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2082                                   int sig, __sanitizer_siginfo *info,
2083                                   void *uctx) {
2084   CHECK(thr->slot);
2085   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2086   if (acquire)
2087     Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Signals are generally asynchronous, so if we receive a signal when
2089   // ignores are enabled we should disable ignores. This is critical for sync
2090   // and interceptors, because otherwise we can miss synchronization and report
2091   // false races.
2092   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2093   int ignore_interceptors = thr->ignore_interceptors;
2094   int ignore_sync = thr->ignore_sync;
  // While in the symbolizer we only process SIGSEGVs synchronously
  // (they indicate a bug in the symbolizer or in tsan). But we want to reset
  // in_symbolizer to fail gracefully. The symbolizer and user code
  // use different memory allocators, so if we don't reset
  // in_symbolizer we can get memory allocated with one allocator
  // freed with the other, which can cause more crashes.
2101   int in_symbolizer = thr->in_symbolizer;
2102   if (!ctx->after_multithreaded_fork) {
2103     thr->ignore_reads_and_writes = 0;
2104     thr->fast_state.ClearIgnoreBit();
2105     thr->ignore_interceptors = 0;
2106     thr->ignore_sync = 0;
2107     thr->in_symbolizer = 0;
2108   }
2109   // Ensure that the handler does not spoil errno.
2110   const int saved_errno = errno;
2111   errno = 99;
2112   // This code races with sigaction. Be careful to not read sa_sigaction twice.
2113   // Also need to remember pc for reporting before the call,
2114   // because the handler can reset it.
2115   volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2116                          ? (uptr)sigactions[sig].sigaction
2117                          : (uptr)sigactions[sig].handler;
2118   if (pc != sig_dfl && pc != sig_ign) {
2119     // The callback can be either sa_handler or sa_sigaction.
2120     // They have different signatures, but we assume that passing
2121     // additional arguments to sa_handler works and is harmless.
2122     ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2123   }
2124   if (!ctx->after_multithreaded_fork) {
2125     thr->ignore_reads_and_writes = ignore_reads_and_writes;
2126     if (ignore_reads_and_writes)
2127       thr->fast_state.SetIgnoreBit();
2128     thr->ignore_interceptors = ignore_interceptors;
2129     thr->ignore_sync = ignore_sync;
2130     thr->in_symbolizer = in_symbolizer;
2131   }
  // We do not detect errno spoiling for SIGTERM,
  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
  // and tsan would report a false positive in such cases.
  // It's difficult to properly detect this situation (reraise),
  // because in the async signal processing case (when the handler is called
  // directly from rtl_generic_sighandler) we have not yet received the
  // reraised signal; and it looks too fragile to intercept all ways to
  // reraise a signal.
2139   if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2140       errno != 99)
2141     ReportErrnoSpoiling(thr, pc, sig);
2142   errno = saved_errno;
2143 }
2144 
2145 void ProcessPendingSignalsImpl(ThreadState *thr) {
2146   atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2147   ThreadSignalContext *sctx = SigCtx(thr);
2148   if (sctx == 0)
2149     return;
2150   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
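  // Block all signals while the pending handlers run so that a nested
  // asynchronous signal cannot re-enter this loop; the original mask is
  // restored below. (Despite its name, emptyset is filled with all signals.)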
2151   internal_sigfillset(&sctx->emptyset);
2152   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2153   CHECK_EQ(res, 0);
2154   for (int sig = 0; sig < kSigCount; sig++) {
2155     SignalDesc *signal = &sctx->pending_signals[sig];
2156     if (signal->armed) {
2157       signal->armed = false;
2158       CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2159                             &signal->ctx);
2160     }
2161   }
2162   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2163   CHECK_EQ(res, 0);
2164   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2165 }
2166 
2167 }  // namespace __tsan
2168 
2169 static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2170                            __sanitizer_siginfo *info) {
2171   // If we are sending signal to ourselves, we must process it now.
2172   if (sctx && sig == sctx->int_signal_send)
2173     return true;
2174 #if SANITIZER_HAS_SIGINFO
2175   // POSIX timers can be configured to send any kind of signal; however, it
2176   // doesn't make any sense to consider a timer signal as synchronous!
2177   if (info->si_code == SI_TIMER)
2178     return false;
2179 #endif
2180   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2181          sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2182 }
2183 
2184 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2185   ThreadState *thr = cur_thread_init();
2186   ThreadSignalContext *sctx = SigCtx(thr);
2187   if (sig < 0 || sig >= kSigCount) {
2188     VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2189     return;
2190   }
2191   // Don't mess with synchronous signals.
2192   const bool sync = is_sync_signal(sctx, sig, info);
2193   if (sync ||
2194       // If we are in blocking function, we can safely process it now
2195       // (but check if we are in a recursive interceptor,
2196       // i.e. pthread_join()->munmap()).
2197       atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2198     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2199     if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2200       atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2201       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2202       atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2203     } else {
      // Be very conservative about when we do acquire in this case.
      // It's unsafe to do acquire in async handlers, because ThreadState
      // can be in an inconsistent state.
      // SIGSYS looks relatively safe -- it's synchronous and can actually
      // need some global state.
2209       bool acq = (sig == SIGSYS);
2210       CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2211     }
2212     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2213     return;
2214   }
2215 
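  // Asynchronous signal in an unsafe context: do not run the user handler
  // now, just record it; it will be delivered from ProcessPendingSignals()
  // at the next interceptor or atomic operation.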
2216   if (sctx == 0)
2217     return;
2218   SignalDesc *signal = &sctx->pending_signals[sig];
2219   if (signal->armed == false) {
2220     signal->armed = true;
2221     internal_memcpy(&signal->siginfo, info, sizeof(*info));
2222     internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2223     atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2224   }
2225 }
2226 
2227 TSAN_INTERCEPTOR(int, raise, int sig) {
2228   SCOPED_TSAN_INTERCEPTOR(raise, sig);
2229   ThreadSignalContext *sctx = SigCtx(thr);
2230   CHECK_NE(sctx, 0);
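  // Mark the signal as sent to ourselves so that sighandler() treats it as
  // synchronous and runs the user handler immediately (see is_sync_signal()).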
2231   int prev = sctx->int_signal_send;
2232   sctx->int_signal_send = sig;
2233   int res = REAL(raise)(sig);
2234   CHECK_EQ(sctx->int_signal_send, sig);
2235   sctx->int_signal_send = prev;
2236   return res;
2237 }
2238 
2239 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2240   SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2241   ThreadSignalContext *sctx = SigCtx(thr);
2242   CHECK_NE(sctx, 0);
2243   int prev = sctx->int_signal_send;
2244   if (pid == (int)internal_getpid()) {
2245     sctx->int_signal_send = sig;
2246   }
2247   int res = REAL(kill)(pid, sig);
2248   if (pid == (int)internal_getpid()) {
2249     CHECK_EQ(sctx->int_signal_send, sig);
2250     sctx->int_signal_send = prev;
2251   }
2252   return res;
2253 }
2254 
2255 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2256   SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2257   ThreadSignalContext *sctx = SigCtx(thr);
2258   CHECK_NE(sctx, 0);
2259   int prev = sctx->int_signal_send;
2260   bool self = pthread_equal(tid, pthread_self());
2261   if (self)
2262     sctx->int_signal_send = sig;
2263   int res = REAL(pthread_kill)(tid, sig);
2264   if (self) {
2265     CHECK_EQ(sctx->int_signal_send, sig);
2266     sctx->int_signal_send = prev;
2267   }
2268   return res;
2269 }
2270 
2271 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2272   SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2273   // It's intercepted merely to process pending signals.
2274   return REAL(gettimeofday)(tv, tz);
2275 }
2276 
2277 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2278     void *hints, void *rv) {
2279   SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2280   // We miss atomic synchronization in getaddrinfo,
2281   // and can report false race between malloc and free
2282   // inside of getaddrinfo. So ignore memory accesses.
2283   ThreadIgnoreBegin(thr, pc);
2284   int res = REAL(getaddrinfo)(node, service, hints, rv);
2285   ThreadIgnoreEnd(thr);
2286   return res;
2287 }
2288 
2289 TSAN_INTERCEPTOR(int, fork, int fake) {
2290   if (in_symbolizer())
2291     return REAL(fork)(fake);
2292   SCOPED_INTERCEPTOR_RAW(fork, fake);
2293   return REAL(fork)(fake);
2294 }
2295 
2296 void atfork_prepare() {
2297   if (in_symbolizer())
2298     return;
2299   ThreadState *thr = cur_thread();
2300   const uptr pc = StackTrace::GetCurrentPc();
2301   ForkBefore(thr, pc);
2302 }
2303 
2304 void atfork_parent() {
2305   if (in_symbolizer())
2306     return;
2307   ThreadState *thr = cur_thread();
2308   const uptr pc = StackTrace::GetCurrentPc();
2309   ForkParentAfter(thr, pc);
2310 }
2311 
2312 void atfork_child() {
2313   if (in_symbolizer())
2314     return;
2315   ThreadState *thr = cur_thread();
2316   const uptr pc = StackTrace::GetCurrentPc();
2317   ForkChildAfter(thr, pc, true);
2318   FdOnFork(thr, pc);
2319 }
2320 
2321 #if !SANITIZER_IOS
2322 TSAN_INTERCEPTOR(int, vfork, int fake) {
  // Some programs (e.g. openjdk) call close for all file descriptors
  // in the child process. Under tsan this leads to false positives, because
  // the address space is shared, so the parent process also thinks that
  // the descriptors are closed (while they are actually not), and the
  // resulting missed synchronization produces false reports.
  // Strictly speaking, this is undefined behavior, because the vfork child is
  // not allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
2331   // We could disable interceptors in the child process. But it's not possible
2332   // to simply intercept and wrap vfork, because vfork child is not allowed
2333   // to return from the function that calls vfork, and that's exactly what
2334   // we would do. So this would require some assembly trickery as well.
2335   // Instead we simply turn vfork into fork.
2336   return WRAP(fork)(fake);
2337 }
2338 #endif
2339 
2340 #if SANITIZER_LINUX
2341 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2342                  void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2343   SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2344                          child_tid);
2345   struct Arg {
2346     int (*fn)(void *);
2347     void *arg;
2348   };
2349   auto wrapper = +[](void *p) -> int {
2350     auto *thr = cur_thread();
2351     uptr pc = GET_CURRENT_PC();
    // Start the background thread for fork, but not for clone.
    // For fork we always did this and it's known to work (or user code has
    // adapted to it). But if we do this for the new clone interceptor, some
    // code (sandbox2) fails. So keep the model we used for years and don't
    // start the background thread after clone.
2357     ForkChildAfter(thr, pc, false);
2358     FdOnFork(thr, pc);
2359     auto *arg = static_cast<Arg *>(p);
2360     return arg->fn(arg->arg);
2361   };
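  // The parent-side fork hooks bracket the clone() call itself; the
  // child-side hook runs in the wrapper above, in the new task.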
2362   ForkBefore(thr, pc);
2363   Arg arg_wrapper = {fn, arg};
2364   int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2365                         child_tid);
2366   ForkParentAfter(thr, pc);
2367   return pid;
2368 }
2369 #endif
2370 
2371 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2372 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2373                                     void *data);
2374 struct dl_iterate_phdr_data {
2375   ThreadState *thr;
2376   uptr pc;
2377   dl_iterate_phdr_cb_t cb;
2378   void *data;
2379 };
2380 
2381 static bool IsAppNotRodata(uptr addr) {
2382   return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2383 }
2384 
2385 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2386                               void *data) {
2387   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
  // later accessible in the dl_iterate_phdr callback. But we don't see
  // synchronization inside the dynamic linker, so we "unpoison" it here in
  // order to not produce false reports. Ignoring malloc/free in
  // dlopen/dlclose is not enough because some libc functions call
  // __libc_dlopen.
2393   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2394     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2395                      internal_strlen(info->dlpi_name));
2396   int res = cbdata->cb(info, size, cbdata->data);
  // Perform the check one more time in case info->dlpi_name was overwritten
  // by the user callback.
2399   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2400     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2401                      internal_strlen(info->dlpi_name));
2402   return res;
2403 }
2404 
2405 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2406   SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2407   dl_iterate_phdr_data cbdata;
2408   cbdata.thr = thr;
2409   cbdata.pc = pc;
2410   cbdata.cb = cb;
2411   cbdata.data = data;
2412   int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2413   return res;
2414 }
2415 #endif
2416 
2417 static int OnExit(ThreadState *thr) {
2418   int status = Finalize(thr);
2419   FlushStreams();
2420   return status;
2421 }
2422 
2423 struct TsanInterceptorContext {
2424   ThreadState *thr;
2425   const uptr pc;
2426 };
2427 
2428 #if !SANITIZER_APPLE
2429 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2430     __sanitizer_msghdr *msg) {
2431   int fds[64];
2432   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2433   for (int i = 0; i < cnt; i++)
2434     FdEventCreate(thr, pc, fds[i]);
2435 }
2436 #endif
2437 
2438 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2439 // Causes interceptor recursion (getaddrinfo() and fopen())
2440 #undef SANITIZER_INTERCEPT_GETADDRINFO
2441 // We define our own.
2442 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2443 #define NEED_TLS_GET_ADDR
2444 #endif
2445 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2446 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2447 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2448 
2449 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2450 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2451   INTERCEPT_FUNCTION_VER(name, ver)
2452 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2453   (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2454 
2455 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                    \
2456   MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
2457                     ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2458                     true)
2459 
2460 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                       \
2461   MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr,                  \
2462                     ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2463                     false)
2464 
2465 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2466   SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);    \
2467   TsanInterceptorContext _ctx = {thr, pc};       \
2468   ctx = (void *)&_ctx;                           \
2469   (void)ctx;
2470 
2471 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2472   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2473   TsanInterceptorContext _ctx = {thr, pc};                \
2474   ctx = (void *)&_ctx;                                    \
2475   (void)ctx;
2476 
2477 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2478   if (path)                                           \
2479     Acquire(thr, pc, File2addr(path));                \
2480   if (file) {                                         \
2481     int fd = fileno_unlocked(file);                   \
2482     if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2483   }
2484 
2485 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2486   if (file) {                                    \
2487     int fd = fileno_unlocked(file);              \
2488     FdClose(thr, pc, fd);                        \
2489   }
2490 
2491 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2492   ({                                              \
2493     CheckNoDeepBind(filename, flag);              \
2494     ThreadIgnoreBegin(thr, 0);                    \
2495     void *res = REAL(dlopen)(filename, flag);     \
2496     ThreadIgnoreEnd(thr);                         \
2497     res;                                          \
2498   })
2499 
2500 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2501   libignore()->OnLibraryLoaded(filename)
2502 
2503 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2504   libignore()->OnLibraryUnloaded()
2505 
2506 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2507   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2508 
2509 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2510   Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2511 
2512 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2513   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2514 
2515 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2516   FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2517 
2518 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2519   FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2520 
2521 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2522   FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2523 
2524 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2525   FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2526 
2527 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2528   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2529 
2530 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name)         \
2531   if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2532     COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name);                     \
2533   else                                                                 \
2534     __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2535 
2536 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2537 
2538 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2539   OnExit(((TsanInterceptorContext *) ctx)->thr)
2540 
2541 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2542                                      off)                                   \
2543   do {                                                                      \
2544     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2545                             off);                                           \
2546   } while (false)
2547 
2548 #if !SANITIZER_APPLE
2549 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2550   HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2551       ((TsanInterceptorContext *)ctx)->pc, msg)
2552 #endif
2553 
2554 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2555   if (TsanThread *t = GetCurrentThread()) {                                    \
2556     *begin = t->tls_begin();                                                   \
2557     *end = t->tls_end();                                                       \
2558   } else {                                                                     \
2559     *begin = *end = 0;                                                         \
2560   }
2561 
2562 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2563   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2564 
2565 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2566   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2567 
2568 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2569 
2570 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2571                           __sanitizer_sigaction *old);
2572 static __sanitizer_sighandler_ptr signal_impl(int sig,
2573                                               __sanitizer_sighandler_ptr h);
2574 
2575 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2576   { return sigaction_impl(signo, act, oldact); }
2577 
2578 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2579   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2580 
2581 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2582 
2583 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2584                    __sanitizer_sigaction *old) {
2585   // Note: if we call REAL(sigaction) directly for any reason without proxying
2586   // the signal handler through sighandler, very bad things will happen.
2587   // The handler will run synchronously and corrupt tsan per-thread state.
2588   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2589   if (sig <= 0 || sig >= kSigCount) {
2590     errno = errno_EINVAL;
2591     return -1;
2592   }
2593   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2594   __sanitizer_sigaction old_stored;
2595   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2596   __sanitizer_sigaction newact;
2597   if (act) {
    // Copy act into sigactions[sig].
    // Can't use struct copy, because the compiler can emit a call to memcpy.
    // Can't use internal_memcpy, because it copies byte-by-byte,
    // and the signal handler reads the handler concurrently. So it could read
    // some bytes from the old value and some bytes from the new value.
    // Use volatile to prevent insertion of memcpy.
2604     sigactions[sig].handler =
2605         *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2606     sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2607     internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2608                     sizeof(sigactions[sig].sa_mask));
2609 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2610     sigactions[sig].sa_restorer = act->sa_restorer;
2611 #endif
2612     internal_memcpy(&newact, act, sizeof(newact));
2613     internal_sigfillset(&newact.sa_mask);
2614     if ((act->sa_flags & SA_SIGINFO) ||
2615         ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2616       newact.sa_flags |= SA_SIGINFO;
2617       newact.sigaction = sighandler;
2618     }
2619     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2620     act = &newact;
2621   }
2622   int res = REAL(sigaction)(sig, act, old);
2623   if (res == 0 && old && old->sigaction == sighandler)
2624     internal_memcpy(old, &old_stored, sizeof(*old));
2625   return res;
2626 }
2627 
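// signal() is implemented on top of sigaction() so that handlers installed
// via signal() are proxied through sighandler() as well.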
2628 static __sanitizer_sighandler_ptr signal_impl(int sig,
2629                                               __sanitizer_sighandler_ptr h) {
2630   __sanitizer_sigaction act;
2631   act.handler = h;
2632   internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2633   act.sa_flags = 0;
2634   __sanitizer_sigaction old;
2635   int res = sigaction_symname(sig, &act, &old);
2636   if (res) return (__sanitizer_sighandler_ptr)sig_err;
2637   return old.handler;
2638 }
2639 
2640 #define TSAN_SYSCALL()             \
2641   ThreadState *thr = cur_thread(); \
2642   if (thr->ignore_interceptors)    \
2643     return;                        \
2644   ScopedSyscall scoped_syscall(thr)
2645 
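// RAII helper for the syscall hooks below: lazily initializes the thread
// state on entry and processes pending signals on exit.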
2646 struct ScopedSyscall {
2647   ThreadState *thr;
2648 
2649   explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2650 
2651   ~ScopedSyscall() {
2652     ProcessPendingSignals(thr);
2653   }
2654 };
2655 
2656 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2657 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2658   TSAN_SYSCALL();
2659   MemoryAccessRange(thr, pc, p, s, write);
2660 }
2661 
2662 static USED void syscall_acquire(uptr pc, uptr addr) {
2663   TSAN_SYSCALL();
2664   Acquire(thr, pc, addr);
2665   DPrintf("syscall_acquire(0x%zx))\n", addr);
2666 }
2667 
2668 static USED void syscall_release(uptr pc, uptr addr) {
2669   TSAN_SYSCALL();
2670   DPrintf("syscall_release(0x%zx)\n", addr);
2671   Release(thr, pc, addr);
2672 }
2673 
2674 static void syscall_fd_close(uptr pc, int fd) {
2675   auto *thr = cur_thread();
2676   FdClose(thr, pc, fd);
2677 }
2678 
2679 static USED void syscall_fd_acquire(uptr pc, int fd) {
2680   TSAN_SYSCALL();
2681   FdAcquire(thr, pc, fd);
2682   DPrintf("syscall_fd_acquire(%d)\n", fd);
2683 }
2684 
2685 static USED void syscall_fd_release(uptr pc, int fd) {
2686   TSAN_SYSCALL();
2687   DPrintf("syscall_fd_release(%d)\n", fd);
2688   FdRelease(thr, pc, fd);
2689 }
2690 
2691 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2692 
2693 static void syscall_post_fork(uptr pc, int pid) {
2694   ThreadState *thr = cur_thread();
2695   if (pid == 0) {
2696     // child
2697     ForkChildAfter(thr, pc, true);
2698     FdOnFork(thr, pc);
2699   } else if (pid > 0) {
2700     // parent
2701     ForkParentAfter(thr, pc);
2702   } else {
2703     // error
2704     ForkParentAfter(thr, pc);
2705   }
2706 }
2707 #endif
2708 
2709 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2710   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2711 
2712 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2713   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2714 
2715 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2716   do {                                       \
2717     (void)(p);                               \
2718     (void)(s);                               \
2719   } while (false)
2720 
2721 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2722   do {                                        \
2723     (void)(p);                                \
2724     (void)(s);                                \
2725   } while (false)
2726 
2727 #define COMMON_SYSCALL_ACQUIRE(addr) \
2728     syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2729 
2730 #define COMMON_SYSCALL_RELEASE(addr) \
2731     syscall_release(GET_CALLER_PC(), (uptr)(addr))
2732 
2733 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2734 
2735 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2736 
2737 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2738 
2739 #define COMMON_SYSCALL_PRE_FORK() \
2740   syscall_pre_fork(GET_CALLER_PC())
2741 
2742 #define COMMON_SYSCALL_POST_FORK(res) \
2743   syscall_post_fork(GET_CALLER_PC(), res)
2744 
2745 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2746 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2747 
2748 #ifdef NEED_TLS_GET_ADDR
2749 
2750 static void handle_tls_addr(void *arg, void *res) {
2751   ThreadState *thr = cur_thread();
2752   if (!thr)
2753     return;
2754   DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2755                                         thr->tls_addr + thr->tls_size);
2756   if (!dtv)
2757     return;
2758   // New DTLS block has been allocated.
2759   MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2760 }
2761 
2762 #if !SANITIZER_S390
// Define our own interceptor instead of sanitizer_common's for three reasons:
// 1. It must not process pending signals.
//    Signal handlers may contain the MOVDQA instruction (see below).
// 2. It must be as simple as possible so as not to contain MOVDQA.
// 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
//    which is empty for tsan (meant only for msan).
// Note: __tls_get_addr can be called with a mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// So the interceptor must work with a mis-aligned stack; in particular, it
// must not execute MOVDQA with stack addresses.
2773 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2774   void *res = REAL(__tls_get_addr)(arg);
2775   handle_tls_addr(arg, res);
2776   return res;
2777 }
2778 #else // SANITIZER_S390
2779 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2780   uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2781   char *tp = static_cast<char *>(__builtin_thread_pointer());
2782   handle_tls_addr(arg, res + tp);
2783   return res;
2784 }
2785 #endif
2786 #endif
2787 
2788 #if SANITIZER_NETBSD
2789 TSAN_INTERCEPTOR(void, _lwp_exit) {
2790   SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2791   DestroyThreadState();
2792   REAL(_lwp_exit)();
2793 }
2794 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2795 #else
2796 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2797 #endif
2798 
2799 #if SANITIZER_FREEBSD
2800 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2801   SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2802   DestroyThreadState();
  REAL(thr_exit)(state);
2804 }
2805 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2806 #else
2807 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2808 #endif
2809 
2810 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2811 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2812 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2813 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2814 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2815 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2816 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2817 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2818 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2819 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2820 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2821 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2822 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2823 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2824 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2825 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2826 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2827 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2828 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2829 
2830 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2831 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2832 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2833 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2834 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2835 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2836 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2837 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2838 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2839 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2840 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2841 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2842 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2843 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2844 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2845 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2846 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2847 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2848 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2849   void *c)
2850 
2851 namespace __tsan {
2852 
2853 static void finalize(void *arg) {
2854   ThreadState *thr = cur_thread();
2855   int status = Finalize(thr);
2856   // Make sure the output is not lost.
2857   FlushStreams();
2858   if (status)
2859     Die();
2860 }
2861 
2862 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2863 static void unreachable() {
2864   Report("FATAL: ThreadSanitizer: unreachable called\n");
2865   Die();
2866 }
2867 #endif
2868 
// Define a default implementation since interception of libdispatch is
// optional.
2870 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2871 
2872 void InitializeInterceptors() {
2873 #if !SANITIZER_APPLE
  // We need to set these up early, because functions like dlsym() can call
  // them.
2875   REAL(memset) = internal_memset;
2876   REAL(memcpy) = internal_memcpy;
2877 #endif
2878 
2879   new(interceptor_ctx()) InterceptorContext();
2880 
2881   InitializeCommonInterceptors();
2882   InitializeSignalInterceptors();
2883   InitializeLibdispatchInterceptors();
2884 
2885 #if !SANITIZER_APPLE
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
  // &setjmp, and setjmp is not present in some versions of libc.
2888   using __interception::InterceptFunction;
2889   InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
2890   InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2891   InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
2892                     0);
2893 #if !SANITIZER_NETBSD
2894   InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2895 #endif
2896 #endif
2897 
2898   TSAN_INTERCEPT(longjmp_symname);
2899   TSAN_INTERCEPT(siglongjmp_symname);
2900 #if SANITIZER_NETBSD
2901   TSAN_INTERCEPT(_longjmp);
2902 #endif
2903 
2904   TSAN_INTERCEPT(malloc);
2905   TSAN_INTERCEPT(__libc_memalign);
2906   TSAN_INTERCEPT(calloc);
2907   TSAN_INTERCEPT(realloc);
2908   TSAN_INTERCEPT(reallocarray);
2909   TSAN_INTERCEPT(free);
2910   TSAN_INTERCEPT(cfree);
2911   TSAN_INTERCEPT(munmap);
2912   TSAN_MAYBE_INTERCEPT_MEMALIGN;
2913   TSAN_INTERCEPT(valloc);
2914   TSAN_MAYBE_INTERCEPT_PVALLOC;
2915   TSAN_INTERCEPT(posix_memalign);
2916 
2917   TSAN_INTERCEPT(strcpy);
2918   TSAN_INTERCEPT(strncpy);
2919   TSAN_INTERCEPT(strdup);
2920 
2921   TSAN_INTERCEPT(pthread_create);
2922   TSAN_INTERCEPT(pthread_join);
2923   TSAN_INTERCEPT(pthread_detach);
2924   TSAN_INTERCEPT(pthread_exit);
2925   #if SANITIZER_LINUX
2926   TSAN_INTERCEPT(pthread_tryjoin_np);
2927   TSAN_INTERCEPT(pthread_timedjoin_np);
2928   #endif
2929 
2930   TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2931   TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2932   TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2933   TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2934   TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2935   TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2936 
2937   TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2938 
2939   TSAN_INTERCEPT(pthread_mutex_init);
2940   TSAN_INTERCEPT(pthread_mutex_destroy);
2941   TSAN_INTERCEPT(pthread_mutex_lock);
2942   TSAN_INTERCEPT(pthread_mutex_trylock);
2943   TSAN_INTERCEPT(pthread_mutex_timedlock);
2944   TSAN_INTERCEPT(pthread_mutex_unlock);
2945 #if SANITIZER_GLIBC
2946 #  if !__GLIBC_PREREQ(2, 34)
2947   TSAN_INTERCEPT(__pthread_mutex_lock);
2948   TSAN_INTERCEPT(__pthread_mutex_unlock);
2949 #  endif
2950 #endif
2951 
2952   TSAN_INTERCEPT(pthread_spin_init);
2953   TSAN_INTERCEPT(pthread_spin_destroy);
2954   TSAN_INTERCEPT(pthread_spin_lock);
2955   TSAN_INTERCEPT(pthread_spin_trylock);
2956   TSAN_INTERCEPT(pthread_spin_unlock);
2957 
2958   TSAN_INTERCEPT(pthread_rwlock_init);
2959   TSAN_INTERCEPT(pthread_rwlock_destroy);
2960   TSAN_INTERCEPT(pthread_rwlock_rdlock);
2961   TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2962   TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2963   TSAN_INTERCEPT(pthread_rwlock_wrlock);
2964   TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2965   TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2966   TSAN_INTERCEPT(pthread_rwlock_unlock);
2967 
2968   TSAN_INTERCEPT(pthread_barrier_init);
2969   TSAN_INTERCEPT(pthread_barrier_destroy);
2970   TSAN_INTERCEPT(pthread_barrier_wait);
2971 
2972   TSAN_INTERCEPT(pthread_once);
2973 
2974   TSAN_INTERCEPT(fstat);
2975   TSAN_MAYBE_INTERCEPT___FXSTAT;
2976   TSAN_MAYBE_INTERCEPT_FSTAT64;
2977   TSAN_MAYBE_INTERCEPT___FXSTAT64;
2978   TSAN_INTERCEPT(open);
2979   TSAN_MAYBE_INTERCEPT_OPEN64;
2980   TSAN_INTERCEPT(creat);
2981   TSAN_MAYBE_INTERCEPT_CREAT64;
2982   TSAN_INTERCEPT(dup);
2983   TSAN_INTERCEPT(dup2);
2984   TSAN_INTERCEPT(dup3);
2985   TSAN_MAYBE_INTERCEPT_EVENTFD;
2986   TSAN_MAYBE_INTERCEPT_SIGNALFD;
2987   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2988   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2989   TSAN_INTERCEPT(socket);
2990   TSAN_INTERCEPT(socketpair);
2991   TSAN_INTERCEPT(connect);
2992   TSAN_INTERCEPT(bind);
2993   TSAN_INTERCEPT(listen);
2994   TSAN_MAYBE_INTERCEPT_EPOLL;
2995   TSAN_INTERCEPT(close);
2996   TSAN_MAYBE_INTERCEPT___CLOSE;
2997   TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2998   TSAN_INTERCEPT(pipe);
2999   TSAN_INTERCEPT(pipe2);
3000 
3001   TSAN_INTERCEPT(unlink);
3002   TSAN_INTERCEPT(tmpfile);
3003   TSAN_MAYBE_INTERCEPT_TMPFILE64;
3004   TSAN_INTERCEPT(abort);
3005   TSAN_INTERCEPT(rmdir);
3006   TSAN_INTERCEPT(closedir);
3007 
3008   TSAN_INTERCEPT(sigsuspend);
3009   TSAN_INTERCEPT(sigblock);
3010   TSAN_INTERCEPT(sigsetmask);
3011   TSAN_INTERCEPT(pthread_sigmask);
3012   TSAN_INTERCEPT(raise);
3013   TSAN_INTERCEPT(kill);
3014   TSAN_INTERCEPT(pthread_kill);
3015   TSAN_INTERCEPT(sleep);
3016   TSAN_INTERCEPT(usleep);
3017   TSAN_INTERCEPT(nanosleep);
3018   TSAN_INTERCEPT(pause);
3019   TSAN_INTERCEPT(gettimeofday);
3020   TSAN_INTERCEPT(getaddrinfo);
3021 
3022   TSAN_INTERCEPT(fork);
3023   TSAN_INTERCEPT(vfork);
3024 #if SANITIZER_LINUX
3025   TSAN_INTERCEPT(clone);
3026 #endif
3027 #if !SANITIZER_ANDROID
3028   TSAN_INTERCEPT(dl_iterate_phdr);
3029 #endif
3030   TSAN_MAYBE_INTERCEPT_ON_EXIT;
3031   TSAN_INTERCEPT(__cxa_atexit);
3032   TSAN_INTERCEPT(_exit);
3033 
3034 #ifdef NEED_TLS_GET_ADDR
3035 #if !SANITIZER_S390
3036   TSAN_INTERCEPT(__tls_get_addr);
3037 #else
3038   TSAN_INTERCEPT(__tls_get_addr_internal);
3039   TSAN_INTERCEPT(__tls_get_offset);
3040 #endif
3041 #endif
3042 
3043   TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3044   TSAN_MAYBE_INTERCEPT_THR_EXIT;
3045 
3046 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
  // Need to set it up, because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it can't be
  // resolved.
  REAL(atexit) = (int(*)(void(*)()))unreachable;
#endif

  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
    Printf("ThreadSanitizer: failed to setup atexit callback\n");
    Die();
  }
  if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
    Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
    Die();
  }

#if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
    Printf("ThreadSanitizer: failed to create thread key\n");
    Die();
  }
#endif

  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
  TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);

  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);

  FdInit();
}

}  // namespace __tsan

// Invisible barrier for tests.
// There were several unsuccessful iterations of this functionality:
// 1. Initially it was implemented in user code using
//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    macOS, and futexes are Linux-specific, so neither works everywhere.
// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
//    "as-if synchronized via sleep" messages in reports, which failed some
//    output tests.
// 3. Then we switched to atomics+sched_yield. But this produced tons of
//    tsan-visible events, which led to "failed to restore stack trace"
//    failures.
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help.
// That's why the barrier is now implemented here, inside the runtime.
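//
// Illustrative usage sketch only (hypothetical test code, not part of this
// file): a test with two racing threads is expected to drive the entry points
// defined below roughly like this, which forces a deterministic order of the
// racing accesses without creating a synchronization edge that tsan would
// see:
//
//   static atomic_uint32_t barrier;             // storage owned by the test
//   __tsan_testonly_barrier_init(&barrier, 2);  // main, before the threads
//   // thread 1:
//   data = 1;
//   __tsan_testonly_barrier_wait(&barrier);
//   // thread 2:
//   __tsan_testonly_barrier_wait(&barrier);
//   data = 2;                                   // still reported as a race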
constexpr u32 kBarrierThreadBits = 10;
constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;

extern "C" {

SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
    atomic_uint32_t *barrier, u32 num_threads) {
  if (num_threads >= kBarrierThreads) {
    Printf("barrier_init: count is too large (%d)\n", num_threads);
    Die();
  }
  // Layout of the barrier value: the low kBarrierThreadBits bits hold the
  // thread count; the remaining high bits count the threads that have entered
  // the barrier so far (see the worked example before barrier_epoch below).
  atomic_store(barrier, num_threads, memory_order_relaxed);
}

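// barrier_epoch() recovers the number of completed barrier rounds from the
// packed value: (arrivals so far) / (thread count). Worked example
// (illustration only): with num_threads == 2 the value starts at 2. The first
// waiter's fetch_add returns 2, i.e. epoch 0/2 == 0, and counting its own
// arrival still gives epoch 1/2 == 0, so it blocks. The second waiter's
// fetch_add returns 2 + kBarrierThreads, i.e. epoch 1/2 == 0, while counting
// its own arrival gives epoch 2/2 == 1, a new epoch, so it wakes everyone.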
static u32 barrier_epoch(u32 value) {
  return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
}

SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
    atomic_uint32_t *barrier) {
  // Record our arrival by bumping the high "entered" field by one.
  u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
  u32 old_epoch = barrier_epoch(old);
  if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
    // Our arrival completed the current round, so the stored value already
    // maps to a new epoch; wake all waiters ((1 << 30) simply means "wake as
    // many threads as are waiting").
    FutexWake(barrier, (1 << 30));
    return;
  }
  // Otherwise block until the last arrival of this round moves the epoch on.
  for (;;) {
    u32 cur = atomic_load(barrier, memory_order_relaxed);
    if (barrier_epoch(cur) != old_epoch)
      return;
    FutexWait(barrier, cur);
  }
}

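// Interface entry points for the memcpy/memset/memmove builtins. Descriptive
// note only: when PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE reports that
// memcpy and memmove are distinct on this platform, __tsan_memcpy uses the
// memcpy-specific implementation; otherwise it falls back to the
// overlap-tolerant memmove implementation, presumably because the two cannot
// be reliably told apart there.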
void *__tsan_memcpy(void *dst, const void *src, uptr size) {
  void *ctx;
#if PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE
  COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
#else
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
#endif
}

void *__tsan_memset(void *dst, int c, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, c, size);
}

void *__tsan_memmove(void *dst, const void *src, uptr size) {
  void *ctx;
  COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
}
}
