1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14 
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_tls_get_addr.h"
25 #include "interception/interception.h"
26 #include "tsan_interceptors.h"
27 #include "tsan_interface.h"
28 #include "tsan_platform.h"
29 #include "tsan_suppressions.h"
30 #include "tsan_rtl.h"
31 #include "tsan_mman.h"
32 #include "tsan_fd.h"
33 
34 using namespace __tsan;
35 
36 #if SANITIZER_FREEBSD || SANITIZER_MAC
37 #define stdout __stdoutp
38 #define stderr __stderrp
39 #endif
40 
41 #if SANITIZER_NETBSD
42 #define dirfd(dirp) (*(int *)(dirp))
43 #define fileno_unlocked(fp)              \
44   (((__sanitizer_FILE *)fp)->_file == -1 \
45        ? -1                              \
46        : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
47 
48 #define stdout ((__sanitizer_FILE*)&__sF[1])
49 #define stderr ((__sanitizer_FILE*)&__sF[2])
50 
51 #define nanosleep __nanosleep50
52 #define vfork __vfork14
53 #endif
54 
55 #if SANITIZER_ANDROID
56 #define mallopt(a, b)
57 #endif
58 
59 #ifdef __mips__
60 const int kSigCount = 129;
61 #else
62 const int kSigCount = 65;
63 #endif
64 
65 #ifdef __mips__
66 struct ucontext_t {
67   u64 opaque[768 / sizeof(u64) + 1];
68 };
69 #else
70 struct ucontext_t {
  // The size is determined by looking at the size of the real ucontext_t on Linux.
72   u64 opaque[936 / sizeof(u64) + 1];
73 };
74 #endif
75 
76 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
77 #define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
78 #elif defined(__aarch64__) || SANITIZER_PPC64V2
79 #define PTHREAD_ABI_BASE  "GLIBC_2.17"
80 #endif
81 
82 extern "C" int pthread_attr_init(void *attr);
83 extern "C" int pthread_attr_destroy(void *attr);
84 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
85 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
86 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
87 extern "C" int pthread_setspecific(unsigned key, const void *v);
88 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
89 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
90 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
91 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
92 extern "C" void *pthread_self();
93 extern "C" void _exit(int status);
94 #if !SANITIZER_NETBSD
95 extern "C" int fileno_unlocked(void *stream);
96 extern "C" int dirfd(void *dirp);
97 #endif
98 #if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
99 extern "C" int mallopt(int param, int value);
100 #endif
101 #if SANITIZER_NETBSD
102 extern __sanitizer_FILE __sF[];
103 #else
104 extern __sanitizer_FILE *stdout, *stderr;
105 #endif
106 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
107 const int PTHREAD_MUTEX_RECURSIVE = 1;
108 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
109 #else
110 const int PTHREAD_MUTEX_RECURSIVE = 2;
111 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
112 #endif
113 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
114 const int EPOLL_CTL_ADD = 1;
115 #endif
116 const int SIGILL = 4;
117 const int SIGTRAP = 5;
118 const int SIGABRT = 6;
119 const int SIGFPE = 8;
120 const int SIGSEGV = 11;
121 const int SIGPIPE = 13;
122 const int SIGTERM = 15;
123 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
124 const int SIGBUS = 10;
125 const int SIGSYS = 12;
126 #else
127 const int SIGBUS = 7;
128 const int SIGSYS = 31;
129 #endif
130 void *const MAP_FAILED = (void*)-1;
131 #if SANITIZER_NETBSD
132 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
133 #elif !SANITIZER_MAC
134 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
135 #endif
136 const int MAP_FIXED = 0x10;
137 typedef long long_t;
138 
139 // From /usr/include/unistd.h
140 # define F_ULOCK 0      /* Unlock a previously locked region.  */
141 # define F_LOCK  1      /* Lock a region for exclusive use.  */
142 # define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
143 # define F_TEST  3      /* Test a region for other processes locks.  */
144 
145 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
146 const int SA_SIGINFO = 0x40;
147 const int SIG_SETMASK = 3;
148 #elif defined(__mips__)
149 const int SA_SIGINFO = 8;
150 const int SIG_SETMASK = 3;
151 #else
152 const int SA_SIGINFO = 4;
153 const int SIG_SETMASK = 2;
154 #endif
155 
156 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
157   (cur_thread_init(), !cur_thread()->is_inited)
158 
159 namespace __tsan {
160 struct SignalDesc {
161   bool armed;
162   bool sigaction;
163   __sanitizer_siginfo siginfo;
164   ucontext_t ctx;
165 };
166 
167 struct ThreadSignalContext {
168   int int_signal_send;
169   atomic_uintptr_t in_blocking_func;
170   atomic_uintptr_t have_pending_signals;
171   SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for the stack.
173   __sanitizer_sigset_t emptyset;
174   __sanitizer_sigset_t oldset;
175 };
176 
177 // The sole reason tsan wraps atexit callbacks is to establish synchronization
178 // between callback setup and callback execution.
179 struct AtExitCtx {
180   void (*f)();
181   void *arg;
182 };
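
// For example (illustrative user code, not part of the runtime): if one thread
// initializes `prepared` and then registers the callback, while another thread
// later calls exit(), the Release at registration (setup_at_exit_wrapper) pairs
// with the Acquire in at_exit_wrapper, so the callback observes the write
// without a false race report:
//
//   static bool prepared;
//   static void cb() { if (prepared) { /* ... */ } }
//   void *worker(void *) { prepared = true; atexit(cb); return nullptr; }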
183 
184 // InterceptorContext holds all global data required for interceptors.
185 // It's explicitly constructed in InitializeInterceptors with placement new
186 // and is never destroyed. This allows usage of members with non-trivial
187 // constructors and destructors.
188 struct InterceptorContext {
189   // The object is 64-byte aligned, because we want hot data to be located
190   // in a single cache line if possible (it's accessed in every interceptor).
191   ALIGNED(64) LibIgnore libignore;
192   __sanitizer_sigaction sigactions[kSigCount];
193 #if !SANITIZER_MAC && !SANITIZER_NETBSD
194   unsigned finalize_key;
195 #endif
196 
197   BlockingMutex atexit_mu;
198   Vector<struct AtExitCtx *> AtExitStack;
199 
200   InterceptorContext()
201       : libignore(LINKER_INITIALIZED), AtExitStack() {
202   }
203 };
204 
205 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
206 InterceptorContext *interceptor_ctx() {
207   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
208 }
209 
210 LibIgnore *libignore() {
211   return &interceptor_ctx()->libignore;
212 }
213 
214 void InitializeLibIgnore() {
215   const SuppressionContext &supp = *Suppressions();
216   const uptr n = supp.SuppressionCount();
217   for (uptr i = 0; i < n; i++) {
218     const Suppression *s = supp.SuppressionAt(i);
219     if (0 == internal_strcmp(s->type, kSuppressionLib))
220       libignore()->AddIgnoredLibrary(s->templ);
221   }
222   if (flags()->ignore_noninstrumented_modules)
223     libignore()->IgnoreNoninstrumentedModules(true);
224   libignore()->OnLibraryLoaded(0);
225 }
226 
// The following two hooks can be used for cooperative scheduling when
// locking.
229 #ifdef TSAN_EXTERNAL_HOOKS
230 void OnPotentiallyBlockingRegionBegin();
231 void OnPotentiallyBlockingRegionEnd();
232 #else
233 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
234 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
235 #endif
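
// Illustrative sketch only (MyScheduler is a hypothetical cooperative
// scheduler): a build with TSAN_EXTERNAL_HOOKS could provide definitions such
// as:
//
//   namespace __tsan {
//   void OnPotentiallyBlockingRegionBegin() { MyScheduler::BeginBlocking(); }
//   void OnPotentiallyBlockingRegionEnd() { MyScheduler::EndBlocking(); }
//   }  // namespace __tsan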
236 
237 }  // namespace __tsan
238 
239 static ThreadSignalContext *SigCtx(ThreadState *thr) {
240   ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
241   if (ctx == 0 && !thr->is_dead) {
242     ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
243     MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
244     thr->signal_ctx = ctx;
245   }
246   return ctx;
247 }
248 
249 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
250                                      uptr pc)
251     : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
252   Initialize(thr);
253   if (!thr_->is_inited) return;
254   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
255   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
256   ignoring_ =
257       !thr_->in_ignored_lib && libignore()->IsIgnored(pc, &in_ignored_lib_);
258   EnableIgnores();
259 }
260 
261 ScopedInterceptor::~ScopedInterceptor() {
262   if (!thr_->is_inited) return;
263   DisableIgnores();
264   if (!thr_->ignore_interceptors) {
265     ProcessPendingSignals(thr_);
266     FuncExit(thr_);
267     CheckNoLocks(thr_);
268   }
269 }
270 
271 void ScopedInterceptor::EnableIgnores() {
272   if (ignoring_) {
273     ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
274     if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
275     if (in_ignored_lib_) {
276       DCHECK(!thr_->in_ignored_lib);
277       thr_->in_ignored_lib = true;
278     }
279   }
280 }
281 
282 void ScopedInterceptor::DisableIgnores() {
283   if (ignoring_) {
284     ThreadIgnoreEnd(thr_, pc_);
285     if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
286     if (in_ignored_lib_) {
287       DCHECK(thr_->in_ignored_lib);
288       thr_->in_ignored_lib = false;
289     }
290   }
291 }
292 
293 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
294 #if SANITIZER_FREEBSD
295 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
296 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
297 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
298 #elif SANITIZER_NETBSD
299 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
300 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
301          INTERCEPT_FUNCTION(__libc_##func)
302 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
303          INTERCEPT_FUNCTION(__libc_thr_##func)
304 #else
305 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
306 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
307 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
308 #endif
309 
310 #define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
311   MemoryAccessRange((thr), (pc), (uptr)(s),                         \
312     common_flags()->strict_string_checks ? (len) + 1 : (n), false)
313 
314 #define READ_STRING(thr, pc, s, n)                             \
315     READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
316 
317 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
318 
319 struct BlockingCall {
320   explicit BlockingCall(ThreadState *thr)
321       : thr(thr)
322       , ctx(SigCtx(thr)) {
323     for (;;) {
324       atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
325       if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
326         break;
327       atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
328       ProcessPendingSignals(thr);
329     }
330     // When we are in a "blocking call", we process signals asynchronously
331     // (right when they arrive). In this context we do not expect to be
332     // executing any user/runtime code. The known interceptor sequence when
333     // this is not true is: pthread_join -> munmap(stack). It's fine
334     // to ignore munmap in this case -- we handle stack shadow separately.
335     thr->ignore_interceptors++;
336   }
337 
338   ~BlockingCall() {
339     thr->ignore_interceptors--;
340     atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
341   }
342 
343   ThreadState *thr;
344   ThreadSignalContext *ctx;
345 };
346 
347 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
348   SCOPED_TSAN_INTERCEPTOR(sleep, sec);
349   unsigned res = BLOCK_REAL(sleep)(sec);
350   AfterSleep(thr, pc);
351   return res;
352 }
353 
354 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
355   SCOPED_TSAN_INTERCEPTOR(usleep, usec);
356   int res = BLOCK_REAL(usleep)(usec);
357   AfterSleep(thr, pc);
358   return res;
359 }
360 
361 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
362   SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
363   int res = BLOCK_REAL(nanosleep)(req, rem);
364   AfterSleep(thr, pc);
365   return res;
366 }
367 
368 TSAN_INTERCEPTOR(int, pause, int fake) {
369   SCOPED_TSAN_INTERCEPTOR(pause, fake);
370   return BLOCK_REAL(pause)(fake);
371 }
372 
373 static void at_exit_wrapper() {
374   AtExitCtx *ctx;
375   {
376     // Ensure thread-safety.
377     BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
378 
379     // Pop AtExitCtx from the top of the stack of callback functions
380     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
381     ctx = interceptor_ctx()->AtExitStack[element];
382     interceptor_ctx()->AtExitStack.PopBack();
383   }
384 
385   Acquire(cur_thread(), (uptr)0, (uptr)ctx);
386   ((void(*)())ctx->f)();
387   InternalFree(ctx);
388 }
389 
390 static void cxa_at_exit_wrapper(void *arg) {
391   Acquire(cur_thread(), 0, (uptr)arg);
392   AtExitCtx *ctx = (AtExitCtx*)arg;
393   ((void(*)(void *arg))ctx->f)(ctx->arg);
394   InternalFree(ctx);
395 }
396 
397 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
398       void *arg, void *dso);
399 
400 #if !SANITIZER_ANDROID
401 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
402   if (in_symbolizer())
403     return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
406   SCOPED_INTERCEPTOR_RAW(atexit, f);
407   return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
408 }
409 #endif
410 
411 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
412   if (in_symbolizer())
413     return 0;
414   SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
415   return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
416 }
417 
418 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
419       void *arg, void *dso) {
420   AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
421   ctx->f = f;
422   ctx->arg = arg;
423   Release(thr, pc, (uptr)ctx);
424   // Memory allocation in __cxa_atexit will race with free during exit,
425   // because we do not see synchronization around atexit callback list.
426   ThreadIgnoreBegin(thr, pc);
427   int res;
428   if (!dso) {
429     // NetBSD does not preserve the 2nd argument if dso is equal to 0
430     // Store ctx in a local stack-like structure
431 
432     // Ensure thread-safety.
433     BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
434 
435     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
436     // Push AtExitCtx on the top of the stack of callback functions
437     if (!res) {
438       interceptor_ctx()->AtExitStack.PushBack(ctx);
439     }
440   } else {
441     res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
442   }
443   ThreadIgnoreEnd(thr, pc);
444   return res;
445 }
446 
447 #if !SANITIZER_MAC && !SANITIZER_NETBSD
448 static void on_exit_wrapper(int status, void *arg) {
449   ThreadState *thr = cur_thread();
450   uptr pc = 0;
451   Acquire(thr, pc, (uptr)arg);
452   AtExitCtx *ctx = (AtExitCtx*)arg;
453   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
454   InternalFree(ctx);
455 }
456 
457 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
458   if (in_symbolizer())
459     return 0;
460   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
461   AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
462   ctx->f = (void(*)())f;
463   ctx->arg = arg;
464   Release(thr, pc, (uptr)ctx);
465   // Memory allocation in __cxa_atexit will race with free during exit,
466   // because we do not see synchronization around atexit callback list.
467   ThreadIgnoreBegin(thr, pc);
468   int res = REAL(on_exit)(on_exit_wrapper, ctx);
469   ThreadIgnoreEnd(thr, pc);
470   return res;
471 }
472 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
473 #else
474 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
475 #endif
476 
477 // Cleanup old bufs.
478 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
479   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
480     JmpBuf *buf = &thr->jmp_bufs[i];
481     if (buf->sp <= sp) {
482       uptr sz = thr->jmp_bufs.Size();
483       internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
484       thr->jmp_bufs.PopBack();
485       i--;
486     }
487   }
488 }
489 
490 static void SetJmp(ThreadState *thr, uptr sp) {
491   if (!thr->is_inited)  // called from libc guts during bootstrap
492     return;
493   // Cleanup old bufs.
494   JmpBufGarbageCollect(thr, sp);
495   // Remember the buf.
496   JmpBuf *buf = thr->jmp_bufs.PushBack();
497   buf->sp = sp;
498   buf->shadow_stack_pos = thr->shadow_stack_pos;
499   ThreadSignalContext *sctx = SigCtx(thr);
500   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
501   buf->in_blocking_func = sctx ?
502       atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
503       false;
504   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
505       memory_order_relaxed);
506 }
507 
508 static void LongJmp(ThreadState *thr, uptr *env) {
509   uptr sp = ExtractLongJmpSp(env);
510   // Find the saved buf with matching sp.
511   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
512     JmpBuf *buf = &thr->jmp_bufs[i];
513     if (buf->sp == sp) {
514       CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
515       // Unwind the stack.
516       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
517         FuncExit(thr);
518       ThreadSignalContext *sctx = SigCtx(thr);
519       if (sctx) {
520         sctx->int_signal_send = buf->int_signal_send;
521         atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
522             memory_order_relaxed);
523       }
524       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
525           memory_order_relaxed);
526       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
527       return;
528     }
529   }
530   Printf("ThreadSanitizer: can't find longjmp buf\n");
531   CHECK(0);
532 }
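
// Illustrative user code (not part of the runtime): a longjmp from g() back to
// the setjmp point in f() must pop tsan's shadow stack down to the depth saved
// by SetJmp, so that later reports attribute events to the right frames:
//
//   static jmp_buf env;
//   static void g() { longjmp(env, 1); }
//   static void f() { if (setjmp(env) == 0) g(); }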
533 
534 // FIXME: put everything below into a common extern "C" block?
535 extern "C" void __tsan_setjmp(uptr sp) {
536   cur_thread_init();
537   SetJmp(cur_thread(), sp);
538 }
539 
540 #if SANITIZER_MAC
541 TSAN_INTERCEPTOR(int, setjmp, void *env);
542 TSAN_INTERCEPTOR(int, _setjmp, void *env);
543 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
544 #else  // SANITIZER_MAC
545 
546 #if SANITIZER_NETBSD
547 #define setjmp_symname __setjmp14
548 #define sigsetjmp_symname __sigsetjmp14
549 #else
550 #define setjmp_symname setjmp
551 #define sigsetjmp_symname sigsetjmp
552 #endif
553 
554 #define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
555 #define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
556 #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
557 #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
558 
559 #define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
560 #define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
561 
562 // Not called.  Merely to satisfy TSAN_INTERCEPT().
563 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
564 int TSAN_INTERCEPTOR_SETJMP(void *env);
565 extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
566   CHECK(0);
567   return 0;
568 }
569 
570 // FIXME: any reason to have a separate declaration?
571 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
572 int __interceptor__setjmp(void *env);
573 extern "C" int __interceptor__setjmp(void *env) {
574   CHECK(0);
575   return 0;
576 }
577 
578 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
579 int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
580 extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
581   CHECK(0);
582   return 0;
583 }
584 
585 #if !SANITIZER_NETBSD
586 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
587 int __interceptor___sigsetjmp(void *env);
588 extern "C" int __interceptor___sigsetjmp(void *env) {
589   CHECK(0);
590   return 0;
591 }
592 #endif
593 
594 extern "C" int setjmp_symname(void *env);
595 extern "C" int _setjmp(void *env);
596 extern "C" int sigsetjmp_symname(void *env);
597 #if !SANITIZER_NETBSD
598 extern "C" int __sigsetjmp(void *env);
599 #endif
600 DEFINE_REAL(int, setjmp_symname, void *env)
601 DEFINE_REAL(int, _setjmp, void *env)
602 DEFINE_REAL(int, sigsetjmp_symname, void *env)
603 #if !SANITIZER_NETBSD
604 DEFINE_REAL(int, __sigsetjmp, void *env)
605 #endif
606 #endif  // SANITIZER_MAC
607 
608 #if SANITIZER_NETBSD
609 #define longjmp_symname __longjmp14
610 #define siglongjmp_symname __siglongjmp14
611 #else
612 #define longjmp_symname longjmp
613 #define siglongjmp_symname siglongjmp
614 #endif
615 
616 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
617   // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
618   // bad things will happen. We will jump over ScopedInterceptor dtor and can
619   // leave thr->in_ignored_lib set.
620   {
621     SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
622   }
623   LongJmp(cur_thread(), env);
624   REAL(longjmp_symname)(env, val);
625 }
626 
627 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
628   {
629     SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
630   }
631   LongJmp(cur_thread(), env);
632   REAL(siglongjmp_symname)(env, val);
633 }
634 
635 #if SANITIZER_NETBSD
636 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
637   {
638     SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
639   }
640   LongJmp(cur_thread(), env);
641   REAL(_longjmp)(env, val);
642 }
643 #endif
644 
645 #if !SANITIZER_MAC
646 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
647   if (in_symbolizer())
648     return InternalAlloc(size);
649   void *p = 0;
650   {
651     SCOPED_INTERCEPTOR_RAW(malloc, size);
652     p = user_alloc(thr, pc, size);
653   }
654   invoke_malloc_hook(p, size);
655   return p;
656 }
657 
658 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
659   SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
660   return user_memalign(thr, pc, align, sz);
661 }
662 
663 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
664   if (in_symbolizer())
665     return InternalCalloc(size, n);
666   void *p = 0;
667   {
668     SCOPED_INTERCEPTOR_RAW(calloc, size, n);
669     p = user_calloc(thr, pc, size, n);
670   }
671   invoke_malloc_hook(p, n * size);
672   return p;
673 }
674 
675 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
676   if (in_symbolizer())
677     return InternalRealloc(p, size);
678   if (p)
679     invoke_free_hook(p);
680   {
681     SCOPED_INTERCEPTOR_RAW(realloc, p, size);
682     p = user_realloc(thr, pc, p, size);
683   }
684   invoke_malloc_hook(p, size);
685   return p;
686 }
687 
688 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
689   if (in_symbolizer())
690     return InternalReallocArray(p, size, n);
691   if (p)
692     invoke_free_hook(p);
693   {
694     SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
695     p = user_reallocarray(thr, pc, p, size, n);
696   }
697   invoke_malloc_hook(p, size);
698   return p;
699 }
700 
701 TSAN_INTERCEPTOR(void, free, void *p) {
702   if (p == 0)
703     return;
704   if (in_symbolizer())
705     return InternalFree(p);
706   invoke_free_hook(p);
707   SCOPED_INTERCEPTOR_RAW(free, p);
708   user_free(thr, pc, p);
709 }
710 
711 TSAN_INTERCEPTOR(void, cfree, void *p) {
712   if (p == 0)
713     return;
714   if (in_symbolizer())
715     return InternalFree(p);
716   invoke_free_hook(p);
717   SCOPED_INTERCEPTOR_RAW(cfree, p);
718   user_free(thr, pc, p);
719 }
720 
721 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
722   SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
723   return user_alloc_usable_size(p);
724 }
725 #endif
726 
727 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
728   SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
729   uptr srclen = internal_strlen(src);
730   MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
731   MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
732   return REAL(strcpy)(dst, src);
733 }
734 
735 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
736   SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
737   uptr srclen = internal_strnlen(src, n);
738   MemoryAccessRange(thr, pc, (uptr)dst, n, true);
739   MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
740   return REAL(strncpy)(dst, src, n);
741 }
742 
743 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
744   SCOPED_TSAN_INTERCEPTOR(strdup, str);
745   // strdup will call malloc, so no instrumentation is required here.
746   return REAL(strdup)(str);
747 }
748 
749 // Zero out addr if it points into shadow memory and was provided as a hint
750 // only, i.e., MAP_FIXED is not set.
751 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
752   if (*addr) {
753     if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
754       if (flags & MAP_FIXED) {
755         errno = errno_EINVAL;
756         return false;
757       } else {
758         *addr = 0;
759       }
760     }
761   }
762   return true;
763 }
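
// For example (illustrative; kShadowHint stands for any address inside the
// shadow range): a plain hint that falls into shadow memory is dropped and the
// kernel picks the placement, while MAP_FIXED at such an address fails:
//
//   void *p = mmap(kShadowHint, 4096, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
//   void *q = mmap(kShadowHint, 4096, prot, MAP_PRIVATE | MAP_ANON | MAP_FIXED,
//                  -1, 0);  // returns MAP_FAILED with errno == EINVAL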
764 
765 template <class Mmap>
766 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
767                               void *addr, SIZE_T sz, int prot, int flags,
768                               int fd, OFF64_T off) {
769   if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
770   void *res = real_mmap(addr, sz, prot, flags, fd, off);
771   if (res != MAP_FAILED) {
772     if (fd > 0) FdAccess(thr, pc, fd);
773     MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
774   }
775   return res;
776 }
777 
778 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
779   SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
780   UnmapShadow(thr, (uptr)addr, sz);
781   int res = REAL(munmap)(addr, sz);
782   return res;
783 }
784 
785 #if SANITIZER_LINUX
786 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
787   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
788   return user_memalign(thr, pc, align, sz);
789 }
790 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
791 #else
792 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
793 #endif
794 
795 #if !SANITIZER_MAC
796 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
797   if (in_symbolizer())
798     return InternalAlloc(sz, nullptr, align);
799   SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
800   return user_aligned_alloc(thr, pc, align, sz);
801 }
802 
803 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
804   if (in_symbolizer())
805     return InternalAlloc(sz, nullptr, GetPageSizeCached());
806   SCOPED_INTERCEPTOR_RAW(valloc, sz);
807   return user_valloc(thr, pc, sz);
808 }
809 #endif
810 
811 #if SANITIZER_LINUX
812 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
813   if (in_symbolizer()) {
814     uptr PageSize = GetPageSizeCached();
815     sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
816     return InternalAlloc(sz, nullptr, PageSize);
817   }
818   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
819   return user_pvalloc(thr, pc, sz);
820 }
821 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
822 #else
823 #define TSAN_MAYBE_INTERCEPT_PVALLOC
824 #endif
825 
826 #if !SANITIZER_MAC
827 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
828   if (in_symbolizer()) {
829     void *p = InternalAlloc(sz, nullptr, align);
830     if (!p)
831       return errno_ENOMEM;
832     *memptr = p;
833     return 0;
834   }
835   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
836   return user_posix_memalign(thr, pc, memptr, align, sz);
837 }
838 #endif
839 
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are specially defined as weak functions (so that they don't
// cause link errors when the user defines them as well), so they silently
// auto-disable themselves when such a symbol is already present in the binary.
// If we link libstdc++ statically, it will bring its own __cxa_guard_acquire,
// which will silently replace our interceptor. That's why on Linux we simply
// export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
850 #if SANITIZER_MAC
851 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
852 #else
853 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
854   extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
855 #endif
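
// Illustrative only: the kind of user code the compiler lowers to
// __cxa_guard_* calls. The Release in __cxa_guard_release pairs with the
// Acquire in __cxa_guard_acquire, so threads that lose the initialization race
// see the fully constructed object without a false report:
//
//   Widget &instance() {        // Widget is a hypothetical user type
//     static Widget w;          // guarded by __cxa_guard_acquire/release
//     return w;
//   }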
856 
857 // Used in thread-safe function static initialization.
858 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
859   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
860   OnPotentiallyBlockingRegionBegin();
861   auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
862   for (;;) {
863     u32 cmp = atomic_load(g, memory_order_acquire);
864     if (cmp == 0) {
865       if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
866         return 1;
867     } else if (cmp == 1) {
868       Acquire(thr, pc, (uptr)g);
869       return 0;
870     } else {
871       internal_sched_yield();
872     }
873   }
874 }
875 
876 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
877   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
878   Release(thr, pc, (uptr)g);
879   atomic_store(g, 1, memory_order_release);
880 }
881 
882 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
883   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
884   atomic_store(g, 0, memory_order_relaxed);
885 }
886 
887 namespace __tsan {
888 void DestroyThreadState() {
889   ThreadState *thr = cur_thread();
890   Processor *proc = thr->proc();
891   ThreadFinish(thr);
892   ProcUnwire(proc, thr);
893   ProcDestroy(proc);
894   ThreadSignalContext *sctx = thr->signal_ctx;
895   if (sctx) {
896     thr->signal_ctx = 0;
897     UnmapOrDie(sctx, sizeof(*sctx));
898   }
899   DTLS_Destroy();
900   cur_thread_finalize();
901 }
902 }  // namespace __tsan
903 
904 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
905 static void thread_finalize(void *v) {
906   uptr iter = (uptr)v;
907   if (iter > 1) {
908     if (pthread_setspecific(interceptor_ctx()->finalize_key,
909         (void*)(iter - 1))) {
910       Printf("ThreadSanitizer: failed to set thread key\n");
911       Die();
912     }
913     return;
914   }
915   DestroyThreadState();
916 }
917 #endif
918 
919 
920 struct ThreadParam {
921   void* (*callback)(void *arg);
922   void *param;
923   atomic_uintptr_t tid;
924 };
925 
926 extern "C" void *__tsan_thread_start_func(void *arg) {
927   ThreadParam *p = (ThreadParam*)arg;
928   void* (*callback)(void *arg) = p->callback;
929   void *param = p->param;
930   int tid = 0;
931   {
932     cur_thread_init();
933     ThreadState *thr = cur_thread();
934     // Thread-local state is not initialized yet.
935     ScopedIgnoreInterceptors ignore;
936 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
937     ThreadIgnoreBegin(thr, 0);
938     if (pthread_setspecific(interceptor_ctx()->finalize_key,
939                             (void *)GetPthreadDestructorIterations())) {
940       Printf("ThreadSanitizer: failed to set thread key\n");
941       Die();
942     }
943     ThreadIgnoreEnd(thr, 0);
944 #endif
945     while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
946       internal_sched_yield();
947     Processor *proc = ProcCreate();
948     ProcWire(proc, thr);
949     ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
950     atomic_store(&p->tid, 0, memory_order_release);
951   }
952   void *res = callback(param);
  // Prevent the callback from being tail-called,
  // as that mixes up stack traces.
955   volatile int foo = 42;
956   foo++;
957   return res;
958 }
959 
960 TSAN_INTERCEPTOR(int, pthread_create,
961     void *th, void *attr, void *(*callback)(void*), void * param) {
962   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
963 
964   MaybeSpawnBackgroundThread();
965 
966   if (ctx->after_multithreaded_fork) {
967     if (flags()->die_after_fork) {
968       Report("ThreadSanitizer: starting new threads after multi-threaded "
969           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
970       Die();
971     } else {
972       VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
973           "fork is not supported (pid %d). Continuing because of "
974           "die_after_fork=0, but you are on your own\n", internal_getpid());
975     }
976   }
977   __sanitizer_pthread_attr_t myattr;
978   if (attr == 0) {
979     pthread_attr_init(&myattr);
980     attr = &myattr;
981   }
982   int detached = 0;
983   REAL(pthread_attr_getdetachstate)(attr, &detached);
984   AdjustStackSize(attr);
985 
986   ThreadParam p;
987   p.callback = callback;
988   p.param = param;
989   atomic_store(&p.tid, 0, memory_order_relaxed);
990   int res = -1;
991   {
992     // Otherwise we see false positives in pthread stack manipulation.
993     ScopedIgnoreInterceptors ignore;
994     ThreadIgnoreBegin(thr, pc);
995     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
996     ThreadIgnoreEnd(thr, pc);
997   }
998   if (res == 0) {
999     int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
1000     CHECK_NE(tid, 0);
1001     // Synchronization on p.tid serves two purposes:
1002     // 1. ThreadCreate must finish before the new thread starts.
1003     //    Otherwise the new thread can call pthread_detach, but the pthread_t
1004     //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1005     // 2. ThreadStart must finish before this thread continues.
1006     //    Otherwise, this thread can call pthread_detach and reset thr->sync
1007     //    before the new thread got a chance to acquire from it in ThreadStart.
1008     atomic_store(&p.tid, tid, memory_order_release);
1009     while (atomic_load(&p.tid, memory_order_acquire) != 0)
1010       internal_sched_yield();
1011   }
1012   if (attr == &myattr)
1013     pthread_attr_destroy(&myattr);
1014   return res;
1015 }
1016 
1017 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1018   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1019   int tid = ThreadTid(thr, pc, (uptr)th);
1020   ThreadIgnoreBegin(thr, pc);
1021   int res = BLOCK_REAL(pthread_join)(th, ret);
1022   ThreadIgnoreEnd(thr, pc);
1023   if (res == 0) {
1024     ThreadJoin(thr, pc, tid);
1025   }
1026   return res;
1027 }
1028 
1029 DEFINE_REAL_PTHREAD_FUNCTIONS
1030 
1031 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1032   SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
1033   int tid = ThreadTid(thr, pc, (uptr)th);
1034   int res = REAL(pthread_detach)(th);
1035   if (res == 0) {
1036     ThreadDetach(thr, pc, tid);
1037   }
1038   return res;
1039 }
1040 
1041 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1042   {
1043     SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1044 #if !SANITIZER_MAC && !SANITIZER_ANDROID
1045     CHECK_EQ(thr, &cur_thread_placeholder);
1046 #endif
1047   }
1048   REAL(pthread_exit)(retval);
1049 }
1050 
1051 #if SANITIZER_LINUX
1052 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1053   SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
1054   int tid = ThreadTid(thr, pc, (uptr)th);
1055   ThreadIgnoreBegin(thr, pc);
1056   int res = REAL(pthread_tryjoin_np)(th, ret);
1057   ThreadIgnoreEnd(thr, pc);
1058   if (res == 0)
1059     ThreadJoin(thr, pc, tid);
1060   else
1061     ThreadNotJoined(thr, pc, tid, (uptr)th);
1062   return res;
1063 }
1064 
1065 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1066                  const struct timespec *abstime) {
1067   SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
1068   int tid = ThreadTid(thr, pc, (uptr)th);
1069   ThreadIgnoreBegin(thr, pc);
1070   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1071   ThreadIgnoreEnd(thr, pc);
1072   if (res == 0)
1073     ThreadJoin(thr, pc, tid);
1074   else
1075     ThreadNotJoined(thr, pc, tid, (uptr)th);
1076   return res;
1077 }
1078 #endif
1079 
// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has a different size in the two versions.
// If we call the new REAL functions for an old pthread_cond_t, they will
// corrupt memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions for a new pthread_cond_t, we will lose
// some functionality (e.g. the old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require 2 versions of the interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
1095 static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. We always call the new REAL functions, but
  // pass the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // the first word of pthread_cond_t to zero.
  // All of this is relevant only on Linux.
1103   if (!common_flags()->legacy_pthread_cond)
1104     return c;
1105   atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1106   uptr cond = atomic_load(p, memory_order_acquire);
1107   if (!force && cond != 0)
1108     return (void*)cond;
1109   void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1110   internal_memset(newcond, 0, pthread_cond_t_sz);
1111   if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1112       memory_order_acq_rel))
1113     return newcond;
1114   WRAP(free)(newcond);
1115   return (void*)cond;
1116 }
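
// Usage note (illustrative): to run a binary built against the old
// pthread_cond ABI one would enable the mode at startup, e.g.
//   TSAN_OPTIONS=legacy_pthread_cond=1 ./app
// assuming the flag is picked up from TSAN_OPTIONS like other common flags.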
1117 
1118 struct CondMutexUnlockCtx {
1119   ScopedInterceptor *si;
1120   ThreadState *thr;
1121   uptr pc;
1122   void *m;
1123 };
1124 
1125 static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
1126   // pthread_cond_wait interceptor has enabled async signal delivery
1127   // (see BlockingCall below). Disable async signals since we are running
1128   // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1129   // since the thread is cancelled, so we have to manually execute them
1130   // (the thread still can run some user code due to pthread_cleanup_push).
1131   ThreadSignalContext *ctx = SigCtx(arg->thr);
1132   CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1133   atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1134   MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
1135   // Undo BlockingCall ctor effects.
1136   arg->thr->ignore_interceptors--;
1137   arg->si->~ScopedInterceptor();
1138 }
1139 
1140 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1141   void *cond = init_cond(c, true);
1142   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1143   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1144   return REAL(pthread_cond_init)(cond, a);
1145 }
1146 
1147 static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
1148                      int (*fn)(void *c, void *m, void *abstime), void *c,
1149                      void *m, void *t) {
1150   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1151   MutexUnlock(thr, pc, (uptr)m);
1152   CondMutexUnlockCtx arg = {si, thr, pc, m};
1153   int res = 0;
1154   // This ensures that we handle mutex lock even in case of pthread_cancel.
1155   // See test/tsan/cond_cancel.cpp.
1156   {
1157     // Enable signal delivery while the thread is blocked.
1158     BlockingCall bc(thr);
1159     res = call_pthread_cancel_with_cleanup(
1160         fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
1161   }
1162   if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1163   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1164   return res;
1165 }
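
// Illustrative scenario (cf. test/tsan/cond_cancel.cpp; unlock_and_cleanup is
// a hypothetical cleanup handler): a thread cancelled while blocked in
// pthread_cond_wait re-acquires the mutex before its cleanup handlers run, so
// cond_mutex_unlock above must record the lock for tsan before user cleanup
// code touches the data the mutex protects:
//
//   pthread_cleanup_push(unlock_and_cleanup, &m);
//   pthread_cond_wait(&c, &m);   // pthread_cancel() may arrive here
//   pthread_cleanup_pop(0);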
1166 
1167 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1168   void *cond = init_cond(c);
1169   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1170   return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
1171                                      pthread_cond_wait),
1172                    cond, m, 0);
1173 }
1174 
1175 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1176   void *cond = init_cond(c);
1177   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1178   return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
1179                    abstime);
1180 }
1181 
1182 #if SANITIZER_MAC
1183 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1184             void *reltime) {
1185   void *cond = init_cond(c);
1186   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1187   return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
1188                    m, reltime);
1189 }
1190 #endif
1191 
1192 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1193   void *cond = init_cond(c);
1194   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1195   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1196   return REAL(pthread_cond_signal)(cond);
1197 }
1198 
1199 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1200   void *cond = init_cond(c);
1201   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1202   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1203   return REAL(pthread_cond_broadcast)(cond);
1204 }
1205 
1206 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1207   void *cond = init_cond(c);
1208   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1209   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1210   int res = REAL(pthread_cond_destroy)(cond);
1211   if (common_flags()->legacy_pthread_cond) {
1212     // Free our aux cond and zero the pointer to not leave dangling pointers.
1213     WRAP(free)(cond);
1214     atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1215   }
1216   return res;
1217 }
1218 
1219 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1220   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1221   int res = REAL(pthread_mutex_init)(m, a);
1222   if (res == 0) {
1223     u32 flagz = 0;
1224     if (a) {
1225       int type = 0;
1226       if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1227         if (type == PTHREAD_MUTEX_RECURSIVE ||
1228             type == PTHREAD_MUTEX_RECURSIVE_NP)
1229           flagz |= MutexFlagWriteReentrant;
1230     }
1231     MutexCreate(thr, pc, (uptr)m, flagz);
1232   }
1233   return res;
1234 }
1235 
1236 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1237   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1238   int res = REAL(pthread_mutex_destroy)(m);
1239   if (res == 0 || res == errno_EBUSY) {
1240     MutexDestroy(thr, pc, (uptr)m);
1241   }
1242   return res;
1243 }
1244 
1245 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1246   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1247   int res = REAL(pthread_mutex_trylock)(m);
1248   if (res == errno_EOWNERDEAD)
1249     MutexRepair(thr, pc, (uptr)m);
1250   if (res == 0 || res == errno_EOWNERDEAD)
1251     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1252   return res;
1253 }
1254 
1255 #if !SANITIZER_MAC
1256 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1257   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1258   int res = REAL(pthread_mutex_timedlock)(m, abstime);
1259   if (res == 0) {
1260     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1261   }
1262   return res;
1263 }
1264 #endif
1265 
1266 #if !SANITIZER_MAC
1267 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1268   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1269   int res = REAL(pthread_spin_init)(m, pshared);
1270   if (res == 0) {
1271     MutexCreate(thr, pc, (uptr)m);
1272   }
1273   return res;
1274 }
1275 
1276 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1277   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1278   int res = REAL(pthread_spin_destroy)(m);
1279   if (res == 0) {
1280     MutexDestroy(thr, pc, (uptr)m);
1281   }
1282   return res;
1283 }
1284 
1285 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1286   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1287   MutexPreLock(thr, pc, (uptr)m);
1288   int res = REAL(pthread_spin_lock)(m);
1289   if (res == 0) {
1290     MutexPostLock(thr, pc, (uptr)m);
1291   }
1292   return res;
1293 }
1294 
1295 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1296   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1297   int res = REAL(pthread_spin_trylock)(m);
1298   if (res == 0) {
1299     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1300   }
1301   return res;
1302 }
1303 
1304 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1305   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1306   MutexUnlock(thr, pc, (uptr)m);
1307   int res = REAL(pthread_spin_unlock)(m);
1308   return res;
1309 }
1310 #endif
1311 
1312 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1313   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1314   int res = REAL(pthread_rwlock_init)(m, a);
1315   if (res == 0) {
1316     MutexCreate(thr, pc, (uptr)m);
1317   }
1318   return res;
1319 }
1320 
1321 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1322   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1323   int res = REAL(pthread_rwlock_destroy)(m);
1324   if (res == 0) {
1325     MutexDestroy(thr, pc, (uptr)m);
1326   }
1327   return res;
1328 }
1329 
1330 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1331   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1332   MutexPreReadLock(thr, pc, (uptr)m);
1333   int res = REAL(pthread_rwlock_rdlock)(m);
1334   if (res == 0) {
1335     MutexPostReadLock(thr, pc, (uptr)m);
1336   }
1337   return res;
1338 }
1339 
1340 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1341   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1342   int res = REAL(pthread_rwlock_tryrdlock)(m);
1343   if (res == 0) {
1344     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1345   }
1346   return res;
1347 }
1348 
1349 #if !SANITIZER_MAC
1350 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1351   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1352   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1353   if (res == 0) {
1354     MutexPostReadLock(thr, pc, (uptr)m);
1355   }
1356   return res;
1357 }
1358 #endif
1359 
1360 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1361   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1362   MutexPreLock(thr, pc, (uptr)m);
1363   int res = REAL(pthread_rwlock_wrlock)(m);
1364   if (res == 0) {
1365     MutexPostLock(thr, pc, (uptr)m);
1366   }
1367   return res;
1368 }
1369 
1370 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1371   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1372   int res = REAL(pthread_rwlock_trywrlock)(m);
1373   if (res == 0) {
1374     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1375   }
1376   return res;
1377 }
1378 
1379 #if !SANITIZER_MAC
1380 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1381   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1382   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1383   if (res == 0) {
1384     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1385   }
1386   return res;
1387 }
1388 #endif
1389 
1390 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1391   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1392   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1393   int res = REAL(pthread_rwlock_unlock)(m);
1394   return res;
1395 }
1396 
1397 #if !SANITIZER_MAC
1398 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1399   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1400   MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1401   int res = REAL(pthread_barrier_init)(b, a, count);
1402   return res;
1403 }
1404 
1405 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1406   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1407   MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
1408   int res = REAL(pthread_barrier_destroy)(b);
1409   return res;
1410 }
1411 
1412 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1413   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1414   Release(thr, pc, (uptr)b);
1415   MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1416   int res = REAL(pthread_barrier_wait)(b);
1417   MemoryRead(thr, pc, (uptr)b, kSizeLog1);
1418   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1419     Acquire(thr, pc, (uptr)b);
1420   }
1421   return res;
1422 }
1423 #endif
1424 
1425 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1426   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1427   if (o == 0 || f == 0)
1428     return errno_EINVAL;
1429   atomic_uint32_t *a;
1430 
1431   if (SANITIZER_MAC)
1432     a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1433   else if (SANITIZER_NETBSD)
1434     a = static_cast<atomic_uint32_t*>
1435           ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1436   else
1437     a = static_cast<atomic_uint32_t*>(o);
1438 
1439   u32 v = atomic_load(a, memory_order_acquire);
1440   if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
1441                                                memory_order_relaxed)) {
1442     (*f)();
1443     if (!thr->in_ignored_lib)
1444       Release(thr, pc, (uptr)o);
1445     atomic_store(a, 2, memory_order_release);
1446   } else {
1447     while (v != 2) {
1448       internal_sched_yield();
1449       v = atomic_load(a, memory_order_acquire);
1450     }
1451     if (!thr->in_ignored_lib)
1452       Acquire(thr, pc, (uptr)o);
1453   }
1454   return 0;
1455 }
1456 
1457 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1458 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1459   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1460   if (fd > 0)
1461     FdAccess(thr, pc, fd);
1462   return REAL(__fxstat)(version, fd, buf);
1463 }
1464 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1465 #else
1466 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1467 #endif
1468 
1469 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1470 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
1471   SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1472   if (fd > 0)
1473     FdAccess(thr, pc, fd);
1474   return REAL(fstat)(fd, buf);
1475 #else
1476   SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1477   if (fd > 0)
1478     FdAccess(thr, pc, fd);
1479   return REAL(__fxstat)(0, fd, buf);
1480 #endif
1481 }
1482 
1483 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1484 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1485   SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1486   if (fd > 0)
1487     FdAccess(thr, pc, fd);
1488   return REAL(__fxstat64)(version, fd, buf);
1489 }
1490 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1491 #else
1492 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1493 #endif
1494 
1495 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1496 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1497   SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1498   if (fd > 0)
1499     FdAccess(thr, pc, fd);
1500   return REAL(__fxstat64)(0, fd, buf);
1501 }
1502 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1503 #else
1504 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1505 #endif
1506 
1507 TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
1508   SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
1509   READ_STRING(thr, pc, name, 0);
1510   int fd = REAL(open)(name, flags, mode);
1511   if (fd >= 0)
1512     FdFileCreate(thr, pc, fd);
1513   return fd;
1514 }
1515 
1516 #if SANITIZER_LINUX
1517 TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
1518   SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
1519   READ_STRING(thr, pc, name, 0);
1520   int fd = REAL(open64)(name, flags, mode);
1521   if (fd >= 0)
1522     FdFileCreate(thr, pc, fd);
1523   return fd;
1524 }
1525 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1526 #else
1527 #define TSAN_MAYBE_INTERCEPT_OPEN64
1528 #endif
1529 
1530 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1531   SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1532   READ_STRING(thr, pc, name, 0);
1533   int fd = REAL(creat)(name, mode);
1534   if (fd >= 0)
1535     FdFileCreate(thr, pc, fd);
1536   return fd;
1537 }
1538 
1539 #if SANITIZER_LINUX
1540 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1541   SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1542   READ_STRING(thr, pc, name, 0);
1543   int fd = REAL(creat64)(name, mode);
1544   if (fd >= 0)
1545     FdFileCreate(thr, pc, fd);
1546   return fd;
1547 }
1548 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1549 #else
1550 #define TSAN_MAYBE_INTERCEPT_CREAT64
1551 #endif
1552 
1553 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1554   SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1555   int newfd = REAL(dup)(oldfd);
1556   if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1557     FdDup(thr, pc, oldfd, newfd, true);
1558   return newfd;
1559 }
1560 
1561 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1562   SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1563   int newfd2 = REAL(dup2)(oldfd, newfd);
1564   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1565     FdDup(thr, pc, oldfd, newfd2, false);
1566   return newfd2;
1567 }
1568 
1569 #if !SANITIZER_MAC
1570 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1571   SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1572   int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1573   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1574     FdDup(thr, pc, oldfd, newfd2, false);
1575   return newfd2;
1576 }
1577 #endif
1578 
1579 #if SANITIZER_LINUX
1580 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1581   SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1582   int fd = REAL(eventfd)(initval, flags);
1583   if (fd >= 0)
1584     FdEventCreate(thr, pc, fd);
1585   return fd;
1586 }
1587 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1588 #else
1589 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1590 #endif
1591 
1592 #if SANITIZER_LINUX
1593 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1594   SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1595   if (fd >= 0)
1596     FdClose(thr, pc, fd);
1597   fd = REAL(signalfd)(fd, mask, flags);
1598   if (fd >= 0)
1599     FdSignalCreate(thr, pc, fd);
1600   return fd;
1601 }
1602 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1603 #else
1604 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1605 #endif
1606 
1607 #if SANITIZER_LINUX
1608 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1609   SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1610   int fd = REAL(inotify_init)(fake);
1611   if (fd >= 0)
1612     FdInotifyCreate(thr, pc, fd);
1613   return fd;
1614 }
1615 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1616 #else
1617 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1618 #endif
1619 
1620 #if SANITIZER_LINUX
1621 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1622   SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1623   int fd = REAL(inotify_init1)(flags);
1624   if (fd >= 0)
1625     FdInotifyCreate(thr, pc, fd);
1626   return fd;
1627 }
1628 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1629 #else
1630 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1631 #endif
1632 
1633 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1634   SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1635   int fd = REAL(socket)(domain, type, protocol);
1636   if (fd >= 0)
1637     FdSocketCreate(thr, pc, fd);
1638   return fd;
1639 }
1640 
1641 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1642   SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1643   int res = REAL(socketpair)(domain, type, protocol, fd);
1644   if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1645     FdPipeCreate(thr, pc, fd[0], fd[1]);
1646   return res;
1647 }
1648 
1649 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1650   SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1651   FdSocketConnecting(thr, pc, fd);
1652   int res = REAL(connect)(fd, addr, addrlen);
1653   if (res == 0 && fd >= 0)
1654     FdSocketConnect(thr, pc, fd);
1655   return res;
1656 }
1657 
1658 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1659   SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1660   int res = REAL(bind)(fd, addr, addrlen);
1661   if (fd > 0 && res == 0)
1662     FdAccess(thr, pc, fd);
1663   return res;
1664 }
1665 
1666 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1667   SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1668   int res = REAL(listen)(fd, backlog);
1669   if (fd > 0 && res == 0)
1670     FdAccess(thr, pc, fd);
1671   return res;
1672 }
1673 
1674 TSAN_INTERCEPTOR(int, close, int fd) {
1675   SCOPED_TSAN_INTERCEPTOR(close, fd);
1676   if (fd >= 0)
1677     FdClose(thr, pc, fd);
1678   return REAL(close)(fd);
1679 }
1680 
1681 #if SANITIZER_LINUX
1682 TSAN_INTERCEPTOR(int, __close, int fd) {
1683   SCOPED_TSAN_INTERCEPTOR(__close, fd);
1684   if (fd >= 0)
1685     FdClose(thr, pc, fd);
1686   return REAL(__close)(fd);
1687 }
1688 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1689 #else
1690 #define TSAN_MAYBE_INTERCEPT___CLOSE
1691 #endif
1692 
1693 // glibc guts
1694 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1695 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1696   SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1697   int fds[64];
1698   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1699   for (int i = 0; i < cnt; i++) {
1700     if (fds[i] > 0)
1701       FdClose(thr, pc, fds[i]);
1702   }
1703   REAL(__res_iclose)(state, free_addr);
1704 }
1705 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1706 #else
1707 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1708 #endif
1709 
1710 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1711   SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1712   int res = REAL(pipe)(pipefd);
1713   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1714     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1715   return res;
1716 }
1717 
1718 #if !SANITIZER_MAC
1719 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1720   SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1721   int res = REAL(pipe2)(pipefd, flags);
1722   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1723     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1724   return res;
1725 }
1726 #endif
1727 
1728 TSAN_INTERCEPTOR(int, unlink, char *path) {
1729   SCOPED_TSAN_INTERCEPTOR(unlink, path);
1730   Release(thr, pc, File2addr(path));
1731   int res = REAL(unlink)(path);
1732   return res;
1733 }
1734 
1735 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1736   SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1737   void *res = REAL(tmpfile)(fake);
1738   if (res) {
1739     int fd = fileno_unlocked(res);
1740     if (fd >= 0)
1741       FdFileCreate(thr, pc, fd);
1742   }
1743   return res;
1744 }
1745 
1746 #if SANITIZER_LINUX
1747 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1748   SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1749   void *res = REAL(tmpfile64)(fake);
1750   if (res) {
1751     int fd = fileno_unlocked(res);
1752     if (fd >= 0)
1753       FdFileCreate(thr, pc, fd);
1754   }
1755   return res;
1756 }
1757 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1758 #else
1759 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1760 #endif
1761 
1762 static void FlushStreams() {
1763   // Flushing all the streams here may freeze the process if a child thread is
1764   // performing file stream operations at the same time.
1765   REAL(fflush)(stdout);
1766   REAL(fflush)(stderr);
1767 }
1768 
1769 TSAN_INTERCEPTOR(void, abort, int fake) {
1770   SCOPED_TSAN_INTERCEPTOR(abort, fake);
1771   FlushStreams();
1772   REAL(abort)(fake);
1773 }
1774 
1775 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1776   SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1777   Release(thr, pc, Dir2addr(path));
1778   int res = REAL(rmdir)(path);
1779   return res;
1780 }
1781 
1782 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1783   SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1784   if (dirp) {
1785     int fd = dirfd(dirp);
1786     FdClose(thr, pc, fd);
1787   }
1788   return REAL(closedir)(dirp);
1789 }
1790 
1791 #if SANITIZER_LINUX
1792 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1793   SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1794   int fd = REAL(epoll_create)(size);
1795   if (fd >= 0)
1796     FdPollCreate(thr, pc, fd);
1797   return fd;
1798 }
1799 
1800 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1801   SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1802   int fd = REAL(epoll_create1)(flags);
1803   if (fd >= 0)
1804     FdPollCreate(thr, pc, fd);
1805   return fd;
1806 }
1807 
1808 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1809   SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1810   if (epfd >= 0)
1811     FdAccess(thr, pc, epfd);
1812   if (epfd >= 0 && fd >= 0)
1813     FdAccess(thr, pc, fd);
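  // Adding an fd to an epoll set is treated as a release on epfd; the matching
  // acquire happens in epoll_wait/epoll_pwait below, which creates a
  // happens-before edge from the registering thread to the waiting thread.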
1814   if (op == EPOLL_CTL_ADD && epfd >= 0)
1815     FdRelease(thr, pc, epfd);
1816   int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1817   return res;
1818 }
1819 
1820 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1821   SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1822   if (epfd >= 0)
1823     FdAccess(thr, pc, epfd);
1824   int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1825   if (res > 0 && epfd >= 0)
1826     FdAcquire(thr, pc, epfd);
1827   return res;
1828 }
1829 
1830 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1831                  void *sigmask) {
1832   SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1833   if (epfd >= 0)
1834     FdAccess(thr, pc, epfd);
1835   int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1836   if (res > 0 && epfd >= 0)
1837     FdAcquire(thr, pc, epfd);
1838   return res;
1839 }
1840 
1841 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1842     TSAN_INTERCEPT(epoll_create); \
1843     TSAN_INTERCEPT(epoll_create1); \
1844     TSAN_INTERCEPT(epoll_ctl); \
1845     TSAN_INTERCEPT(epoll_wait); \
1846     TSAN_INTERCEPT(epoll_pwait)
1847 #else
1848 #define TSAN_MAYBE_INTERCEPT_EPOLL
1849 #endif
1850 
1851 // The following functions are intercepted merely to process pending signals.
1852 // If the program blocks signal X, we must deliver the signal before the
1853 // function returns. Similarly, if the program unblocks a signal (or returns
1854 // from sigsuspend), it's better to deliver the signal straight away.
1855 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1856   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1857   return REAL(sigsuspend)(mask);
1858 }
1859 
1860 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1861   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1862   return REAL(sigblock)(mask);
1863 }
1864 
1865 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1866   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1867   return REAL(sigsetmask)(mask);
1868 }
1869 
1870 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1871     __sanitizer_sigset_t *oldset) {
1872   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1873   return REAL(pthread_sigmask)(how, set, oldset);
1874 }
1875 
1876 namespace __tsan {
1877 
1878 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1879                                   bool sigact, int sig,
1880                                   __sanitizer_siginfo *info, void *uctx) {
1881   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
1882   if (acquire)
1883     Acquire(thr, 0, (uptr)&sigactions[sig]);
1884   // Signals are generally asynchronous, so if we receive a signal while
1885   // ignores are enabled, we should disable the ignores. This is critical for
1886   // sync and interceptors, because otherwise we can miss synchronization and
1887   // report false races.
1888   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1889   int ignore_interceptors = thr->ignore_interceptors;
1890   int ignore_sync = thr->ignore_sync;
1891   if (!ctx->after_multithreaded_fork) {
1892     thr->ignore_reads_and_writes = 0;
1893     thr->fast_state.ClearIgnoreBit();
1894     thr->ignore_interceptors = 0;
1895     thr->ignore_sync = 0;
1896   }
1897   // Ensure that the handler does not spoil errno.
1898   const int saved_errno = errno;
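  // 99 is an arbitrary sentinel value: if errno differs from it after the
  // handler returns, the handler has clobbered errno (reported below).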
1899   errno = 99;
1900   // This code races with sigaction. Be careful not to read sa_sigaction twice.
1901   // We also need to remember pc for reporting before the call,
1902   // because the handler can reset it.
1903   volatile uptr pc =
1904       sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
1905   if (pc != sig_dfl && pc != sig_ign) {
1906     if (sigact)
1907       ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
1908     else
1909       ((__sanitizer_sighandler_ptr)pc)(sig);
1910   }
1911   if (!ctx->after_multithreaded_fork) {
1912     thr->ignore_reads_and_writes = ignore_reads_and_writes;
1913     if (ignore_reads_and_writes)
1914       thr->fast_state.SetIgnoreBit();
1915     thr->ignore_interceptors = ignore_interceptors;
1916     thr->ignore_sync = ignore_sync;
1917   }
1918   // We do not detect errno spoiling for SIGTERM, because some SIGTERM
1919   // handlers do spoil errno but reraise SIGTERM, and tsan would report a
1920   // false positive in that case. It's difficult to properly detect this
1921   // situation (the reraise), because in the async signal processing case
1922   // (when the handler is called directly from rtl_generic_sighandler) we have
1923   // not yet received the reraised signal, and it looks too fragile to
1924   // intercept all the ways to reraise a signal.
1925   if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1926     VarSizeStackTrace stack;
1927     // StackTrace::GetNextInstructionPc(pc) is used because a return address
1928     // is expected; OutputReport() will undo this.
1929     ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1930     ThreadRegistryLock l(ctx->thread_registry);
1931     ScopedReport rep(ReportTypeErrnoInSignal);
1932     if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1933       rep.AddStack(stack, true);
1934       OutputReport(thr, rep);
1935     }
1936   }
1937   errno = saved_errno;
1938 }
1939 
1940 void ProcessPendingSignals(ThreadState *thr) {
1941   ThreadSignalContext *sctx = SigCtx(thr);
1942   if (sctx == 0 ||
1943       atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
1944     return;
1945   atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
1946   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
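  // Block all signals while delivering the pending ones so that the handlers
  // cannot re-enter this loop; the original mask is restored below.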
1947   internal_sigfillset(&sctx->emptyset);
1948   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
1949   CHECK_EQ(res, 0);
1950   for (int sig = 0; sig < kSigCount; sig++) {
1951     SignalDesc *signal = &sctx->pending_signals[sig];
1952     if (signal->armed) {
1953       signal->armed = false;
1954       CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
1955           &signal->siginfo, &signal->ctx);
1956     }
1957   }
1958   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
1959   CHECK_EQ(res, 0);
1960   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
1961 }
1962 
1963 }  // namespace __tsan
1964 
1965 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
1966   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
1967          sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
1968          // If we are sending signal to ourselves, we must process it now.
1969          (sctx && sig == sctx->int_signal_send);
1970 }
1971 
1972 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
1973                                           __sanitizer_siginfo *info,
1974                                           void *ctx) {
1975   cur_thread_init();
1976   ThreadState *thr = cur_thread();
1977   ThreadSignalContext *sctx = SigCtx(thr);
1978   if (sig < 0 || sig >= kSigCount) {
1979     VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
1980     return;
1981   }
1982   // Don't mess with synchronous signals.
1983   const bool sync = is_sync_signal(sctx, sig);
1984   if (sync ||
1985       // If we are in blocking function, we can safely process it now
1986       // (but check if we are in a recursive interceptor,
1987       // i.e. pthread_join()->munmap()).
1988       (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
1989     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
1990     if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
1991       atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
1992       CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
1993       atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
1994     } else {
1995       // Be very conservative about when we do an acquire in this case.
1996       // It's unsafe to do an acquire in async handlers, because ThreadState
1997       // can be in an inconsistent state.
1998       // SIGSYS looks relatively safe -- it's synchronous and can actually
1999       // need some global state.
2000       bool acq = (sig == SIGSYS);
2001       CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
2002     }
2003     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2004     return;
2005   }
2006 
2007   if (sctx == 0)
2008     return;
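  // An asynchronous signal arrived at a point where it is not safe to run the
  // user handler: remember it and deliver it later from ProcessPendingSignals().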
2009   SignalDesc *signal = &sctx->pending_signals[sig];
2010   if (signal->armed == false) {
2011     signal->armed = true;
2012     signal->sigaction = sigact;
2013     if (info)
2014       internal_memcpy(&signal->siginfo, info, sizeof(*info));
2015     if (ctx)
2016       internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2017     atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
2018   }
2019 }
2020 
2021 static void rtl_sighandler(int sig) {
2022   rtl_generic_sighandler(false, sig, 0, 0);
2023 }
2024 
2025 static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
2026   rtl_generic_sighandler(true, sig, info, ctx);
2027 }
2028 
2029 TSAN_INTERCEPTOR(int, raise, int sig) {
2030   SCOPED_TSAN_INTERCEPTOR(raise, sig);
2031   ThreadSignalContext *sctx = SigCtx(thr);
2032   CHECK_NE(sctx, 0);
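  // Remember which signal we send to ourselves so that is_sync_signal()
  // forces the handler to run immediately (see rtl_generic_sighandler above).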
2033   int prev = sctx->int_signal_send;
2034   sctx->int_signal_send = sig;
2035   int res = REAL(raise)(sig);
2036   CHECK_EQ(sctx->int_signal_send, sig);
2037   sctx->int_signal_send = prev;
2038   return res;
2039 }
2040 
2041 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2042   SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2043   ThreadSignalContext *sctx = SigCtx(thr);
2044   CHECK_NE(sctx, 0);
2045   int prev = sctx->int_signal_send;
2046   if (pid == (int)internal_getpid()) {
2047     sctx->int_signal_send = sig;
2048   }
2049   int res = REAL(kill)(pid, sig);
2050   if (pid == (int)internal_getpid()) {
2051     CHECK_EQ(sctx->int_signal_send, sig);
2052     sctx->int_signal_send = prev;
2053   }
2054   return res;
2055 }
2056 
2057 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2058   SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2059   ThreadSignalContext *sctx = SigCtx(thr);
2060   CHECK_NE(sctx, 0);
2061   int prev = sctx->int_signal_send;
2062   if (tid == pthread_self()) {
2063     sctx->int_signal_send = sig;
2064   }
2065   int res = REAL(pthread_kill)(tid, sig);
2066   if (tid == pthread_self()) {
2067     CHECK_EQ(sctx->int_signal_send, sig);
2068     sctx->int_signal_send = prev;
2069   }
2070   return res;
2071 }
2072 
2073 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2074   SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2075   // It's intercepted merely to process pending signals.
2076   return REAL(gettimeofday)(tv, tz);
2077 }
2078 
2079 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2080     void *hints, void *rv) {
2081   SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2082   // We miss atomic synchronization in getaddrinfo,
2083   // and can report a false race between malloc and free
2084   // inside getaddrinfo. So ignore memory accesses.
2085   ThreadIgnoreBegin(thr, pc);
2086   int res = REAL(getaddrinfo)(node, service, hints, rv);
2087   ThreadIgnoreEnd(thr, pc);
2088   return res;
2089 }
2090 
2091 TSAN_INTERCEPTOR(int, fork, int fake) {
2092   if (in_symbolizer())
2093     return REAL(fork)(fake);
2094   SCOPED_INTERCEPTOR_RAW(fork, fake);
2095   ForkBefore(thr, pc);
2096   int pid;
2097   {
2098     // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
2099     // we'll assert in CheckNoLocks() unless we ignore interceptors.
2100     ScopedIgnoreInterceptors ignore;
2101     pid = REAL(fork)(fake);
2102   }
2103   if (pid == 0) {
2104     // child
2105     ForkChildAfter(thr, pc);
2106     FdOnFork(thr, pc);
2107   } else if (pid > 0) {
2108     // parent
2109     ForkParentAfter(thr, pc);
2110   } else {
2111     // error
2112     ForkParentAfter(thr, pc);
2113   }
2114   return pid;
2115 }
2116 
2117 TSAN_INTERCEPTOR(int, vfork, int fake) {
2118   // Some programs (e.g. openjdk) call close for all file descriptors
2119   // in the child process. Under tsan it leads to false positives, because
2120   // address space is shared, so the parent process also thinks that
2121   // the descriptors are closed (while they are actually not).
2122   // This leads to false positives due to missed synchronization.
2123   // Strictly speaking this is undefined behavior, because the vfork child is
2124   // not allowed to call any functions other than exec/exit. But this is what
2125   // openjdk does, so we want to handle it.
2126   // We could disable interceptors in the child process. But it's not possible
2127   // to simply intercept and wrap vfork, because the vfork child is not allowed
2128   // to return from the function that calls vfork, and that's exactly what
2129   // we would do. So this would require some assembly trickery as well.
2130   // Instead we simply turn vfork into fork.
2131   return WRAP(fork)(fake);
2132 }
2133 
2134 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2135 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2136                                     void *data);
2137 struct dl_iterate_phdr_data {
2138   ThreadState *thr;
2139   uptr pc;
2140   dl_iterate_phdr_cb_t cb;
2141   void *data;
2142 };
2143 
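// Returns true for application memory whose shadow is not marked as .rodata;
// only such ranges are reset in the callback below.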
2144 static bool IsAppNotRodata(uptr addr) {
2145   return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2146 }
2147 
2148 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2149                               void *data) {
2150   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2151   // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
2152   // later accessible in the dl_iterate_phdr callback. But we don't see
2153   // synchronization inside the dynamic linker, so we "unpoison" it here in
2154   // order not to produce false reports. Ignoring malloc/free in dlopen/dlclose
2155   // is not enough, because some libc functions call __libc_dlopen.
2156   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2157     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2158                      internal_strlen(info->dlpi_name));
2159   int res = cbdata->cb(info, size, cbdata->data);
2160   // Perform the check one more time in case info->dlpi_name was overwritten
2161   // by user callback.
2162   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2163     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2164                      internal_strlen(info->dlpi_name));
2165   return res;
2166 }
2167 
2168 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2169   SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2170   dl_iterate_phdr_data cbdata;
2171   cbdata.thr = thr;
2172   cbdata.pc = pc;
2173   cbdata.cb = cb;
2174   cbdata.data = data;
2175   int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2176   return res;
2177 }
2178 #endif
2179 
2180 static int OnExit(ThreadState *thr) {
2181   int status = Finalize(thr);
2182   FlushStreams();
2183   return status;
2184 }
2185 
2186 struct TsanInterceptorContext {
2187   ThreadState *thr;
2188   const uptr caller_pc;
2189   const uptr pc;
2190 };
2191 
2192 #if !SANITIZER_MAC
2193 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2194     __sanitizer_msghdr *msg) {
2195   int fds[64];
2196   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2197   for (int i = 0; i < cnt; i++)
2198     FdEventCreate(thr, pc, fds[i]);
2199 }
2200 #endif
2201 
2202 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2203 // Causes interceptor recursion (getaddrinfo() and fopen())
2204 #undef SANITIZER_INTERCEPT_GETADDRINFO
2205 // We define our own.
2206 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2207 #define NEED_TLS_GET_ADDR
2208 #endif
2209 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2210 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2211 
2212 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2213 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2214   INTERCEPT_FUNCTION_VER(name, ver)
2215 
2216 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                    \
2217   MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
2218                     ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2219                     true)
2220 
2221 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                       \
2222   MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr,                  \
2223                     ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2224                     false)
2225 
2226 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)      \
2227   SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);         \
2228   TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
2229   ctx = (void *)&_ctx;                                \
2230   (void) ctx;
2231 
2232 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2233   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2234   TsanInterceptorContext _ctx = {thr, caller_pc, pc};     \
2235   ctx = (void *)&_ctx;                                    \
2236   (void) ctx;
2237 
2238 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2239   if (path)                                           \
2240     Acquire(thr, pc, File2addr(path));                \
2241   if (file) {                                         \
2242     int fd = fileno_unlocked(file);                   \
2243     if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2244   }
2245 
2246 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2247   if (file) {                                    \
2248     int fd = fileno_unlocked(file);              \
2249     if (fd >= 0) FdClose(thr, pc, fd);           \
2250   }
2251 
2252 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2253   libignore()->OnLibraryLoaded(filename)
2254 
2255 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2256   libignore()->OnLibraryUnloaded()
2257 
2258 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2259   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2260 
2261 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2262   Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2263 
2264 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2265   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2266 
2267 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2268   FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2269 
2270 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2271   FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2272 
2273 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2274   FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2275 
2276 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2277   FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2278 
2279 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2280   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2281 
2282 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2283   __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
2284 
2285 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2286 
2287 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2288   OnExit(((TsanInterceptorContext *) ctx)->thr)
2289 
2290 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
2291   MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
2292             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2293 
2294 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
2295   MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
2296             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2297 
2298 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2299   MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2300             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2301 
2302 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2303   MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2304             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2305 
2306 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2307   MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2308                      ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2309 
2310 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2311                                      off)                                   \
2312   do {                                                                      \
2313     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2314                             off);                                           \
2315   } while (false)
2316 
2317 #if !SANITIZER_MAC
2318 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2319   HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2320       ((TsanInterceptorContext *)ctx)->pc, msg)
2321 #endif
2322 
2323 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2324   if (TsanThread *t = GetCurrentThread()) {                                    \
2325     *begin = t->tls_begin();                                                   \
2326     *end = t->tls_end();                                                       \
2327   } else {                                                                     \
2328     *begin = *end = 0;                                                         \
2329   }
2330 
2331 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2332   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2333 
2334 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2335   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2336 
2337 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2338 
2339 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2340                           __sanitizer_sigaction *old);
2341 static __sanitizer_sighandler_ptr signal_impl(int sig,
2342                                               __sanitizer_sighandler_ptr h);
2343 
2344 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2345   { return sigaction_impl(signo, act, oldact); }
2346 
2347 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2348   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2349 
2350 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2351 
2352 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2353                    __sanitizer_sigaction *old) {
2354   // Note: if we call REAL(sigaction) directly for any reason without proxying
2355   // the signal handler through rtl_sigaction, very bad things will happen.
2356   // The handler will run synchronously and corrupt tsan per-thread state.
2357   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2358   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2359   __sanitizer_sigaction old_stored;
2360   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2361   __sanitizer_sigaction newact;
2362   if (act) {
2363     // Copy act into sigactions[sig].
2364     // Can't use a struct copy, because the compiler can emit a call to memcpy.
2365     // Can't use internal_memcpy, because it copies byte-by-byte
2366     // while the signal handler reads the handler concurrently, so it can read
2367     // some bytes from the old value and some bytes from the new value.
2368     // Use volatile to prevent insertion of memcpy.
2369     sigactions[sig].handler =
2370         *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2371     sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2372     internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2373                     sizeof(sigactions[sig].sa_mask));
2374 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
2375     sigactions[sig].sa_restorer = act->sa_restorer;
2376 #endif
2377     internal_memcpy(&newact, act, sizeof(newact));
2378     internal_sigfillset(&newact.sa_mask);
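    // Install our proxy handler instead of the user handler; the user handler
    // is read back from sigactions[] by CallUserSignalHandler().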
2379     if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
2380       if (newact.sa_flags & SA_SIGINFO)
2381         newact.sigaction = rtl_sigaction;
2382       else
2383         newact.handler = rtl_sighandler;
2384     }
2385     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2386     act = &newact;
2387   }
2388   int res = REAL(sigaction)(sig, act, old);
2389   if (res == 0 && old) {
2390     uptr cb = (uptr)old->sigaction;
2391     if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
2392       internal_memcpy(old, &old_stored, sizeof(*old));
2393     }
2394   }
2395   return res;
2396 }
2397 
2398 static __sanitizer_sighandler_ptr signal_impl(int sig,
2399                                               __sanitizer_sighandler_ptr h) {
2400   __sanitizer_sigaction act;
2401   act.handler = h;
2402   internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2403   act.sa_flags = 0;
2404   __sanitizer_sigaction old;
2405   int res = sigaction_symname(sig, &act, &old);
2406   if (res) return (__sanitizer_sighandler_ptr)sig_err;
2407   return old.handler;
2408 }
2409 
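// Common prologue for the raw syscall hooks below: do nothing while
// interceptors are ignored, and process pending signals when the scope exits
// (via the ScopedSyscall destructor).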
2410 #define TSAN_SYSCALL() \
2411   ThreadState *thr = cur_thread(); \
2412   if (thr->ignore_interceptors) \
2413     return; \
2414   ScopedSyscall scoped_syscall(thr) \
2415 /**/
2416 
2417 struct ScopedSyscall {
2418   ThreadState *thr;
2419 
2420   explicit ScopedSyscall(ThreadState *thr)
2421       : thr(thr) {
2422     Initialize(thr);
2423   }
2424 
2425   ~ScopedSyscall() {
2426     ProcessPendingSignals(thr);
2427   }
2428 };
2429 
2430 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
2431 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2432   TSAN_SYSCALL();
2433   MemoryAccessRange(thr, pc, p, s, write);
2434 }
2435 
2436 static void syscall_acquire(uptr pc, uptr addr) {
2437   TSAN_SYSCALL();
2438   Acquire(thr, pc, addr);
2439   DPrintf("syscall_acquire(%p)\n", addr);
2440 }
2441 
2442 static void syscall_release(uptr pc, uptr addr) {
2443   TSAN_SYSCALL();
2444   DPrintf("syscall_release(%p)\n", addr);
2445   Release(thr, pc, addr);
2446 }
2447 
2448 static void syscall_fd_close(uptr pc, int fd) {
2449   TSAN_SYSCALL();
2450   FdClose(thr, pc, fd);
2451 }
2452 
2453 static USED void syscall_fd_acquire(uptr pc, int fd) {
2454   TSAN_SYSCALL();
2455   FdAcquire(thr, pc, fd);
2456   DPrintf("syscall_fd_acquire(%p)\n", fd);
2457 }
2458 
2459 static USED void syscall_fd_release(uptr pc, int fd) {
2460   TSAN_SYSCALL();
2461   DPrintf("syscall_fd_release(%p)\n", fd);
2462   FdRelease(thr, pc, fd);
2463 }
2464 
2465 static void syscall_pre_fork(uptr pc) {
2466   TSAN_SYSCALL();
2467   ForkBefore(thr, pc);
2468 }
2469 
2470 static void syscall_post_fork(uptr pc, int pid) {
2471   TSAN_SYSCALL();
2472   if (pid == 0) {
2473     // child
2474     ForkChildAfter(thr, pc);
2475     FdOnFork(thr, pc);
2476   } else if (pid > 0) {
2477     // parent
2478     ForkParentAfter(thr, pc);
2479   } else {
2480     // error
2481     ForkParentAfter(thr, pc);
2482   }
2483 }
2484 #endif
2485 
2486 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2487   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2488 
2489 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2490   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2491 
2492 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2493   do {                                       \
2494     (void)(p);                               \
2495     (void)(s);                               \
2496   } while (false)
2497 
2498 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2499   do {                                        \
2500     (void)(p);                                \
2501     (void)(s);                                \
2502   } while (false)
2503 
2504 #define COMMON_SYSCALL_ACQUIRE(addr) \
2505     syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2506 
2507 #define COMMON_SYSCALL_RELEASE(addr) \
2508     syscall_release(GET_CALLER_PC(), (uptr)(addr))
2509 
2510 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2511 
2512 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2513 
2514 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2515 
2516 #define COMMON_SYSCALL_PRE_FORK() \
2517   syscall_pre_fork(GET_CALLER_PC())
2518 
2519 #define COMMON_SYSCALL_POST_FORK(res) \
2520   syscall_post_fork(GET_CALLER_PC(), res)
2521 
2522 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2523 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2524 
2525 #ifdef NEED_TLS_GET_ADDR
2526 // Define our own interceptor instead of sanitizer_common's for three reasons:
2527 // 1. It must not process pending signals.
2528 //    Signal handlers may contain MOVDQA instruction (see below).
2529 // 2. It must be as simple as possible to not contain MOVDQA.
2530 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
2531 //    which is empty for tsan (meant only for msan).
2532 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2533 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2534 // So the interceptor must work with a mis-aligned stack; in particular, it must
2535 // not execute MOVDQA with stack addresses.
2536 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2537   void *res = REAL(__tls_get_addr)(arg);
2538   ThreadState *thr = cur_thread();
2539   if (!thr)
2540     return res;
2541   DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2542                                         thr->tls_addr + thr->tls_size);
2543   if (!dtv)
2544     return res;
2545   // New DTLS block has been allocated.
2546   MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2547   return res;
2548 }
2549 #endif
2550 
2551 #if SANITIZER_NETBSD
2552 TSAN_INTERCEPTOR(void, _lwp_exit) {
2553   SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2554   DestroyThreadState();
2555   REAL(_lwp_exit)();
2556 }
2557 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2558 #else
2559 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2560 #endif
2561 
2562 #if SANITIZER_FREEBSD
2563 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2564   SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2565   DestroyThreadState();
2566   REAL(thr_exit)(state);
2567 }
2568 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2569 #else
2570 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2571 #endif
2572 
2573 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2574 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2575 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2576 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2577 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2578 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2579 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2580 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2581 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2582 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2583 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2584 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2585 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2586 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2587 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2588 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2589 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2590   void *c)
2591 
2592 namespace __tsan {
2593 
2594 static void finalize(void *arg) {
2595   ThreadState *thr = cur_thread();
2596   int status = Finalize(thr);
2597   // Make sure the output is not lost.
2598   FlushStreams();
2599   if (status)
2600     Die();
2601 }
2602 
2603 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2604 static void unreachable() {
2605   Report("FATAL: ThreadSanitizer: unreachable called\n");
2606   Die();
2607 }
2608 #endif
2609 
2610 // Define a default implementation since interception of libdispatch is optional.
2611 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2612 
2613 void InitializeInterceptors() {
2614 #if !SANITIZER_MAC
2615   // We need to set these up early, because functions like dlsym() can call them.
2616   REAL(memset) = internal_memset;
2617   REAL(memcpy) = internal_memcpy;
2618 #endif
2619 
2620   // Instruct libc malloc to consume less memory.
2621 #if SANITIZER_LINUX
2622   mallopt(1, 0);  // M_MXFAST
2623   mallopt(-3, 32*1024);  // M_MMAP_THRESHOLD
2624 #endif
2625 
2626   new(interceptor_ctx()) InterceptorContext();
2627 
2628   InitializeCommonInterceptors();
2629   InitializeSignalInterceptors();
2630   InitializeLibdispatchInterceptors();
2631 
2632 #if !SANITIZER_MAC
2633   // We cannot use TSAN_INTERCEPT to get the setjmp address,
2634   // because it takes &setjmp and setjmp is not present in some versions of libc.
2635   using __interception::InterceptFunction;
2636   InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
2637   InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2638   InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
2639                     0);
2640 #if !SANITIZER_NETBSD
2641   InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2642 #endif
2643 #endif
2644 
2645   TSAN_INTERCEPT(longjmp_symname);
2646   TSAN_INTERCEPT(siglongjmp_symname);
2647 #if SANITIZER_NETBSD
2648   TSAN_INTERCEPT(_longjmp);
2649 #endif
2650 
2651   TSAN_INTERCEPT(malloc);
2652   TSAN_INTERCEPT(__libc_memalign);
2653   TSAN_INTERCEPT(calloc);
2654   TSAN_INTERCEPT(realloc);
2655   TSAN_INTERCEPT(reallocarray);
2656   TSAN_INTERCEPT(free);
2657   TSAN_INTERCEPT(cfree);
2658   TSAN_INTERCEPT(munmap);
2659   TSAN_MAYBE_INTERCEPT_MEMALIGN;
2660   TSAN_INTERCEPT(valloc);
2661   TSAN_MAYBE_INTERCEPT_PVALLOC;
2662   TSAN_INTERCEPT(posix_memalign);
2663 
2664   TSAN_INTERCEPT(strcpy);
2665   TSAN_INTERCEPT(strncpy);
2666   TSAN_INTERCEPT(strdup);
2667 
2668   TSAN_INTERCEPT(pthread_create);
2669   TSAN_INTERCEPT(pthread_join);
2670   TSAN_INTERCEPT(pthread_detach);
2671   TSAN_INTERCEPT(pthread_exit);
2672   #if SANITIZER_LINUX
2673   TSAN_INTERCEPT(pthread_tryjoin_np);
2674   TSAN_INTERCEPT(pthread_timedjoin_np);
2675   #endif
2676 
2677   TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2678   TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2679   TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2680   TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2681   TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2682   TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2683 
2684   TSAN_INTERCEPT(pthread_mutex_init);
2685   TSAN_INTERCEPT(pthread_mutex_destroy);
2686   TSAN_INTERCEPT(pthread_mutex_trylock);
2687   TSAN_INTERCEPT(pthread_mutex_timedlock);
2688 
2689   TSAN_INTERCEPT(pthread_spin_init);
2690   TSAN_INTERCEPT(pthread_spin_destroy);
2691   TSAN_INTERCEPT(pthread_spin_lock);
2692   TSAN_INTERCEPT(pthread_spin_trylock);
2693   TSAN_INTERCEPT(pthread_spin_unlock);
2694 
2695   TSAN_INTERCEPT(pthread_rwlock_init);
2696   TSAN_INTERCEPT(pthread_rwlock_destroy);
2697   TSAN_INTERCEPT(pthread_rwlock_rdlock);
2698   TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2699   TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2700   TSAN_INTERCEPT(pthread_rwlock_wrlock);
2701   TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2702   TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2703   TSAN_INTERCEPT(pthread_rwlock_unlock);
2704 
2705   TSAN_INTERCEPT(pthread_barrier_init);
2706   TSAN_INTERCEPT(pthread_barrier_destroy);
2707   TSAN_INTERCEPT(pthread_barrier_wait);
2708 
2709   TSAN_INTERCEPT(pthread_once);
2710 
2711   TSAN_INTERCEPT(fstat);
2712   TSAN_MAYBE_INTERCEPT___FXSTAT;
2713   TSAN_MAYBE_INTERCEPT_FSTAT64;
2714   TSAN_MAYBE_INTERCEPT___FXSTAT64;
2715   TSAN_INTERCEPT(open);
2716   TSAN_MAYBE_INTERCEPT_OPEN64;
2717   TSAN_INTERCEPT(creat);
2718   TSAN_MAYBE_INTERCEPT_CREAT64;
2719   TSAN_INTERCEPT(dup);
2720   TSAN_INTERCEPT(dup2);
2721   TSAN_INTERCEPT(dup3);
2722   TSAN_MAYBE_INTERCEPT_EVENTFD;
2723   TSAN_MAYBE_INTERCEPT_SIGNALFD;
2724   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2725   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2726   TSAN_INTERCEPT(socket);
2727   TSAN_INTERCEPT(socketpair);
2728   TSAN_INTERCEPT(connect);
2729   TSAN_INTERCEPT(bind);
2730   TSAN_INTERCEPT(listen);
2731   TSAN_MAYBE_INTERCEPT_EPOLL;
2732   TSAN_INTERCEPT(close);
2733   TSAN_MAYBE_INTERCEPT___CLOSE;
2734   TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2735   TSAN_INTERCEPT(pipe);
2736   TSAN_INTERCEPT(pipe2);
2737 
2738   TSAN_INTERCEPT(unlink);
2739   TSAN_INTERCEPT(tmpfile);
2740   TSAN_MAYBE_INTERCEPT_TMPFILE64;
2741   TSAN_INTERCEPT(abort);
2742   TSAN_INTERCEPT(rmdir);
2743   TSAN_INTERCEPT(closedir);
2744 
2745   TSAN_INTERCEPT(sigsuspend);
2746   TSAN_INTERCEPT(sigblock);
2747   TSAN_INTERCEPT(sigsetmask);
2748   TSAN_INTERCEPT(pthread_sigmask);
2749   TSAN_INTERCEPT(raise);
2750   TSAN_INTERCEPT(kill);
2751   TSAN_INTERCEPT(pthread_kill);
2752   TSAN_INTERCEPT(sleep);
2753   TSAN_INTERCEPT(usleep);
2754   TSAN_INTERCEPT(nanosleep);
2755   TSAN_INTERCEPT(pause);
2756   TSAN_INTERCEPT(gettimeofday);
2757   TSAN_INTERCEPT(getaddrinfo);
2758 
2759   TSAN_INTERCEPT(fork);
2760   TSAN_INTERCEPT(vfork);
2761 #if !SANITIZER_ANDROID
2762   TSAN_INTERCEPT(dl_iterate_phdr);
2763 #endif
2764   TSAN_MAYBE_INTERCEPT_ON_EXIT;
2765   TSAN_INTERCEPT(__cxa_atexit);
2766   TSAN_INTERCEPT(_exit);
2767 
2768 #ifdef NEED_TLS_GET_ADDR
2769   TSAN_INTERCEPT(__tls_get_addr);
2770 #endif
2771 
2772   TSAN_MAYBE_INTERCEPT__LWP_EXIT;
2773   TSAN_MAYBE_INTERCEPT_THR_EXIT;
2774 
2775 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2776   // Need to set it up, because interceptors check that the function is
2777   // resolved, but atexit is emitted directly into the module, so it can't be.
2778   REAL(atexit) = (int(*)(void(*)()))unreachable;
2779 #endif
2780 
2781   if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2782     Printf("ThreadSanitizer: failed to setup atexit callback\n");
2783     Die();
2784   }
2785 
2786 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
2787   if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
2788     Printf("ThreadSanitizer: failed to create thread key\n");
2789     Die();
2790   }
2791 #endif
2792 
2793   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
2794   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
2795   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
2796   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
2797   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
2798   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
2799   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
2800   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
2801   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
2802   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
2803   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
2804   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
2805   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
2806   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
2807   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
2808   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
2809   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
2810 
2811   FdInit();
2812 }
2813 
2814 }  // namespace __tsan
2815 
2816 // Invisible barrier for tests.
2817 // There were several unsuccessful iterations for this functionality:
2818 // 1. Initially it was implemented in user code using
2819 //    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2820 //    MacOS, and futexes are Linux-specific, so they are not an option either.
2821 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2822 //    "as-if synchronized via sleep" messages in reports which failed some
2823 //    output tests.
2824 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
2825 //    visible events, which led to "failed to restore stack trace" failures.
2826 // Note that the no_sanitize_thread attribute does not turn off atomic
2827 // interception, so attaching it to a function defined in user code does not help.
2828 // That's why we now have what we have.
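// A test typically declares a u64 barrier variable, calls
// __tsan_testonly_barrier_init() once with the number of participating
// threads, and has each thread call __tsan_testonly_barrier_wait() at the
// synchronization point.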
2829 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2830 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2831   if (count >= (1 << 8)) {
2832       Printf("barrier_init: count is too large (%d)\n", count);
2833       Die();
2834   }
2835   // 8 lsb is thread count, the remaining are count of entered threads.
2836   *barrier = count;
2837 }
2838 
2839 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2840 void __tsan_testonly_barrier_wait(u64 *barrier) {
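  // The 8 low bits hold the thread count; the high bits count threads that
  // have entered so far. A thread's epoch is entered/count; we spin until the
  // epoch changes, i.e. until all count threads of this round have arrived.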
2841   unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
2842   unsigned old_epoch = (old >> 8) / (old & 0xff);
2843   for (;;) {
2844     unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2845     unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2846     if (cur_epoch != old_epoch)
2847       return;
2848     internal_sched_yield();
2849   }
2850 }
2851