1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14 
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_tls_get_addr.h"
25 #include "interception/interception.h"
26 #include "tsan_interceptors.h"
27 #include "tsan_interface.h"
28 #include "tsan_platform.h"
29 #include "tsan_suppressions.h"
30 #include "tsan_rtl.h"
31 #include "tsan_mman.h"
32 #include "tsan_fd.h"
33 
34 #include <stdarg.h>
35 
36 using namespace __tsan;
37 
38 DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
39 DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
40 
41 #if SANITIZER_FREEBSD || SANITIZER_APPLE
42 #define stdout __stdoutp
43 #define stderr __stderrp
44 #endif
45 
46 #if SANITIZER_NETBSD
47 #define dirfd(dirp) (*(int *)(dirp))
48 #define fileno_unlocked(fp)              \
49   (((__sanitizer_FILE *)fp)->_file == -1 \
50        ? -1                              \
51        : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
52 
53 #define stdout ((__sanitizer_FILE*)&__sF[1])
54 #define stderr ((__sanitizer_FILE*)&__sF[2])
55 
56 #define nanosleep __nanosleep50
57 #define vfork __vfork14
58 #endif
59 
60 #ifdef __mips__
61 const int kSigCount = 129;
62 #else
63 const int kSigCount = 65;
64 #endif
65 
66 #ifdef __mips__
67 struct ucontext_t {
68   u64 opaque[768 / sizeof(u64) + 1];
69 };
70 #else
71 struct ucontext_t {
  // The size is determined from the sizeof of the real ucontext_t on Linux.
73   u64 opaque[936 / sizeof(u64) + 1];
74 };
75 #endif
76 
77 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
78     defined(__s390x__)
79 #define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
80 #elif defined(__aarch64__) || SANITIZER_PPC64V2
81 #define PTHREAD_ABI_BASE  "GLIBC_2.17"
82 #elif SANITIZER_LOONGARCH64
83 #define PTHREAD_ABI_BASE  "GLIBC_2.36"
84 #endif
85 
86 extern "C" int pthread_attr_init(void *attr);
87 extern "C" int pthread_attr_destroy(void *attr);
88 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
89 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
90 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
91                               void (*child)(void));
92 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
93 extern "C" int pthread_setspecific(unsigned key, const void *v);
94 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
95 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
96 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
97 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
98 extern "C" int pthread_equal(void *t1, void *t2);
99 extern "C" void *pthread_self();
100 extern "C" void _exit(int status);
101 #if !SANITIZER_NETBSD
102 extern "C" int fileno_unlocked(void *stream);
103 extern "C" int dirfd(void *dirp);
104 #endif
105 #if SANITIZER_NETBSD
106 extern __sanitizer_FILE __sF[];
107 #else
108 extern __sanitizer_FILE *stdout, *stderr;
109 #endif
110 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
111 const int PTHREAD_MUTEX_RECURSIVE = 1;
112 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
113 #else
114 const int PTHREAD_MUTEX_RECURSIVE = 2;
115 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
116 #endif
117 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
118 const int EPOLL_CTL_ADD = 1;
119 #endif
120 const int SIGILL = 4;
121 const int SIGTRAP = 5;
122 const int SIGABRT = 6;
123 const int SIGFPE = 8;
124 const int SIGSEGV = 11;
125 const int SIGPIPE = 13;
126 const int SIGTERM = 15;
127 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
128 const int SIGBUS = 10;
129 const int SIGSYS = 12;
130 #else
131 const int SIGBUS = 7;
132 const int SIGSYS = 31;
133 #endif
134 #if SANITIZER_HAS_SIGINFO
135 const int SI_TIMER = -2;
136 #endif
137 void *const MAP_FAILED = (void*)-1;
138 #if SANITIZER_NETBSD
139 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
140 #elif !SANITIZER_APPLE
141 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
142 #endif
143 const int MAP_FIXED = 0x10;
144 typedef long long_t;
145 typedef __sanitizer::u16 mode_t;
146 
147 // From /usr/include/unistd.h
148 # define F_ULOCK 0      /* Unlock a previously locked region.  */
149 # define F_LOCK  1      /* Lock a region for exclusive use.  */
150 # define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
151 # define F_TEST  3      /* Test a region for other processes locks.  */
152 
153 #if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
154 const int SA_SIGINFO = 0x40;
155 const int SIG_SETMASK = 3;
156 #elif defined(__mips__)
157 const int SA_SIGINFO = 8;
158 const int SIG_SETMASK = 3;
159 #else
160 const int SA_SIGINFO = 4;
161 const int SIG_SETMASK = 2;
162 #endif
163 
164 namespace __tsan {
165 struct SignalDesc {
166   bool armed;
167   __sanitizer_siginfo siginfo;
168   ucontext_t ctx;
169 };
170 
171 struct ThreadSignalContext {
172   int int_signal_send;
173   SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for the stack.
175   __sanitizer_sigset_t emptyset;
176   __sanitizer_sigset_t oldset;
177 };
178 
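// Marks the current thread as being inside a blocking libc call. Any already
// pending signals are processed before the flag is left set, so that a signal
// delivered right before the blocking call is not delayed for its whole
// duration (see the comment inside the loop).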
179 void EnterBlockingFunc(ThreadState *thr) {
180   for (;;) {
    // The order is important so that we do not delay a signal indefinitely if
    // it is delivered right before we set in_blocking_func. Note: we can't call
    // ProcessPendingSignals while in_blocking_func is set, or we may handle
    // a signal synchronously while we are already handling another signal.
185     atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
186     if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
187       break;
188     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
189     ProcessPendingSignals(thr);
190   }
191 }
192 
193 // The sole reason tsan wraps atexit callbacks is to establish synchronization
194 // between callback setup and callback execution.
195 struct AtExitCtx {
196   void (*f)();
197   void *arg;
198   uptr pc;
199 };
200 
201 // InterceptorContext holds all global data required for interceptors.
202 // It's explicitly constructed in InitializeInterceptors with placement new
203 // and is never destroyed. This allows usage of members with non-trivial
204 // constructors and destructors.
205 struct InterceptorContext {
206   // The object is 64-byte aligned, because we want hot data to be located
207   // in a single cache line if possible (it's accessed in every interceptor).
208   ALIGNED(64) LibIgnore libignore;
209   __sanitizer_sigaction sigactions[kSigCount];
210 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
211   unsigned finalize_key;
212 #endif
213 
214   Mutex atexit_mu;
215   Vector<struct AtExitCtx *> AtExitStack;
216 
217   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
218 };
219 
220 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
221 InterceptorContext *interceptor_ctx() {
222   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
223 }
224 
225 LibIgnore *libignore() {
226   return &interceptor_ctx()->libignore;
227 }
228 
229 void InitializeLibIgnore() {
230   const SuppressionContext &supp = *Suppressions();
231   const uptr n = supp.SuppressionCount();
232   for (uptr i = 0; i < n; i++) {
233     const Suppression *s = supp.SuppressionAt(i);
234     if (0 == internal_strcmp(s->type, kSuppressionLib))
235       libignore()->AddIgnoredLibrary(s->templ);
236   }
237   if (flags()->ignore_noninstrumented_modules)
238     libignore()->IgnoreNoninstrumentedModules(true);
239   libignore()->OnLibraryLoaded(0);
240 }
241 
// The following two hooks can be used for cooperative scheduling when
// locking.
244 #ifdef TSAN_EXTERNAL_HOOKS
245 void OnPotentiallyBlockingRegionBegin();
246 void OnPotentiallyBlockingRegionEnd();
247 #else
248 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
249 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
250 #endif
251 
252 }  // namespace __tsan
253 
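// Returns the per-thread signal context, lazily allocating it with MmapOrDie
// on first use. Returns null if the context is not yet allocated and the
// thread is already dead.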
254 static ThreadSignalContext *SigCtx(ThreadState *thr) {
255   // This function may be called reentrantly if it is interrupted by a signal
256   // handler. Use CAS to handle the race.
257   uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
258   if (ctx == 0 && !thr->is_dead) {
259     uptr pctx =
260         (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
261     MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
262     if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
263                                        memory_order_relaxed)) {
264       ctx = pctx;
265     } else {
266       UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
267     }
268   }
269   return (ThreadSignalContext *)ctx;
270 }
271 
272 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
273                                      uptr pc)
274     : thr_(thr) {
275   LazyInitialize(thr);
276   if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
    // pthread_join is marked as blocking, but it's also known to call other
    // intercepted functions (mmap, free). If we don't reset in_blocking_func,
    // we can get deadlocks and memory corruption if we deliver a synchronous
    // signal inside an mmap/free interceptor.
    // So reset it here and restore it in the destructor.
    // See https://github.com/google/sanitizers/issues/1540
283     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
284     in_blocking_func_ = true;
285   }
286   if (!thr_->is_inited) return;
287   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
288   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
289   ignoring_ =
290       !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
291                                 libignore()->IsIgnored(pc, &in_ignored_lib_));
292   EnableIgnores();
293 }
294 
295 ScopedInterceptor::~ScopedInterceptor() {
296   if (!thr_->is_inited) return;
297   DisableIgnores();
298   if (UNLIKELY(in_blocking_func_))
299     EnterBlockingFunc(thr_);
300   if (!thr_->ignore_interceptors) {
301     ProcessPendingSignals(thr_);
302     FuncExit(thr_);
303     CheckedMutex::CheckNoLocks();
304   }
305 }
306 
307 NOINLINE
308 void ScopedInterceptor::EnableIgnoresImpl() {
309   ThreadIgnoreBegin(thr_, 0);
310   if (flags()->ignore_noninstrumented_modules)
311     thr_->suppress_reports++;
312   if (in_ignored_lib_) {
313     DCHECK(!thr_->in_ignored_lib);
314     thr_->in_ignored_lib = true;
315   }
316 }
317 
318 NOINLINE
319 void ScopedInterceptor::DisableIgnoresImpl() {
320   ThreadIgnoreEnd(thr_);
321   if (flags()->ignore_noninstrumented_modules)
322     thr_->suppress_reports--;
323   if (in_ignored_lib_) {
324     DCHECK(thr_->in_ignored_lib);
325     thr_->in_ignored_lib = false;
326   }
327 }
328 
329 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
330 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
331 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
332 #else
333 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
334 #endif
335 #if SANITIZER_FREEBSD
336 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
337     INTERCEPT_FUNCTION(_pthread_##func)
338 #else
339 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
340 #endif
341 #if SANITIZER_NETBSD
342 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
343     INTERCEPT_FUNCTION(__libc_##func)
344 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
345     INTERCEPT_FUNCTION(__libc_thr_##func)
346 #else
347 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
348 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
349 #endif
350 
351 #define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
352   MemoryAccessRange((thr), (pc), (uptr)(s),                         \
353     common_flags()->strict_string_checks ? (len) + 1 : (n), false)
354 
355 #define READ_STRING(thr, pc, s, n)                             \
356     READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
357 
358 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
359 
360 struct BlockingCall {
361   explicit BlockingCall(ThreadState *thr)
362       : thr(thr) {
363     EnterBlockingFunc(thr);
364     // When we are in a "blocking call", we process signals asynchronously
365     // (right when they arrive). In this context we do not expect to be
366     // executing any user/runtime code. The known interceptor sequence when
367     // this is not true is: pthread_join -> munmap(stack). It's fine
368     // to ignore munmap in this case -- we handle stack shadow separately.
369     thr->ignore_interceptors++;
370   }
371 
372   ~BlockingCall() {
373     thr->ignore_interceptors--;
374     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
375   }
376 
377   ThreadState *thr;
378 };
379 
380 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
381   SCOPED_TSAN_INTERCEPTOR(sleep, sec);
382   unsigned res = BLOCK_REAL(sleep)(sec);
383   AfterSleep(thr, pc);
384   return res;
385 }
386 
387 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
388   SCOPED_TSAN_INTERCEPTOR(usleep, usec);
389   int res = BLOCK_REAL(usleep)(usec);
390   AfterSleep(thr, pc);
391   return res;
392 }
393 
394 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
395   SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
396   int res = BLOCK_REAL(nanosleep)(req, rem);
397   AfterSleep(thr, pc);
398   return res;
399 }
400 
401 TSAN_INTERCEPTOR(int, pause, int fake) {
402   SCOPED_TSAN_INTERCEPTOR(pause, fake);
403   return BLOCK_REAL(pause)(fake);
404 }
405 
// Note: we deliberately give this function the strange "installed_at" name
// because in reports it will appear between the callback frames and the frame
// that installed the callback.
409 static void at_exit_callback_installed_at() {
410   AtExitCtx *ctx;
411   {
412     // Ensure thread-safety.
413     Lock l(&interceptor_ctx()->atexit_mu);
414 
415     // Pop AtExitCtx from the top of the stack of callback functions
416     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
417     ctx = interceptor_ctx()->AtExitStack[element];
418     interceptor_ctx()->AtExitStack.PopBack();
419   }
420 
421   ThreadState *thr = cur_thread();
422   Acquire(thr, ctx->pc, (uptr)ctx);
423   FuncEntry(thr, ctx->pc);
424   ((void(*)())ctx->f)();
425   FuncExit(thr);
426   Free(ctx);
427 }
428 
429 static void cxa_at_exit_callback_installed_at(void *arg) {
430   ThreadState *thr = cur_thread();
431   AtExitCtx *ctx = (AtExitCtx*)arg;
432   Acquire(thr, ctx->pc, (uptr)arg);
433   FuncEntry(thr, ctx->pc);
434   ((void(*)(void *arg))ctx->f)(ctx->arg);
435   FuncExit(thr);
436   Free(ctx);
437 }
438 
439 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
440       void *arg, void *dso);
441 
442 #if !SANITIZER_ANDROID
443 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
444   if (in_symbolizer())
445     return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
448   SCOPED_INTERCEPTOR_RAW(atexit, f);
449   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
450 }
451 #endif
452 
453 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
454   if (in_symbolizer())
455     return 0;
456   SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
457   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
458 }
459 
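// Shared helper for the atexit and __cxa_atexit interceptors: allocates an
// AtExitCtx describing the callback and Release()s it so that the callback
// wrapper can Acquire() it when the callback runs (see the AtExitCtx comment
// above).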
460 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
461       void *arg, void *dso) {
462   auto *ctx = New<AtExitCtx>();
463   ctx->f = f;
464   ctx->arg = arg;
465   ctx->pc = pc;
466   Release(thr, pc, (uptr)ctx);
467   // Memory allocation in __cxa_atexit will race with free during exit,
468   // because we do not see synchronization around atexit callback list.
469   ThreadIgnoreBegin(thr, pc);
470   int res;
471   if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0.
    // Store ctx in a local stack-like structure instead.
474 
475     // Ensure thread-safety.
476     Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // due to atexit_mu being held on exit from the calloc interceptor.
479     ScopedIgnoreInterceptors ignore;
480 
481     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
482                              0, 0);
483     // Push AtExitCtx on the top of the stack of callback functions
484     if (!res) {
485       interceptor_ctx()->AtExitStack.PushBack(ctx);
486     }
487   } else {
488     res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
489   }
490   ThreadIgnoreEnd(thr);
491   return res;
492 }
493 
494 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
495 static void on_exit_callback_installed_at(int status, void *arg) {
496   ThreadState *thr = cur_thread();
497   AtExitCtx *ctx = (AtExitCtx*)arg;
498   Acquire(thr, ctx->pc, (uptr)arg);
499   FuncEntry(thr, ctx->pc);
500   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
501   FuncExit(thr);
502   Free(ctx);
503 }
504 
505 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
506   if (in_symbolizer())
507     return 0;
508   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
509   auto *ctx = New<AtExitCtx>();
510   ctx->f = (void(*)())f;
511   ctx->arg = arg;
512   ctx->pc = GET_CALLER_PC();
513   Release(thr, pc, (uptr)ctx);
514   // Memory allocation in __cxa_atexit will race with free during exit,
515   // because we do not see synchronization around atexit callback list.
516   ThreadIgnoreBegin(thr, pc);
517   int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
518   ThreadIgnoreEnd(thr);
519   return res;
520 }
521 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
522 #else
523 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
524 #endif
525 
526 // Cleanup old bufs.
527 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
528   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
529     JmpBuf *buf = &thr->jmp_bufs[i];
530     if (buf->sp <= sp) {
531       uptr sz = thr->jmp_bufs.Size();
532       internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
533       thr->jmp_bufs.PopBack();
534       i--;
535     }
536   }
537 }
538 
539 static void SetJmp(ThreadState *thr, uptr sp) {
540   if (!thr->is_inited)  // called from libc guts during bootstrap
541     return;
542   // Cleanup old bufs.
543   JmpBufGarbageCollect(thr, sp);
544   // Remember the buf.
545   JmpBuf *buf = thr->jmp_bufs.PushBack();
546   buf->sp = sp;
547   buf->shadow_stack_pos = thr->shadow_stack_pos;
548   ThreadSignalContext *sctx = SigCtx(thr);
549   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
550   buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
551   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
552       memory_order_relaxed);
553 }
554 
555 static void LongJmp(ThreadState *thr, uptr *env) {
556   uptr sp = ExtractLongJmpSp(env);
557   // Find the saved buf with matching sp.
558   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
559     JmpBuf *buf = &thr->jmp_bufs[i];
560     if (buf->sp == sp) {
561       CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
562       // Unwind the stack.
563       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
564         FuncExit(thr);
565       ThreadSignalContext *sctx = SigCtx(thr);
566       if (sctx)
567         sctx->int_signal_send = buf->int_signal_send;
568       atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
569           memory_order_relaxed);
570       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
571           memory_order_relaxed);
572       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
573       return;
574     }
575   }
576   Printf("ThreadSanitizer: can't find longjmp buf\n");
577   CHECK(0);
578 }
579 
580 // FIXME: put everything below into a common extern "C" block?
581 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
582 
583 #if SANITIZER_APPLE
584 TSAN_INTERCEPTOR(int, setjmp, void *env);
585 TSAN_INTERCEPTOR(int, _setjmp, void *env);
586 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
587 #else  // SANITIZER_APPLE
588 
589 #if SANITIZER_NETBSD
590 #define setjmp_symname __setjmp14
591 #define sigsetjmp_symname __sigsetjmp14
592 #else
593 #define setjmp_symname setjmp
594 #define sigsetjmp_symname sigsetjmp
595 #endif
596 
597 DEFINE_REAL(int, setjmp_symname, void *env)
598 DEFINE_REAL(int, _setjmp, void *env)
599 DEFINE_REAL(int, sigsetjmp_symname, void *env)
600 #if !SANITIZER_NETBSD
601 DEFINE_REAL(int, __sigsetjmp, void *env)
602 #endif
603 
604 // The real interceptor for setjmp is special, and implemented in pure asm. We
605 // just need to initialize the REAL functions so that they can be used in asm.
606 static void InitializeSetjmpInterceptors() {
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
  // &setjmp, and setjmp is not present in some versions of libc.
609   using __interception::InterceptFunction;
610   InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), (uptr*)&REAL(setjmp_symname), 0, 0);
611   InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
612   InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), (uptr*)&REAL(sigsetjmp_symname), 0,
613                     0);
614 #if !SANITIZER_NETBSD
615   InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
616 #endif
617 }
618 #endif  // SANITIZER_APPLE
619 
620 #if SANITIZER_NETBSD
621 #define longjmp_symname __longjmp14
622 #define siglongjmp_symname __siglongjmp14
623 #else
624 #define longjmp_symname longjmp
625 #define siglongjmp_symname siglongjmp
626 #endif
627 
628 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We would jump over the ScopedInterceptor dtor and
  // could leave thr->in_ignored_lib set.
632   {
633     SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
634   }
635   LongJmp(cur_thread(), env);
636   REAL(longjmp_symname)(env, val);
637 }
638 
639 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
640   {
641     SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
642   }
643   LongJmp(cur_thread(), env);
644   REAL(siglongjmp_symname)(env, val);
645 }
646 
647 #if SANITIZER_NETBSD
648 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
649   {
650     SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
651   }
652   LongJmp(cur_thread(), env);
653   REAL(_longjmp)(env, val);
654 }
655 #endif
656 
657 #if !SANITIZER_APPLE
658 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
659   if (in_symbolizer())
660     return InternalAlloc(size);
661   void *p = 0;
662   {
663     SCOPED_INTERCEPTOR_RAW(malloc, size);
664     p = user_alloc(thr, pc, size);
665   }
666   invoke_malloc_hook(p, size);
667   return p;
668 }
669 
// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races and (2) free will not be
// called on blocks allocated internally by libc.
673 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
674   SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
675   return user_memalign(thr, pc, align, sz);
676 }
677 
678 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
679   if (in_symbolizer())
680     return InternalCalloc(size, n);
681   void *p = 0;
682   {
683     SCOPED_INTERCEPTOR_RAW(calloc, size, n);
684     p = user_calloc(thr, pc, size, n);
685   }
686   invoke_malloc_hook(p, n * size);
687   return p;
688 }
689 
690 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
691   if (in_symbolizer())
692     return InternalRealloc(p, size);
693   if (p)
694     invoke_free_hook(p);
695   {
696     SCOPED_INTERCEPTOR_RAW(realloc, p, size);
697     p = user_realloc(thr, pc, p, size);
698   }
699   invoke_malloc_hook(p, size);
700   return p;
701 }
702 
703 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
704   if (in_symbolizer())
705     return InternalReallocArray(p, size, n);
706   if (p)
707     invoke_free_hook(p);
708   {
709     SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
710     p = user_reallocarray(thr, pc, p, size, n);
711   }
712   invoke_malloc_hook(p, size);
713   return p;
714 }
715 
716 TSAN_INTERCEPTOR(void, free, void *p) {
717   if (p == 0)
718     return;
719   if (in_symbolizer())
720     return InternalFree(p);
721   invoke_free_hook(p);
722   SCOPED_INTERCEPTOR_RAW(free, p);
723   user_free(thr, pc, p);
724 }
725 
726 TSAN_INTERCEPTOR(void, cfree, void *p) {
727   if (p == 0)
728     return;
729   if (in_symbolizer())
730     return InternalFree(p);
731   invoke_free_hook(p);
732   SCOPED_INTERCEPTOR_RAW(cfree, p);
733   user_free(thr, pc, p);
734 }
735 
736 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
737   SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
738   return user_alloc_usable_size(p);
739 }
740 #endif
741 
742 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
743   SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
744   uptr srclen = internal_strlen(src);
745   MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
746   MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
747   return REAL(strcpy)(dst, src);
748 }
749 
750 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
751   SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
752   uptr srclen = internal_strnlen(src, n);
753   MemoryAccessRange(thr, pc, (uptr)dst, n, true);
754   MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
755   return REAL(strncpy)(dst, src, n);
756 }
757 
758 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
759   SCOPED_TSAN_INTERCEPTOR(strdup, str);
760   // strdup will call malloc, so no instrumentation is required here.
761   return REAL(strdup)(str);
762 }
763 
764 // Zero out addr if it points into shadow memory and was provided as a hint
765 // only, i.e., MAP_FIXED is not set.
766 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
767   if (*addr) {
768     if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
769       if (flags & MAP_FIXED) {
770         errno = errno_EINVAL;
771         return false;
772       } else {
773         *addr = 0;
774       }
775     }
776   }
777   return true;
778 }
779 
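// Common implementation of the mmap interceptors: sanitize the hint address,
// call the real mmap, record the fd access, and imitate a write to (or reset)
// the shadow of the newly mapped range.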
780 template <class Mmap>
781 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
782                               void *addr, SIZE_T sz, int prot, int flags,
783                               int fd, OFF64_T off) {
784   if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
785   void *res = real_mmap(addr, sz, prot, flags, fd, off);
786   if (res != MAP_FAILED) {
787     if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
788       Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
789              addr, (void*)sz, res);
790       Die();
791     }
792     if (fd > 0) FdAccess(thr, pc, fd);
793     MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
794   }
795   return res;
796 }
797 
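// Common implementation of the munmap interceptors: reset the shadow of the
// unmapped range before calling the real munmap.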
798 template <class Munmap>
799 static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
800                                 void *addr, SIZE_T sz) {
801   UnmapShadow(thr, (uptr)addr, sz);
802   int res = real_munmap(addr, sz);
803   return res;
804 }
805 
806 #if SANITIZER_LINUX
807 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
808   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
809   return user_memalign(thr, pc, align, sz);
810 }
811 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
812 #else
813 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
814 #endif
815 
816 #if !SANITIZER_APPLE
817 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
818   if (in_symbolizer())
819     return InternalAlloc(sz, nullptr, align);
820   SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
821   return user_aligned_alloc(thr, pc, align, sz);
822 }
823 
824 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
825   if (in_symbolizer())
826     return InternalAlloc(sz, nullptr, GetPageSizeCached());
827   SCOPED_INTERCEPTOR_RAW(valloc, sz);
828   return user_valloc(thr, pc, sz);
829 }
830 #endif
831 
832 #if SANITIZER_LINUX
833 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
834   if (in_symbolizer()) {
835     uptr PageSize = GetPageSizeCached();
836     sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
837     return InternalAlloc(sz, nullptr, PageSize);
838   }
839   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
840   return user_pvalloc(thr, pc, sz);
841 }
842 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
843 #else
844 #define TSAN_MAYBE_INTERCEPT_PVALLOC
845 #endif
846 
847 #if !SANITIZER_APPLE
848 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
849   if (in_symbolizer()) {
850     void *p = InternalAlloc(sz, nullptr, align);
851     if (!p)
852       return errno_ENOMEM;
853     *memptr = p;
854     return 0;
855   }
856   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
857   return user_posix_memalign(thr, pc, memptr, align, sz);
858 }
859 #endif
860 
861 // Both __cxa_guard_acquire and pthread_once 0-initialize
862 // the object initially. pthread_once does not have any
863 // other ABI requirements. __cxa_guard_acquire assumes
864 // that any non-0 value in the first byte means that
865 // initialization is completed. Contents of the remaining
866 // bytes are up to us.
867 constexpr u32 kGuardInit = 0;
868 constexpr u32 kGuardDone = 1;
869 constexpr u32 kGuardRunning = 1 << 16;
870 constexpr u32 kGuardWaiter = 1 << 17;
871 
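// Returns 1 if the caller won the race and must run the guarded initialization
// (the guard moved from kGuardInit to kGuardRunning). Returns 0 once
// initialization has already completed (performing the acquire unless the
// thread is in an ignored lib). Otherwise blocks on a futex until the
// initializing thread releases the guard.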
872 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
873                          bool blocking_hooks = true) {
874   if (blocking_hooks)
875     OnPotentiallyBlockingRegionBegin();
876   auto on_exit = at_scope_exit([blocking_hooks] {
877     if (blocking_hooks)
878       OnPotentiallyBlockingRegionEnd();
879   });
880 
881   for (;;) {
882     u32 cmp = atomic_load(g, memory_order_acquire);
883     if (cmp == kGuardInit) {
884       if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
885                                          memory_order_relaxed))
886         return 1;
887     } else if (cmp == kGuardDone) {
888       if (!thr->in_ignored_lib)
889         Acquire(thr, pc, (uptr)g);
890       return 0;
891     } else {
892       if ((cmp & kGuardWaiter) ||
893           atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
894                                          memory_order_relaxed))
895         FutexWait(g, cmp | kGuardWaiter);
896     }
897   }
898 }
899 
900 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
901                           u32 v) {
902   if (!thr->in_ignored_lib)
903     Release(thr, pc, (uptr)g);
904   u32 old = atomic_exchange(g, v, memory_order_release);
905   if (old & kGuardWaiter)
906     FutexWake(g, 1 << 30);
907 }
908 
// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are deliberately defined as weak functions (so that they don't
// cause link errors when the user defines them as well), which means they
// silently auto-disable themselves when such a symbol is already present in
// the binary. If we link libstdc++ statically, it will bring its own
// __cxa_guard_acquire, which will silently replace our interceptor. That's why
// on Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X we don't support static linking, so we just use a regular
// interceptor.
919 #if SANITIZER_APPLE
920 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
921 #else
922 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
923   extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
924 #endif
925 
926 // Used in thread-safe function static initialization.
927 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
928   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
929   return guard_acquire(thr, pc, g);
930 }
931 
932 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
933   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
934   guard_release(thr, pc, g, kGuardDone);
935 }
936 
937 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
938   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
939   guard_release(thr, pc, g, kGuardInit);
940 }
941 
942 namespace __tsan {
943 void DestroyThreadState() {
944   ThreadState *thr = cur_thread();
945   Processor *proc = thr->proc();
946   ThreadFinish(thr);
947   ProcUnwire(proc, thr);
948   ProcDestroy(proc);
949   DTLS_Destroy();
950   cur_thread_finalize();
951 }
952 
953 void PlatformCleanUpThreadState(ThreadState *thr) {
954   ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
955       &thr->signal_ctx, memory_order_relaxed);
956   if (sctx) {
957     atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
958     UnmapOrDie(sctx, sizeof(*sctx));
959   }
960 }
961 }  // namespace __tsan
962 
963 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
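// Pthread key destructor: while the destructor iteration count is above 1,
// re-arm the key with a decremented count and return, so that the TSan thread
// state is destroyed only on the last destructor iteration.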
964 static void thread_finalize(void *v) {
965   uptr iter = (uptr)v;
966   if (iter > 1) {
967     if (pthread_setspecific(interceptor_ctx()->finalize_key,
968         (void*)(iter - 1))) {
969       Printf("ThreadSanitizer: failed to set thread key\n");
970       Die();
971     }
972     return;
973   }
974   DestroyThreadState();
975 }
976 #endif
977 
978 
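// Arguments passed from pthread_create to __tsan_thread_start_func. The parent
// posts 'created' once ThreadCreate has registered the new thread; the child
// posts 'started' once ThreadStart has finished (see the comment in
// pthread_create below).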
979 struct ThreadParam {
980   void* (*callback)(void *arg);
981   void *param;
982   Tid tid;
983   Semaphore created;
984   Semaphore started;
985 };
986 
987 extern "C" void *__tsan_thread_start_func(void *arg) {
988   ThreadParam *p = (ThreadParam*)arg;
989   void* (*callback)(void *arg) = p->callback;
990   void *param = p->param;
991   {
992     ThreadState *thr = cur_thread_init();
993     // Thread-local state is not initialized yet.
994     ScopedIgnoreInterceptors ignore;
995 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
996     ThreadIgnoreBegin(thr, 0);
997     if (pthread_setspecific(interceptor_ctx()->finalize_key,
998                             (void *)GetPthreadDestructorIterations())) {
999       Printf("ThreadSanitizer: failed to set thread key\n");
1000       Die();
1001     }
1002     ThreadIgnoreEnd(thr);
1003 #endif
1004     p->created.Wait();
1005     Processor *proc = ProcCreate();
1006     ProcWire(proc, thr);
1007     ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1008     p->started.Post();
1009   }
1010   void *res = callback(param);
  // Prevent the callback from being tail-called;
  // it mixes up stack traces.
1013   volatile int foo = 42;
1014   foo++;
1015   return res;
1016 }
1017 
1018 TSAN_INTERCEPTOR(int, pthread_create,
1019     void *th, void *attr, void *(*callback)(void*), void * param) {
1020   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1021 
1022   MaybeSpawnBackgroundThread();
1023 
1024   if (ctx->after_multithreaded_fork) {
1025     if (flags()->die_after_fork) {
1026       Report("ThreadSanitizer: starting new threads after multi-threaded "
1027           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1028       Die();
1029     } else {
1030       VPrintf(1,
1031               "ThreadSanitizer: starting new threads after multi-threaded "
1032               "fork is not supported (pid %lu). Continuing because of "
1033               "die_after_fork=0, but you are on your own\n",
1034               internal_getpid());
1035     }
1036   }
1037   __sanitizer_pthread_attr_t myattr;
1038   if (attr == 0) {
1039     pthread_attr_init(&myattr);
1040     attr = &myattr;
1041   }
1042   int detached = 0;
1043   REAL(pthread_attr_getdetachstate)(attr, &detached);
1044   AdjustStackSize(attr);
1045 
1046   ThreadParam p;
1047   p.callback = callback;
1048   p.param = param;
1049   p.tid = kMainTid;
1050   int res = -1;
1051   {
1052     // Otherwise we see false positives in pthread stack manipulation.
1053     ScopedIgnoreInterceptors ignore;
1054     ThreadIgnoreBegin(thr, pc);
1055     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1056     ThreadIgnoreEnd(thr);
1057   }
1058   if (res == 0) {
1059     p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1060     CHECK_NE(p.tid, kMainTid);
1061     // Synchronization on p.tid serves two purposes:
1062     // 1. ThreadCreate must finish before the new thread starts.
1063     //    Otherwise the new thread can call pthread_detach, but the pthread_t
1064     //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1065     // 2. ThreadStart must finish before this thread continues.
1066     //    Otherwise, this thread can call pthread_detach and reset thr->sync
1067     //    before the new thread got a chance to acquire from it in ThreadStart.
1068     p.created.Post();
1069     p.started.Wait();
1070   }
1071   if (attr == &myattr)
1072     pthread_attr_destroy(&myattr);
1073   return res;
1074 }
1075 
1076 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1077   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1078   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1079   ThreadIgnoreBegin(thr, pc);
1080   int res = BLOCK_REAL(pthread_join)(th, ret);
1081   ThreadIgnoreEnd(thr);
1082   if (res == 0) {
1083     ThreadJoin(thr, pc, tid);
1084   }
1085   return res;
1086 }
1087 
1088 DEFINE_REAL_PTHREAD_FUNCTIONS
1089 
1090 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1091   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1092   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1093   int res = REAL(pthread_detach)(th);
1094   if (res == 0) {
1095     ThreadDetach(thr, pc, tid);
1096   }
1097   return res;
1098 }
1099 
1100 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1101   {
1102     SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1103 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
1104     CHECK_EQ(thr, &cur_thread_placeholder);
1105 #endif
1106   }
1107   REAL(pthread_exit)(retval);
1108 }
1109 
1110 #if SANITIZER_LINUX
1111 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1112   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1113   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1114   ThreadIgnoreBegin(thr, pc);
1115   int res = REAL(pthread_tryjoin_np)(th, ret);
1116   ThreadIgnoreEnd(thr);
1117   if (res == 0)
1118     ThreadJoin(thr, pc, tid);
1119   else
1120     ThreadNotJoined(thr, pc, tid, (uptr)th);
1121   return res;
1122 }
1123 
1124 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1125                  const struct timespec *abstime) {
1126   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1127   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1128   ThreadIgnoreBegin(thr, pc);
1129   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1130   ThreadIgnoreEnd(thr);
1131   if (res == 0)
1132     ThreadJoin(thr, pc, tid);
1133   else
1134     ThreadNotJoined(thr, pc, tid, (uptr)th);
1135   return res;
1136 }
1137 #endif
1138 
// Problem:
// The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2), and
// pthread_cond_t has a different size in the two versions.
// If we call the new REAL functions for an old pthread_cond_t, they will
// corrupt the memory after the pthread_cond_t (the old cond is smaller).
// If we call the old REAL functions for a new pthread_cond_t, we will lose some
// functionality (e.g. the old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of the interceptors as well,
// but this is messy; in particular, it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support some
// features, e.g. pthread_condattr_getpshared).
1154 static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side, large enough to hold
  // any pthread_cond_t object. Always call the new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // the first word of pthread_cond_t to zero.
  // All of this is relevant only for Linux.
1162   if (!common_flags()->legacy_pthread_cond)
1163     return c;
1164   atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1165   uptr cond = atomic_load(p, memory_order_acquire);
1166   if (!force && cond != 0)
1167     return (void*)cond;
1168   void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1169   internal_memset(newcond, 0, pthread_cond_t_sz);
1170   if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1171       memory_order_acq_rel))
1172     return newcond;
1173   WRAP(free)(newcond);
1174   return (void*)cond;
1175 }
1176 
1177 namespace {
1178 
1179 template <class Fn>
1180 struct CondMutexUnlockCtx {
1181   ScopedInterceptor *si;
1182   ThreadState *thr;
1183   uptr pc;
1184   void *m;
1185   void *c;
1186   const Fn &fn;
1187 
1188   int Cancel() const { return fn(); }
1189   void Unlock() const;
1190 };
1191 
1192 template <class Fn>
1193 void CondMutexUnlockCtx<Fn>::Unlock() const {
  // The pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't
  // run since the thread is cancelled, so we have to execute them manually
  // (the thread can still run some user code due to pthread_cleanup_push).
1199   CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1200   atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1201   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1202   // Undo BlockingCall ctor effects.
1203   thr->ignore_interceptors--;
1204   si->~ScopedInterceptor();
1205 }
1206 }  // namespace
1207 
1208 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1209   void *cond = init_cond(c, true);
1210   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1211   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1212   return REAL(pthread_cond_init)(cond, a);
1213 }
1214 
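// Common body of the pthread_cond_*wait interceptors: models the mutex unlock,
// runs the real wait as a blocking call with a cancellation cleanup handler
// (so the mutex re-lock is still modeled if the thread is cancelled), and then
// models the mutex re-acquisition.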
1215 template <class Fn>
1216 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1217               void *c, void *m) {
1218   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1219   MutexUnlock(thr, pc, (uptr)m);
1220   int res = 0;
1221   // This ensures that we handle mutex lock even in case of pthread_cancel.
1222   // See test/tsan/cond_cancel.cpp.
1223   {
1224     // Enable signal delivery while the thread is blocked.
1225     BlockingCall bc(thr);
1226     CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1227     res = call_pthread_cancel_with_cleanup(
1228         [](void *arg) -> int {
1229           return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1230         },
1231         [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1232         &arg);
1233   }
1234   if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1235   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1236   return res;
1237 }
1238 
1239 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1240   void *cond = init_cond(c);
1241   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1242   return cond_wait(
1243       thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1244       m);
1245 }
1246 
1247 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1248   void *cond = init_cond(c);
1249   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1250   return cond_wait(
1251       thr, pc, &si,
1252       [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1253       m);
1254 }
1255 
1256 #if SANITIZER_LINUX
1257 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1258             __sanitizer_clockid_t clock, void *abstime) {
1259   void *cond = init_cond(c);
1260   SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1261   return cond_wait(
1262       thr, pc, &si,
1263       [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1264       cond, m);
1265 }
1266 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1267 #else
1268 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1269 #endif
1270 
1271 #if SANITIZER_APPLE
1272 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1273             void *reltime) {
1274   void *cond = init_cond(c);
1275   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1276   return cond_wait(
1277       thr, pc, &si,
1278       [=]() {
1279         return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1280       },
1281       cond, m);
1282 }
1283 #endif
1284 
1285 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1286   void *cond = init_cond(c);
1287   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1288   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1289   return REAL(pthread_cond_signal)(cond);
1290 }
1291 
1292 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1293   void *cond = init_cond(c);
1294   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1295   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1296   return REAL(pthread_cond_broadcast)(cond);
1297 }
1298 
1299 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1300   void *cond = init_cond(c);
1301   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1302   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1303   int res = REAL(pthread_cond_destroy)(cond);
1304   if (common_flags()->legacy_pthread_cond) {
1305     // Free our aux cond and zero the pointer to not leave dangling pointers.
1306     WRAP(free)(cond);
1307     atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1308   }
1309   return res;
1310 }
1311 
1312 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1313   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1314   int res = REAL(pthread_mutex_init)(m, a);
1315   if (res == 0) {
1316     u32 flagz = 0;
1317     if (a) {
1318       int type = 0;
1319       if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1320         if (type == PTHREAD_MUTEX_RECURSIVE ||
1321             type == PTHREAD_MUTEX_RECURSIVE_NP)
1322           flagz |= MutexFlagWriteReentrant;
1323     }
1324     MutexCreate(thr, pc, (uptr)m, flagz);
1325   }
1326   return res;
1327 }
1328 
1329 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1330   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1331   int res = REAL(pthread_mutex_destroy)(m);
1332   if (res == 0 || res == errno_EBUSY) {
1333     MutexDestroy(thr, pc, (uptr)m);
1334   }
1335   return res;
1336 }
1337 
1338 TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1339   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1340   MutexPreLock(thr, pc, (uptr)m);
1341   int res = REAL(pthread_mutex_lock)(m);
1342   if (res == errno_EOWNERDEAD)
1343     MutexRepair(thr, pc, (uptr)m);
1344   if (res == 0 || res == errno_EOWNERDEAD)
1345     MutexPostLock(thr, pc, (uptr)m);
1346   if (res == errno_EINVAL)
1347     MutexInvalidAccess(thr, pc, (uptr)m);
1348   return res;
1349 }
1350 
1351 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1352   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1353   int res = REAL(pthread_mutex_trylock)(m);
1354   if (res == errno_EOWNERDEAD)
1355     MutexRepair(thr, pc, (uptr)m);
1356   if (res == 0 || res == errno_EOWNERDEAD)
1357     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1358   return res;
1359 }
1360 
1361 #if !SANITIZER_APPLE
1362 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1363   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1364   int res = REAL(pthread_mutex_timedlock)(m, abstime);
1365   if (res == 0) {
1366     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1367   }
1368   return res;
1369 }
1370 #endif
1371 
1372 TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1373   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1374   MutexUnlock(thr, pc, (uptr)m);
1375   int res = REAL(pthread_mutex_unlock)(m);
1376   if (res == errno_EINVAL)
1377     MutexInvalidAccess(thr, pc, (uptr)m);
1378   return res;
1379 }
1380 
1381 #if SANITIZER_GLIBC
1382 #  if !__GLIBC_PREREQ(2, 34)
1383 // glibc 2.34 applies a non-default version for the two functions. They are no
1384 // longer expected to be intercepted by programs.
1385 TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1386   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1387   MutexPreLock(thr, pc, (uptr)m);
1388   int res = REAL(__pthread_mutex_lock)(m);
1389   if (res == errno_EOWNERDEAD)
1390     MutexRepair(thr, pc, (uptr)m);
1391   if (res == 0 || res == errno_EOWNERDEAD)
1392     MutexPostLock(thr, pc, (uptr)m);
1393   if (res == errno_EINVAL)
1394     MutexInvalidAccess(thr, pc, (uptr)m);
1395   return res;
1396 }
1397 
1398 TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1399   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1400   MutexUnlock(thr, pc, (uptr)m);
1401   int res = REAL(__pthread_mutex_unlock)(m);
1402   if (res == errno_EINVAL)
1403     MutexInvalidAccess(thr, pc, (uptr)m);
1404   return res;
1405 }
1406 #  endif
1407 #endif
1408 
1409 #if !SANITIZER_APPLE
1410 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1411   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1412   int res = REAL(pthread_spin_init)(m, pshared);
1413   if (res == 0) {
1414     MutexCreate(thr, pc, (uptr)m);
1415   }
1416   return res;
1417 }
1418 
1419 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1420   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1421   int res = REAL(pthread_spin_destroy)(m);
1422   if (res == 0) {
1423     MutexDestroy(thr, pc, (uptr)m);
1424   }
1425   return res;
1426 }
1427 
1428 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1429   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1430   MutexPreLock(thr, pc, (uptr)m);
1431   int res = REAL(pthread_spin_lock)(m);
1432   if (res == 0) {
1433     MutexPostLock(thr, pc, (uptr)m);
1434   }
1435   return res;
1436 }
1437 
1438 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1439   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1440   int res = REAL(pthread_spin_trylock)(m);
1441   if (res == 0) {
1442     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1443   }
1444   return res;
1445 }
1446 
1447 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1448   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1449   MutexUnlock(thr, pc, (uptr)m);
1450   int res = REAL(pthread_spin_unlock)(m);
1451   return res;
1452 }
1453 #endif
1454 
1455 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1456   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1457   int res = REAL(pthread_rwlock_init)(m, a);
1458   if (res == 0) {
1459     MutexCreate(thr, pc, (uptr)m);
1460   }
1461   return res;
1462 }
1463 
1464 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1465   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1466   int res = REAL(pthread_rwlock_destroy)(m);
1467   if (res == 0) {
1468     MutexDestroy(thr, pc, (uptr)m);
1469   }
1470   return res;
1471 }
1472 
1473 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1474   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1475   MutexPreReadLock(thr, pc, (uptr)m);
1476   int res = REAL(pthread_rwlock_rdlock)(m);
1477   if (res == 0) {
1478     MutexPostReadLock(thr, pc, (uptr)m);
1479   }
1480   return res;
1481 }
1482 
1483 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1484   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1485   int res = REAL(pthread_rwlock_tryrdlock)(m);
1486   if (res == 0) {
1487     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1488   }
1489   return res;
1490 }
1491 
1492 #if !SANITIZER_APPLE
1493 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1494   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1495   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1496   if (res == 0) {
1497     MutexPostReadLock(thr, pc, (uptr)m);
1498   }
1499   return res;
1500 }
1501 #endif
1502 
1503 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1504   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1505   MutexPreLock(thr, pc, (uptr)m);
1506   int res = REAL(pthread_rwlock_wrlock)(m);
1507   if (res == 0) {
1508     MutexPostLock(thr, pc, (uptr)m);
1509   }
1510   return res;
1511 }
1512 
1513 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1514   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1515   int res = REAL(pthread_rwlock_trywrlock)(m);
1516   if (res == 0) {
1517     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1518   }
1519   return res;
1520 }
1521 
1522 #if !SANITIZER_APPLE
1523 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1524   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1525   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1526   if (res == 0) {
1527     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1528   }
1529   return res;
1530 }
1531 #endif
1532 
1533 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1534   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1535   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1536   int res = REAL(pthread_rwlock_unlock)(m);
1537   return res;
1538 }
1539 
1540 #if !SANITIZER_APPLE
1541 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1542   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1543   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1544   int res = REAL(pthread_barrier_init)(b, a, count);
1545   return res;
1546 }
1547 
1548 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1549   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1550   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1551   int res = REAL(pthread_barrier_destroy)(b);
1552   return res;
1553 }
1554 
1555 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1556   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1557   Release(thr, pc, (uptr)b);
1558   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1559   int res = REAL(pthread_barrier_wait)(b);
1560   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1561   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1562     Acquire(thr, pc, (uptr)b);
1563   }
1564   return res;
1565 }
1566 #endif
1567 
1568 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1569   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1570   if (o == 0 || f == 0)
1571     return errno_EINVAL;
1572   atomic_uint32_t *a;
1573 
1574   if (SANITIZER_APPLE)
1575     a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1576   else if (SANITIZER_NETBSD)
1577     a = static_cast<atomic_uint32_t*>
1578           ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1579   else
1580     a = static_cast<atomic_uint32_t*>(o);
1581 
  // Mac OS X appears to use pthread_once() in contexts where calling the
  // BlockingRegion hooks results in crashes due to too little stack space.
1584   if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1585     (*f)();
1586     guard_release(thr, pc, a, kGuardDone);
1587   }
1588   return 0;
1589 }
1590 
1591 #if SANITIZER_GLIBC
1592 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1593   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1594   if (fd > 0)
1595     FdAccess(thr, pc, fd);
1596   return REAL(__fxstat)(version, fd, buf);
1597 }
1598 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1599 #else
1600 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1601 #endif
1602 
1603 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1604 #if SANITIZER_GLIBC
1605   SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1606   if (fd > 0)
1607     FdAccess(thr, pc, fd);
1608   return REAL(__fxstat)(0, fd, buf);
1609 #else
1610   SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1611   if (fd > 0)
1612     FdAccess(thr, pc, fd);
1613   return REAL(fstat)(fd, buf);
1614 #endif
1615 }
1616 
1617 #if SANITIZER_GLIBC
1618 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1619   SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1620   if (fd > 0)
1621     FdAccess(thr, pc, fd);
1622   return REAL(__fxstat64)(version, fd, buf);
1623 }
1624 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1625 #else
1626 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1627 #endif
1628 
1629 #if SANITIZER_GLIBC
1630 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1631   SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1632   if (fd > 0)
1633     FdAccess(thr, pc, fd);
1634   return REAL(__fxstat64)(0, fd, buf);
1635 }
1636 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1637 #else
1638 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1639 #endif
1640 
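// Interceptors that create file descriptors (open/open64/creat/creat64)
// register the new descriptor via FdFileCreate so that subsequent operations
// on it participate in synchronization tracking.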
1641 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1642   va_list ap;
1643   va_start(ap, oflag);
1644   mode_t mode = va_arg(ap, int);
1645   va_end(ap);
1646   SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1647   READ_STRING(thr, pc, name, 0);
1648   int fd = REAL(open)(name, oflag, mode);
1649   if (fd >= 0)
1650     FdFileCreate(thr, pc, fd);
1651   return fd;
1652 }
1653 
1654 #if SANITIZER_LINUX
1655 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1656   va_list ap;
1657   va_start(ap, oflag);
1658   mode_t mode = va_arg(ap, int);
1659   va_end(ap);
1660   SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1661   READ_STRING(thr, pc, name, 0);
1662   int fd = REAL(open64)(name, oflag, mode);
1663   if (fd >= 0)
1664     FdFileCreate(thr, pc, fd);
1665   return fd;
1666 }
1667 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1668 #else
1669 #define TSAN_MAYBE_INTERCEPT_OPEN64
1670 #endif
1671 
1672 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1673   SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1674   READ_STRING(thr, pc, name, 0);
1675   int fd = REAL(creat)(name, mode);
1676   if (fd >= 0)
1677     FdFileCreate(thr, pc, fd);
1678   return fd;
1679 }
1680 
1681 #if SANITIZER_LINUX
1682 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1683   SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1684   READ_STRING(thr, pc, name, 0);
1685   int fd = REAL(creat64)(name, mode);
1686   if (fd >= 0)
1687     FdFileCreate(thr, pc, fd);
1688   return fd;
1689 }
1690 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1691 #else
1692 #define TSAN_MAYBE_INTERCEPT_CREAT64
1693 #endif
1694 
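// The dup family propagates the synchronization state of oldfd to the newly
// returned descriptor; dup2/dup3 may also replace an already-open newfd.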
1695 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1696   SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1697   int newfd = REAL(dup)(oldfd);
1698   if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1699     FdDup(thr, pc, oldfd, newfd, true);
1700   return newfd;
1701 }
1702 
1703 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1704   SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1705   int newfd2 = REAL(dup2)(oldfd, newfd);
1706   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1707     FdDup(thr, pc, oldfd, newfd2, false);
1708   return newfd2;
1709 }
1710 
1711 #if !SANITIZER_APPLE
1712 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1713   SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1714   int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1715   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1716     FdDup(thr, pc, oldfd, newfd2, false);
1717   return newfd2;
1718 }
1719 #endif
1720 
1721 #if SANITIZER_LINUX
1722 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1723   SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1724   int fd = REAL(eventfd)(initval, flags);
1725   if (fd >= 0)
1726     FdEventCreate(thr, pc, fd);
1727   return fd;
1728 }
1729 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1730 #else
1731 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1732 #endif
1733 
1734 #if SANITIZER_LINUX
1735 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1736   SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1737   FdClose(thr, pc, fd);
1738   fd = REAL(signalfd)(fd, mask, flags);
1739   if (!MustIgnoreInterceptor(thr))
1740     FdSignalCreate(thr, pc, fd);
1741   return fd;
1742 }
1743 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1744 #else
1745 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1746 #endif
1747 
1748 #if SANITIZER_LINUX
1749 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1750   SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1751   int fd = REAL(inotify_init)(fake);
1752   if (fd >= 0)
1753     FdInotifyCreate(thr, pc, fd);
1754   return fd;
1755 }
1756 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1757 #else
1758 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1759 #endif
1760 
1761 #if SANITIZER_LINUX
1762 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1763   SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1764   int fd = REAL(inotify_init1)(flags);
1765   if (fd >= 0)
1766     FdInotifyCreate(thr, pc, fd);
1767   return fd;
1768 }
1769 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1770 #else
1771 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1772 #endif
1773 
1774 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1775   SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1776   int fd = REAL(socket)(domain, type, protocol);
1777   if (fd >= 0)
1778     FdSocketCreate(thr, pc, fd);
1779   return fd;
1780 }
1781 
1782 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1783   SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1784   int res = REAL(socketpair)(domain, type, protocol, fd);
1785   if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1786     FdPipeCreate(thr, pc, fd[0], fd[1]);
1787   return res;
1788 }
1789 
1790 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1791   SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1792   FdSocketConnecting(thr, pc, fd);
1793   int res = REAL(connect)(fd, addr, addrlen);
1794   if (res == 0 && fd >= 0)
1795     FdSocketConnect(thr, pc, fd);
1796   return res;
1797 }
1798 
1799 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1800   SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1801   int res = REAL(bind)(fd, addr, addrlen);
1802   if (fd > 0 && res == 0)
1803     FdAccess(thr, pc, fd);
1804   return res;
1805 }
1806 
1807 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1808   SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1809   int res = REAL(listen)(fd, backlog);
1810   if (fd > 0 && res == 0)
1811     FdAccess(thr, pc, fd);
1812   return res;
1813 }
1814 
1815 TSAN_INTERCEPTOR(int, close, int fd) {
1816   SCOPED_INTERCEPTOR_RAW(close, fd);
1817   if (!in_symbolizer())
1818     FdClose(thr, pc, fd);
1819   return REAL(close)(fd);
1820 }
1821 
1822 #if SANITIZER_LINUX
1823 TSAN_INTERCEPTOR(int, __close, int fd) {
1824   SCOPED_INTERCEPTOR_RAW(__close, fd);
1825   FdClose(thr, pc, fd);
1826   return REAL(__close)(fd);
1827 }
1828 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1829 #else
1830 #define TSAN_MAYBE_INTERCEPT___CLOSE
1831 #endif
1832 
1833 // glibc guts
1834 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1835 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1836   SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1837   int fds[64];
1838   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1839   for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1840   REAL(__res_iclose)(state, free_addr);
1841 }
1842 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1843 #else
1844 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1845 #endif
1846 
1847 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1848   SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1849   int res = REAL(pipe)(pipefd);
1850   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1851     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1852   return res;
1853 }
1854 
1855 #if !SANITIZER_APPLE
1856 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1857   SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1858   int res = REAL(pipe2)(pipefd, flags);
1859   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1860     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1861   return res;
1862 }
1863 #endif
1864 
1865 TSAN_INTERCEPTOR(int, unlink, char *path) {
1866   SCOPED_TSAN_INTERCEPTOR(unlink, path);
1867   Release(thr, pc, File2addr(path));
1868   int res = REAL(unlink)(path);
1869   return res;
1870 }
1871 
1872 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1873   SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1874   void *res = REAL(tmpfile)(fake);
1875   if (res) {
1876     int fd = fileno_unlocked(res);
1877     if (fd >= 0)
1878       FdFileCreate(thr, pc, fd);
1879   }
1880   return res;
1881 }
1882 
1883 #if SANITIZER_LINUX
1884 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1885   SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1886   void *res = REAL(tmpfile64)(fake);
1887   if (res) {
1888     int fd = fileno_unlocked(res);
1889     if (fd >= 0)
1890       FdFileCreate(thr, pc, fd);
1891   }
1892   return res;
1893 }
1894 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1895 #else
1896 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1897 #endif
1898 
1899 static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time, so flush only stdout
  // and stderr.
1902   REAL(fflush)(stdout);
1903   REAL(fflush)(stderr);
1904 }
1905 
1906 TSAN_INTERCEPTOR(void, abort, int fake) {
1907   SCOPED_TSAN_INTERCEPTOR(abort, fake);
1908   FlushStreams();
1909   REAL(abort)(fake);
1910 }
1911 
1912 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1913   SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1914   Release(thr, pc, Dir2addr(path));
1915   int res = REAL(rmdir)(path);
1916   return res;
1917 }
1918 
1919 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1920   SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1921   if (dirp) {
1922     int fd = dirfd(dirp);
1923     FdClose(thr, pc, fd);
1924   }
1925   return REAL(closedir)(dirp);
1926 }
1927 
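// The epoll interceptors model a happens-before edge from the thread that
// registers an fd (epoll_ctl performs a release on the epoll fd) to the
// thread that observes readiness (epoll_wait/epoll_pwait* acquire on it).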
1928 #if SANITIZER_LINUX
1929 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1930   SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1931   int fd = REAL(epoll_create)(size);
1932   if (fd >= 0)
1933     FdPollCreate(thr, pc, fd);
1934   return fd;
1935 }
1936 
1937 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1938   SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1939   int fd = REAL(epoll_create1)(flags);
1940   if (fd >= 0)
1941     FdPollCreate(thr, pc, fd);
1942   return fd;
1943 }
1944 
1945 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1946   SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1947   if (epfd >= 0)
1948     FdAccess(thr, pc, epfd);
1949   if (epfd >= 0 && fd >= 0)
1950     FdAccess(thr, pc, fd);
1951   if (op == EPOLL_CTL_ADD && epfd >= 0) {
1952     FdPollAdd(thr, pc, epfd, fd);
1953     FdRelease(thr, pc, epfd);
1954   }
1955   int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1956   return res;
1957 }
1958 
1959 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1960   SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1961   if (epfd >= 0)
1962     FdAccess(thr, pc, epfd);
1963   int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1964   if (res > 0 && epfd >= 0)
1965     FdAcquire(thr, pc, epfd);
1966   return res;
1967 }
1968 
1969 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1970                  void *sigmask) {
1971   SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1972   if (epfd >= 0)
1973     FdAccess(thr, pc, epfd);
1974   int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1975   if (res > 0 && epfd >= 0)
1976     FdAcquire(thr, pc, epfd);
1977   return res;
1978 }
1979 
1980 TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
1981                  void *sigmask) {
1982   SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
1983   // This function is new and may not be present in libc and/or kernel.
  // Since we effectively add it to libc (as it will be probed by the program
  // using dlsym or a weak function pointer), we need to handle the case
  // when it's not present in the actual libc.
1987   if (!REAL(epoll_pwait2)) {
1988     errno = errno_ENOSYS;
1989     return -1;
1990   }
  if (MustIgnoreInterceptor(thr))
    return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
1993   if (epfd >= 0)
1994     FdAccess(thr, pc, epfd);
1995   int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
1996   if (res > 0 && epfd >= 0)
1997     FdAcquire(thr, pc, epfd);
1998   return res;
1999 }
2000 
2001 #  define TSAN_MAYBE_INTERCEPT_EPOLL \
2002     TSAN_INTERCEPT(epoll_create);    \
2003     TSAN_INTERCEPT(epoll_create1);   \
2004     TSAN_INTERCEPT(epoll_ctl);       \
2005     TSAN_INTERCEPT(epoll_wait);      \
2006     TSAN_INTERCEPT(epoll_pwait);     \
2007     TSAN_INTERCEPT(epoll_pwait2)
2008 #else
2009 #define TSAN_MAYBE_INTERCEPT_EPOLL
2010 #endif
2011 
2012 // The following functions are intercepted merely to process pending signals.
// If the program blocks signal X, we must deliver the signal before the
// function returns. Similarly, if the program unblocks a signal (or returns
// from sigsuspend), it's better to deliver the signal straight away.
2016 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2017   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2018   return REAL(sigsuspend)(mask);
2019 }
2020 
2021 TSAN_INTERCEPTOR(int, sigblock, int mask) {
2022   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2023   return REAL(sigblock)(mask);
2024 }
2025 
2026 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2027   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2028   return REAL(sigsetmask)(mask);
2029 }
2030 
2031 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2032     __sanitizer_sigset_t *oldset) {
2033   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2034   return REAL(pthread_sigmask)(how, set, oldset);
2035 }
2036 
2037 namespace __tsan {
2038 
2039 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2040   VarSizeStackTrace stack;
  // StackTrace::GetNextInstructionPc(pc) is used because a return address is
  // expected; OutputReport() will undo this.
2043   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2044   ThreadRegistryLock l(&ctx->thread_registry);
2045   ScopedReport rep(ReportTypeErrnoInSignal);
2046   rep.SetSigNum(sig);
2047   if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2048     rep.AddStack(stack, true);
2049     OutputReport(thr, rep);
2050   }
2051 }
2052 
2053 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2054                                   int sig, __sanitizer_siginfo *info,
2055                                   void *uctx) {
2056   CHECK(thr->slot);
2057   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2058   if (acquire)
2059     Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Signals are generally asynchronous, so if we receive a signal while
  // ignores are enabled, we should disable the ignores. This is critical for
  // sync and interceptor ignores, because otherwise we can miss
  // synchronization and report false races.
2064   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2065   int ignore_interceptors = thr->ignore_interceptors;
2066   int ignore_sync = thr->ignore_sync;
  // For the symbolizer we only process SIGSEGVs synchronously
  // (a bug in the symbolizer or in tsan). But we want to reset
  // in_symbolizer to fail gracefully. The symbolizer and user code
  // use different memory allocators, so if we don't reset
  // in_symbolizer we can get memory allocated with one allocator
  // freed by the other, which can cause more crashes.
2073   int in_symbolizer = thr->in_symbolizer;
2074   if (!ctx->after_multithreaded_fork) {
2075     thr->ignore_reads_and_writes = 0;
2076     thr->fast_state.ClearIgnoreBit();
2077     thr->ignore_interceptors = 0;
2078     thr->ignore_sync = 0;
2079     thr->in_symbolizer = 0;
2080   }
2081   // Ensure that the handler does not spoil errno.
2082   const int saved_errno = errno;
2083   errno = 99;
2084   // This code races with sigaction. Be careful to not read sa_sigaction twice.
2085   // Also need to remember pc for reporting before the call,
2086   // because the handler can reset it.
2087   volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2088                          ? (uptr)sigactions[sig].sigaction
2089                          : (uptr)sigactions[sig].handler;
2090   if (pc != sig_dfl && pc != sig_ign) {
2091     // The callback can be either sa_handler or sa_sigaction.
2092     // They have different signatures, but we assume that passing
2093     // additional arguments to sa_handler works and is harmless.
2094     ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2095   }
2096   if (!ctx->after_multithreaded_fork) {
2097     thr->ignore_reads_and_writes = ignore_reads_and_writes;
2098     if (ignore_reads_and_writes)
2099       thr->fast_state.SetIgnoreBit();
2100     thr->ignore_interceptors = ignore_interceptors;
2101     thr->ignore_sync = ignore_sync;
2102     thr->in_symbolizer = in_symbolizer;
2103   }
  // We do not detect errno spoiling for SIGTERM,
  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
  // and tsan would report a false positive in such cases.
  // It's difficult to properly detect this situation (the reraise),
  // because in the async signal processing case (when the handler is called
  // directly from rtl_generic_sighandler) we have not yet received the
  // reraised signal, and it looks too fragile to intercept all ways to
  // reraise a signal.
2111   if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2112       errno != 99)
2113     ReportErrnoSpoiling(thr, pc, sig);
2114   errno = saved_errno;
2115 }
2116 
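// Delivers all signals queued by sighandler(). All signals are blocked for
// the duration so that newly arriving signals are queued rather than
// delivered recursively.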
2117 void ProcessPendingSignalsImpl(ThreadState *thr) {
2118   atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2119   ThreadSignalContext *sctx = SigCtx(thr);
2120   if (sctx == 0)
2121     return;
2122   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2123   internal_sigfillset(&sctx->emptyset);
2124   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2125   CHECK_EQ(res, 0);
2126   for (int sig = 0; sig < kSigCount; sig++) {
2127     SignalDesc *signal = &sctx->pending_signals[sig];
2128     if (signal->armed) {
2129       signal->armed = false;
2130       CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2131                             &signal->ctx);
2132     }
2133   }
2134   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2135   CHECK_EQ(res, 0);
2136   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2137 }
2138 
2139 }  // namespace __tsan
2140 
2141 static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2142                            __sanitizer_siginfo *info) {
  // If we are sending a signal to ourselves, we must process it now.
2144   if (sctx && sig == sctx->int_signal_send)
2145     return true;
2146 #if SANITIZER_HAS_SIGINFO
2147   // POSIX timers can be configured to send any kind of signal; however, it
2148   // doesn't make any sense to consider a timer signal as synchronous!
2149   if (info->si_code == SI_TIMER)
2150     return false;
2151 #endif
2152   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2153          sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2154 }
2155 
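// Central signal entry point installed by sigaction_impl()/signal_impl().
// Synchronous signals (and signals arriving inside blocking interceptors)
// invoke the user handler immediately; all other signals are queued and
// delivered later by ProcessPendingSignalsImpl().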
2156 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2157   ThreadState *thr = cur_thread_init();
2158   ThreadSignalContext *sctx = SigCtx(thr);
2159   if (sig < 0 || sig >= kSigCount) {
2160     VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2161     return;
2162   }
2163   // Don't mess with synchronous signals.
2164   const bool sync = is_sync_signal(sctx, sig, info);
2165   if (sync ||
      // If we are in a blocking function, we can safely process it now
      // (but check whether we are in a recursive interceptor,
      // i.e. pthread_join()->munmap()).
2169       atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2170     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2171     if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2172       atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2173       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2174       atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2175     } else {
      // Be very conservative about when we do acquire in this case.
      // It's unsafe to do acquire in async handlers, because ThreadState
      // can be in an inconsistent state.
      // SIGSYS looks relatively safe -- it's synchronous and can actually
      // need some global state.
2181       bool acq = (sig == SIGSYS);
2182       CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2183     }
2184     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2185     return;
2186   }
2187 
2188   if (sctx == 0)
2189     return;
2190   SignalDesc *signal = &sctx->pending_signals[sig];
2191   if (signal->armed == false) {
2192     signal->armed = true;
2193     internal_memcpy(&signal->siginfo, info, sizeof(*info));
2194     internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2195     atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2196   }
2197 }
2198 
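// raise/kill/pthread_kill targeted at the current thread record the signal
// number in int_signal_send so that is_sync_signal() delivers it in place
// rather than deferring it.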
2199 TSAN_INTERCEPTOR(int, raise, int sig) {
2200   SCOPED_TSAN_INTERCEPTOR(raise, sig);
2201   ThreadSignalContext *sctx = SigCtx(thr);
2202   CHECK_NE(sctx, 0);
2203   int prev = sctx->int_signal_send;
2204   sctx->int_signal_send = sig;
2205   int res = REAL(raise)(sig);
2206   CHECK_EQ(sctx->int_signal_send, sig);
2207   sctx->int_signal_send = prev;
2208   return res;
2209 }
2210 
2211 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2212   SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2213   ThreadSignalContext *sctx = SigCtx(thr);
2214   CHECK_NE(sctx, 0);
2215   int prev = sctx->int_signal_send;
2216   if (pid == (int)internal_getpid()) {
2217     sctx->int_signal_send = sig;
2218   }
2219   int res = REAL(kill)(pid, sig);
2220   if (pid == (int)internal_getpid()) {
2221     CHECK_EQ(sctx->int_signal_send, sig);
2222     sctx->int_signal_send = prev;
2223   }
2224   return res;
2225 }
2226 
2227 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2228   SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2229   ThreadSignalContext *sctx = SigCtx(thr);
2230   CHECK_NE(sctx, 0);
2231   int prev = sctx->int_signal_send;
2232   bool self = pthread_equal(tid, pthread_self());
2233   if (self)
2234     sctx->int_signal_send = sig;
2235   int res = REAL(pthread_kill)(tid, sig);
2236   if (self) {
2237     CHECK_EQ(sctx->int_signal_send, sig);
2238     sctx->int_signal_send = prev;
2239   }
2240   return res;
2241 }
2242 
2243 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2244   SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2245   // It's intercepted merely to process pending signals.
2246   return REAL(gettimeofday)(tv, tz);
2247 }
2248 
2249 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2250     void *hints, void *rv) {
2251   SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
  // We miss atomic synchronization in getaddrinfo,
  // and can report a false race between malloc and free
  // inside of getaddrinfo. So ignore memory accesses.
2255   ThreadIgnoreBegin(thr, pc);
2256   int res = REAL(getaddrinfo)(node, service, hints, rv);
2257   ThreadIgnoreEnd(thr);
2258   return res;
2259 }
2260 
2261 TSAN_INTERCEPTOR(int, fork, int fake) {
2262   if (in_symbolizer())
2263     return REAL(fork)(fake);
2264   SCOPED_INTERCEPTOR_RAW(fork, fake);
2265   return REAL(fork)(fake);
2266 }
2267 
2268 void atfork_prepare() {
2269   if (in_symbolizer())
2270     return;
2271   ThreadState *thr = cur_thread();
2272   const uptr pc = StackTrace::GetCurrentPc();
2273   ForkBefore(thr, pc);
2274 }
2275 
2276 void atfork_parent() {
2277   if (in_symbolizer())
2278     return;
2279   ThreadState *thr = cur_thread();
2280   const uptr pc = StackTrace::GetCurrentPc();
2281   ForkParentAfter(thr, pc);
2282 }
2283 
2284 void atfork_child() {
2285   if (in_symbolizer())
2286     return;
2287   ThreadState *thr = cur_thread();
2288   const uptr pc = StackTrace::GetCurrentPc();
2289   ForkChildAfter(thr, pc, true);
2290   FdOnFork(thr, pc);
2291 }
2292 
2293 #if !SANITIZER_IOS
2294 TSAN_INTERCEPTOR(int, vfork, int fake) {
2295   // Some programs (e.g. openjdk) call close for all file descriptors
2296   // in the child process. Under tsan it leads to false positives, because
2297   // address space is shared, so the parent process also thinks that
2298   // the descriptors are closed (while they are actually not).
2299   // This leads to false positives due to missed synchronization.
  // Strictly speaking this is undefined behavior, because the vfork child is
  // not allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
  // We could disable interceptors in the child process. But it's not possible
  // to simply intercept and wrap vfork, because the vfork child is not allowed
  // to return from the function that calls vfork, and that's exactly what
  // we would do. So this would require some assembly trickery as well.
2307   // Instead we simply turn vfork into fork.
2308   return WRAP(fork)(fake);
2309 }
2310 #endif
2311 
2312 #if SANITIZER_LINUX
2313 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2314                  void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2315   SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2316                          child_tid);
2317   struct Arg {
2318     int (*fn)(void *);
2319     void *arg;
2320   };
2321   auto wrapper = +[](void *p) -> int {
2322     auto *thr = cur_thread();
2323     uptr pc = GET_CURRENT_PC();
2324     // Start the background thread for fork, but not for clone.
    // For fork we did this always and it's known to work (or user code has
    // adapted to it). But if we do this for the new clone interceptor, some
    // code (sandbox2) fails. So model what we have done for years and don't
    // start the background thread after clone.
2329     ForkChildAfter(thr, pc, false);
2330     FdOnFork(thr, pc);
2331     auto *arg = static_cast<Arg *>(p);
2332     return arg->fn(arg->arg);
2333   };
2334   ForkBefore(thr, pc);
2335   Arg arg_wrapper = {fn, arg};
2336   int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2337                         child_tid);
2338   ForkParentAfter(thr, pc);
2339   return pid;
2340 }
2341 #endif
2342 
2343 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2344 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2345                                     void *data);
2346 struct dl_iterate_phdr_data {
2347   ThreadState *thr;
2348   uptr pc;
2349   dl_iterate_phdr_cb_t cb;
2350   void *data;
2351 };
2352 
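// Shadow for .rodata is tagged with a dedicated value; only reset application
// memory that is not .rodata so that tagging is preserved.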
2353 static bool IsAppNotRodata(uptr addr) {
2354   return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2355 }
2356 
2357 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2358                               void *data) {
2359   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
  // later accessible in the dl_iterate_phdr callback. But we don't see
  // synchronization inside of the dynamic linker, so we "unpoison" it here in
  // order to not produce false reports. Ignoring malloc/free in dlopen/dlclose
  // is not enough because some libc functions call __libc_dlopen.
2365   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2366     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2367                      internal_strlen(info->dlpi_name));
2368   int res = cbdata->cb(info, size, cbdata->data);
2369   // Perform the check one more time in case info->dlpi_name was overwritten
2370   // by user callback.
2371   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2372     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2373                      internal_strlen(info->dlpi_name));
2374   return res;
2375 }
2376 
2377 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2378   SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2379   dl_iterate_phdr_data cbdata;
2380   cbdata.thr = thr;
2381   cbdata.pc = pc;
2382   cbdata.cb = cb;
2383   cbdata.data = data;
2384   int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2385   return res;
2386 }
2387 #endif
2388 
2389 static int OnExit(ThreadState *thr) {
2390   int status = Finalize(thr);
2391   FlushStreams();
2392   return status;
2393 }
2394 
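// recvmsg() may transfer file descriptors via SCM_RIGHTS; register each
// received descriptor so that later operations on it are tracked.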
2395 #if !SANITIZER_APPLE
2396 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2397     __sanitizer_msghdr *msg) {
2398   int fds[64];
2399   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2400   for (int i = 0; i < cnt; i++)
2401     FdEventCreate(thr, pc, fds[i]);
2402 }
2403 #endif
2404 
2405 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2406 // Causes interceptor recursion (getaddrinfo() and fopen())
2407 #undef SANITIZER_INTERCEPT_GETADDRINFO
2408 // We define our own.
2409 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2410 #define NEED_TLS_GET_ADDR
2411 #endif
2412 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2413 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2414 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2415 
2416 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2417   INTERCEPT_FUNCTION_VER(name, ver)
2418 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2419   (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2420 
2421 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2422   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2423   TsanInterceptorContext _ctx = {thr, pc};                \
2424   ctx = (void *)&_ctx;                                    \
2425   (void)ctx;
2426 
2427 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2428   if (path)                                           \
2429     Acquire(thr, pc, File2addr(path));                \
2430   if (file) {                                         \
2431     int fd = fileno_unlocked(file);                   \
2432     if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2433   }
2434 
2435 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2436   if (file) {                                    \
2437     int fd = fileno_unlocked(file);              \
2438     FdClose(thr, pc, fd);                        \
2439   }
2440 
2441 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2442   ({                                              \
2443     CheckNoDeepBind(filename, flag);              \
2444     ThreadIgnoreBegin(thr, 0);                    \
2445     void *res = REAL(dlopen)(filename, flag);     \
2446     ThreadIgnoreEnd(thr);                         \
2447     res;                                          \
2448   })
2449 
// Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
// (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
// intercepted calls, which can cause deadlocks with ReportRace(), which also
// uses this code.
2454 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2455   ({                                                        \
2456     ScopedIgnoreInterceptors ignore_interceptors;           \
2457     libignore()->OnLibraryLoaded(filename);                 \
2458   })
2459 
2460 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()     \
2461   ({                                              \
2462     ScopedIgnoreInterceptors ignore_interceptors; \
2463     libignore()->OnLibraryUnloaded();             \
2464   })
2465 
2466 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2467   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2468 
2469 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2470   Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2471 
2472 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2473   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2474 
2475 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2476   FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2477 
2478 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2479   FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2480 
2481 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2482   FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2483 
2484 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2485   FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2486 
2487 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2488   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2489 
2490 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name)         \
2491   if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2492     COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name);                     \
2493   else                                                                 \
2494     __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2495 
2496 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2497 
2498 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2499   OnExit(((TsanInterceptorContext *) ctx)->thr)
2500 
2501 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2502                                      off)                                   \
2503   do {                                                                      \
2504     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2505                             off);                                           \
2506   } while (false)
2507 
2508 #define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz)           \
2509   do {                                                          \
2510     return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2511   } while (false)
2512 
2513 #if !SANITIZER_APPLE
2514 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2515   HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2516       ((TsanInterceptorContext *)ctx)->pc, msg)
2517 #endif
2518 
2519 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2520   if (TsanThread *t = GetCurrentThread()) {                                    \
2521     *begin = t->tls_begin();                                                   \
2522     *end = t->tls_end();                                                       \
2523   } else {                                                                     \
2524     *begin = *end = 0;                                                         \
2525   }
2526 
2527 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2528   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2529 
2530 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2531   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2532 
2533 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2534 
2535 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2536                           __sanitizer_sigaction *old);
2537 static __sanitizer_sighandler_ptr signal_impl(int sig,
2538                                               __sanitizer_sighandler_ptr h);
2539 
2540 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2541   { return sigaction_impl(signo, act, oldact); }
2542 
2543 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2544   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2545 
2546 #define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2547 
2548 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2549 
2550 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2551                    __sanitizer_sigaction *old) {
2552   // Note: if we call REAL(sigaction) directly for any reason without proxying
2553   // the signal handler through sighandler, very bad things will happen.
2554   // The handler will run synchronously and corrupt tsan per-thread state.
2555   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2556   if (sig <= 0 || sig >= kSigCount) {
2557     errno = errno_EINVAL;
2558     return -1;
2559   }
2560   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2561   __sanitizer_sigaction old_stored;
2562   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2563   __sanitizer_sigaction newact;
2564   if (act) {
2565     // Copy act into sigactions[sig].
    // Can't use struct copy, because the compiler can emit a call to memcpy.
    // Can't use internal_memcpy, because it copies byte-by-byte,
    // and the signal handler reads the handler concurrently. It could read
    // some bytes from the old value and some bytes from the new value.
    // Use volatile to prevent insertion of memcpy.
2571     sigactions[sig].handler =
2572         *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2573     sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2574     internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2575                     sizeof(sigactions[sig].sa_mask));
2576 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2577     sigactions[sig].sa_restorer = act->sa_restorer;
2578 #endif
2579     internal_memcpy(&newact, act, sizeof(newact));
2580     internal_sigfillset(&newact.sa_mask);
2581     if ((act->sa_flags & SA_SIGINFO) ||
2582         ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2583       newact.sa_flags |= SA_SIGINFO;
2584       newact.sigaction = sighandler;
2585     }
2586     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2587     act = &newact;
2588   }
2589   int res = REAL(sigaction)(sig, act, old);
2590   if (res == 0 && old && old->sigaction == sighandler)
2591     internal_memcpy(old, &old_stored, sizeof(*old));
2592   return res;
2593 }
2594 
2595 static __sanitizer_sighandler_ptr signal_impl(int sig,
2596                                               __sanitizer_sighandler_ptr h) {
2597   __sanitizer_sigaction act;
2598   act.handler = h;
2599   internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2600   act.sa_flags = 0;
2601   __sanitizer_sigaction old;
2602   int res = sigaction_symname(sig, &act, &old);
2603   if (res) return (__sanitizer_sighandler_ptr)sig_err;
2604   return old.handler;
2605 }
2606 
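// Hooks for the syscall wrappers included below. Most of them use
// TSAN_SYSCALL(), which bails out while interceptors are ignored and
// processes pending signals on exit via ScopedSyscall.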
2607 #define TSAN_SYSCALL()             \
2608   ThreadState *thr = cur_thread(); \
2609   if (thr->ignore_interceptors)    \
2610     return;                        \
2611   ScopedSyscall scoped_syscall(thr)
2612 
2613 struct ScopedSyscall {
2614   ThreadState *thr;
2615 
2616   explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2617 
2618   ~ScopedSyscall() {
2619     ProcessPendingSignals(thr);
2620   }
2621 };
2622 
2623 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2624 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2625   TSAN_SYSCALL();
2626   MemoryAccessRange(thr, pc, p, s, write);
2627 }
2628 
2629 static USED void syscall_acquire(uptr pc, uptr addr) {
2630   TSAN_SYSCALL();
2631   Acquire(thr, pc, addr);
2632   DPrintf("syscall_acquire(0x%zx))\n", addr);
2633 }
2634 
2635 static USED void syscall_release(uptr pc, uptr addr) {
2636   TSAN_SYSCALL();
2637   DPrintf("syscall_release(0x%zx)\n", addr);
2638   Release(thr, pc, addr);
2639 }
2640 
2641 static void syscall_fd_close(uptr pc, int fd) {
2642   auto *thr = cur_thread();
2643   FdClose(thr, pc, fd);
2644 }
2645 
2646 static USED void syscall_fd_acquire(uptr pc, int fd) {
2647   TSAN_SYSCALL();
2648   FdAcquire(thr, pc, fd);
2649   DPrintf("syscall_fd_acquire(%d)\n", fd);
2650 }
2651 
2652 static USED void syscall_fd_release(uptr pc, int fd) {
2653   TSAN_SYSCALL();
2654   DPrintf("syscall_fd_release(%d)\n", fd);
2655   FdRelease(thr, pc, fd);
2656 }
2657 
2658 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2659 
2660 static void syscall_post_fork(uptr pc, int pid) {
2661   ThreadState *thr = cur_thread();
2662   if (pid == 0) {
2663     // child
2664     ForkChildAfter(thr, pc, true);
2665     FdOnFork(thr, pc);
2666   } else if (pid > 0) {
2667     // parent
2668     ForkParentAfter(thr, pc);
2669   } else {
2670     // error
2671     ForkParentAfter(thr, pc);
2672   }
2673 }
2674 #endif
2675 
2676 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2677   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2678 
2679 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2680   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2681 
2682 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2683   do {                                       \
2684     (void)(p);                               \
2685     (void)(s);                               \
2686   } while (false)
2687 
2688 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2689   do {                                        \
2690     (void)(p);                                \
2691     (void)(s);                                \
2692   } while (false)
2693 
2694 #define COMMON_SYSCALL_ACQUIRE(addr) \
2695     syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2696 
2697 #define COMMON_SYSCALL_RELEASE(addr) \
2698     syscall_release(GET_CALLER_PC(), (uptr)(addr))
2699 
2700 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2701 
2702 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2703 
2704 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2705 
2706 #define COMMON_SYSCALL_PRE_FORK() \
2707   syscall_pre_fork(GET_CALLER_PC())
2708 
2709 #define COMMON_SYSCALL_POST_FORK(res) \
2710   syscall_post_fork(GET_CALLER_PC(), res)
2711 
2712 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2713 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2714 
2715 #ifdef NEED_TLS_GET_ADDR
2716 
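// Called after the real __tls_get_addr/__tls_get_offset returns. If the
// dynamic linker allocated a new DTLS block, reset its shadow so that stale
// state from a previous mapping does not produce false reports.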
2717 static void handle_tls_addr(void *arg, void *res) {
2718   ThreadState *thr = cur_thread();
2719   if (!thr)
2720     return;
2721   DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2722                                         thr->tls_addr + thr->tls_size);
2723   if (!dtv)
2724     return;
2725   // New DTLS block has been allocated.
2726   MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2727 }
2728 
2729 #if !SANITIZER_S390
2730 // Define own interceptor instead of sanitizer_common's for three reasons:
2731 // 1. It must not process pending signals.
2732 //    Signal handlers may contain MOVDQA instruction (see below).
2733 // 2. It must be as simple as possible to not contain MOVDQA.
2734 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2735 //    is empty for tsan (meant only for msan).
2736 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2737 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// So the interceptor must work with a mis-aligned stack; in particular, it
// must not execute MOVDQA with stack addresses.
2740 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2741   void *res = REAL(__tls_get_addr)(arg);
2742   handle_tls_addr(arg, res);
2743   return res;
2744 }
2745 #else // SANITIZER_S390
2746 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2747   uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2748   char *tp = static_cast<char *>(__builtin_thread_pointer());
2749   handle_tls_addr(arg, res + tp);
2750   return res;
2751 }
2752 #endif
2753 #endif
2754 
2755 #if SANITIZER_NETBSD
2756 TSAN_INTERCEPTOR(void, _lwp_exit) {
2757   SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2758   DestroyThreadState();
2759   REAL(_lwp_exit)();
2760 }
2761 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2762 #else
2763 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2764 #endif
2765 
2766 #if SANITIZER_FREEBSD
2767 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2768   SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2769   DestroyThreadState();
  REAL(thr_exit)(state);
2771 }
2772 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2773 #else
2774 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2775 #endif
2776 
2777 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2778 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2779 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2780 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2781 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2782 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2783 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2784 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2785 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2786 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2787 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2788 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2789 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2790 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2791 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2792 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2793 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2794 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2795 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2796 
2797 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2798 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2799 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2800 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2801 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2802 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2803 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2804 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2805 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2806 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2807 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2808 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2809 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2810 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2811 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2812 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2813 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2814 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2815 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2816   void *c)
2817 
2818 namespace __tsan {
2819 
2820 static void finalize(void *arg) {
2821   ThreadState *thr = cur_thread();
2822   int status = Finalize(thr);
2823   // Make sure the output is not lost.
2824   FlushStreams();
2825   if (status)
2826     Die();
2827 }
2828 
2829 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2830 static void unreachable() {
2831   Report("FATAL: ThreadSanitizer: unreachable called\n");
2832   Die();
2833 }
2834 #endif
2835 
// Define a default implementation since interception of libdispatch is optional.
2837 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2838 
2839 void InitializeInterceptors() {
2840 #if !SANITIZER_APPLE
  // We need to set these up early, because functions like dlsym() can call them.
2842   REAL(memset) = internal_memset;
2843   REAL(memcpy) = internal_memcpy;
2844 #endif
2845 
2846   new(interceptor_ctx()) InterceptorContext();
2847 
2848   InitializeCommonInterceptors();
2849   InitializeSignalInterceptors();
2850   InitializeLibdispatchInterceptors();
2851 
2852 #if !SANITIZER_APPLE
2853   InitializeSetjmpInterceptors();
2854 #endif
2855 
2856   TSAN_INTERCEPT(longjmp_symname);
2857   TSAN_INTERCEPT(siglongjmp_symname);
2858 #if SANITIZER_NETBSD
2859   TSAN_INTERCEPT(_longjmp);
2860 #endif
2861 
2862   TSAN_INTERCEPT(malloc);
2863   TSAN_INTERCEPT(__libc_memalign);
2864   TSAN_INTERCEPT(calloc);
2865   TSAN_INTERCEPT(realloc);
2866   TSAN_INTERCEPT(reallocarray);
2867   TSAN_INTERCEPT(free);
2868   TSAN_INTERCEPT(cfree);
2869   TSAN_INTERCEPT(munmap);
2870   TSAN_MAYBE_INTERCEPT_MEMALIGN;
2871   TSAN_INTERCEPT(valloc);
2872   TSAN_MAYBE_INTERCEPT_PVALLOC;
2873   TSAN_INTERCEPT(posix_memalign);
2874 
2875   TSAN_INTERCEPT(strcpy);
2876   TSAN_INTERCEPT(strncpy);
2877   TSAN_INTERCEPT(strdup);
2878 
2879   TSAN_INTERCEPT(pthread_create);
2880   TSAN_INTERCEPT(pthread_join);
2881   TSAN_INTERCEPT(pthread_detach);
2882   TSAN_INTERCEPT(pthread_exit);
2883   #if SANITIZER_LINUX
2884   TSAN_INTERCEPT(pthread_tryjoin_np);
2885   TSAN_INTERCEPT(pthread_timedjoin_np);
2886   #endif
2887 
2888   TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2889   TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2890   TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2891   TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2892   TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2893   TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2894 
2895   TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2896 
2897   TSAN_INTERCEPT(pthread_mutex_init);
2898   TSAN_INTERCEPT(pthread_mutex_destroy);
2899   TSAN_INTERCEPT(pthread_mutex_lock);
2900   TSAN_INTERCEPT(pthread_mutex_trylock);
2901   TSAN_INTERCEPT(pthread_mutex_timedlock);
2902   TSAN_INTERCEPT(pthread_mutex_unlock);
2903 #if SANITIZER_GLIBC
2904 #  if !__GLIBC_PREREQ(2, 34)
2905   TSAN_INTERCEPT(__pthread_mutex_lock);
2906   TSAN_INTERCEPT(__pthread_mutex_unlock);
2907 #  endif
2908 #endif
2909 
2910   TSAN_INTERCEPT(pthread_spin_init);
2911   TSAN_INTERCEPT(pthread_spin_destroy);
2912   TSAN_INTERCEPT(pthread_spin_lock);
2913   TSAN_INTERCEPT(pthread_spin_trylock);
2914   TSAN_INTERCEPT(pthread_spin_unlock);
2915 
2916   TSAN_INTERCEPT(pthread_rwlock_init);
2917   TSAN_INTERCEPT(pthread_rwlock_destroy);
2918   TSAN_INTERCEPT(pthread_rwlock_rdlock);
2919   TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2920   TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2921   TSAN_INTERCEPT(pthread_rwlock_wrlock);
2922   TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2923   TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2924   TSAN_INTERCEPT(pthread_rwlock_unlock);
2925 
2926   TSAN_INTERCEPT(pthread_barrier_init);
2927   TSAN_INTERCEPT(pthread_barrier_destroy);
2928   TSAN_INTERCEPT(pthread_barrier_wait);
2929 
2930   TSAN_INTERCEPT(pthread_once);
2931 
2932   TSAN_INTERCEPT(fstat);
2933   TSAN_MAYBE_INTERCEPT___FXSTAT;
2934   TSAN_MAYBE_INTERCEPT_FSTAT64;
2935   TSAN_MAYBE_INTERCEPT___FXSTAT64;
2936   TSAN_INTERCEPT(open);
2937   TSAN_MAYBE_INTERCEPT_OPEN64;
2938   TSAN_INTERCEPT(creat);
2939   TSAN_MAYBE_INTERCEPT_CREAT64;
2940   TSAN_INTERCEPT(dup);
2941   TSAN_INTERCEPT(dup2);
2942   TSAN_INTERCEPT(dup3);
2943   TSAN_MAYBE_INTERCEPT_EVENTFD;
2944   TSAN_MAYBE_INTERCEPT_SIGNALFD;
2945   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2946   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2947   TSAN_INTERCEPT(socket);
2948   TSAN_INTERCEPT(socketpair);
2949   TSAN_INTERCEPT(connect);
2950   TSAN_INTERCEPT(bind);
2951   TSAN_INTERCEPT(listen);
2952   TSAN_MAYBE_INTERCEPT_EPOLL;
2953   TSAN_INTERCEPT(close);
2954   TSAN_MAYBE_INTERCEPT___CLOSE;
2955   TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2956   TSAN_INTERCEPT(pipe);
2957   TSAN_INTERCEPT(pipe2);
2958 
2959   TSAN_INTERCEPT(unlink);
2960   TSAN_INTERCEPT(tmpfile);
2961   TSAN_MAYBE_INTERCEPT_TMPFILE64;
2962   TSAN_INTERCEPT(abort);
2963   TSAN_INTERCEPT(rmdir);
2964   TSAN_INTERCEPT(closedir);
2965 
2966   TSAN_INTERCEPT(sigsuspend);
2967   TSAN_INTERCEPT(sigblock);
2968   TSAN_INTERCEPT(sigsetmask);
2969   TSAN_INTERCEPT(pthread_sigmask);
2970   TSAN_INTERCEPT(raise);
2971   TSAN_INTERCEPT(kill);
2972   TSAN_INTERCEPT(pthread_kill);
2973   TSAN_INTERCEPT(sleep);
2974   TSAN_INTERCEPT(usleep);
2975   TSAN_INTERCEPT(nanosleep);
2976   TSAN_INTERCEPT(pause);
2977   TSAN_INTERCEPT(gettimeofday);
2978   TSAN_INTERCEPT(getaddrinfo);
2979 
2980   TSAN_INTERCEPT(fork);
2981   TSAN_INTERCEPT(vfork);
2982 #if SANITIZER_LINUX
2983   TSAN_INTERCEPT(clone);
2984 #endif
2985 #if !SANITIZER_ANDROID
2986   TSAN_INTERCEPT(dl_iterate_phdr);
2987 #endif
2988   TSAN_MAYBE_INTERCEPT_ON_EXIT;
2989   TSAN_INTERCEPT(__cxa_atexit);
2990   TSAN_INTERCEPT(_exit);
2991 
2992 #ifdef NEED_TLS_GET_ADDR
2993 #if !SANITIZER_S390
2994   TSAN_INTERCEPT(__tls_get_addr);
2995 #else
2996   TSAN_INTERCEPT(__tls_get_addr_internal);
2997   TSAN_INTERCEPT(__tls_get_offset);
2998 #endif
2999 #endif
3000 
3001   TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3002   TSAN_MAYBE_INTERCEPT_THR_EXIT;
3003 
3004 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
  // Need to set it up, because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it can't be
  // resolved.
3007   REAL(atexit) = (int(*)(void(*)()))unreachable;
3008 #endif
3009 
3010   if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3011     Printf("ThreadSanitizer: failed to setup atexit callback\n");
3012     Die();
3013   }
3014   if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
3015     Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
3016     Die();
3017   }
3018 
3019 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3020   if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
3021     Printf("ThreadSanitizer: failed to create thread key\n");
3022     Die();
3023   }
3024 #endif
3025 
3026   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3027   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3028   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3029   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3030   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3031   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3032   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3033   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3034   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3035   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3036   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3037   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3038   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3039   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3040   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3041   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3042   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3043   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3044   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3045 
3046   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3047   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3048   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3049   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3050   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3051   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3052   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3053   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3054   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3055   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3056   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3057   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3058   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3059   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3060   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3061   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3062   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3063   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3064   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3065 
3066   FdInit();
3067 }
3068 
3069 }  // namespace __tsan
3070 
3071 // Invisible barrier for tests.
3072 // There were several unsuccessful iterations for this functionality:
3073 // 1. Initially it was implemented in user code using
3074 //    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    MacOS. Futexes are Linux-specific, for that matter.
3076 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3077 //    "as-if synchronized via sleep" messages in reports which failed some
3078 //    output tests.
3079 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
//    visible events, which led to "failed to restore stack trace" failures.
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help.
3083 // That's why we now have what we have.
3084 constexpr u32 kBarrierThreadBits = 10;
3085 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3086 
3087 extern "C" {
3088 
3089 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3090     atomic_uint32_t *barrier, u32 num_threads) {
3091   if (num_threads >= kBarrierThreads) {
3092     Printf("barrier_init: count is too large (%d)\n", num_threads);
3093     Die();
3094   }
  // The kBarrierThreadBits least-significant bits hold the thread count;
  // the remaining bits count the threads that have entered.
3097   atomic_store(barrier, num_threads, memory_order_relaxed);
3098 }
3099 
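// An epoch is the number of completed barrier rounds:
// (threads entered so far) / (threads per round).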
3100 static u32 barrier_epoch(u32 value) {
3101   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3102 }
3103 
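// Each arriving thread bumps the entered count (stored above the low
// kBarrierThreadBits bits). The thread whose arrival completes a round wakes
// all waiters; the rest wait on the futex until the epoch advances.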
3104 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3105     atomic_uint32_t *barrier) {
3106   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3107   u32 old_epoch = barrier_epoch(old);
3108   if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3109     FutexWake(barrier, (1 << 30));
3110     return;
3111   }
3112   for (;;) {
3113     u32 cur = atomic_load(barrier, memory_order_relaxed);
3114     if (barrier_epoch(cur) != old_epoch)
3115       return;
3116     FutexWait(barrier, cur);
3117   }
3118 }
3119 
3120 }  // extern "C"
3121