1 //===-- tsan_interceptors_mac.cpp -----------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // Mac-specific interceptors.
12 //===----------------------------------------------------------------------===//
13 
14 #include "sanitizer_common/sanitizer_platform.h"
15 #if SANITIZER_MAC
16 
17 #include "interception/interception.h"
18 #include "tsan_interceptors.h"
19 #include "tsan_interface.h"
20 #include "tsan_interface_ann.h"
21 #include "sanitizer_common/sanitizer_addrhashmap.h"
22 
23 #include <errno.h>
24 #include <libkern/OSAtomic.h>
25 #include <objc/objc-sync.h>
26 #include <sys/ucontext.h>
27 
28 #if defined(__has_include) && __has_include(<os/lock.h>)
29 #include <os/lock.h>
30 #endif
31 
32 #if defined(__has_include) && __has_include(<xpc/xpc.h>)
33 #include <xpc/xpc.h>
34 #endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
35 
36 typedef long long_t;
37 
38 extern "C" {
39 int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
40 int setcontext(const ucontext_t *ucp);
41 }
42 
43 namespace __tsan {
44 
// Memory orders applied to the __tsan_atomic* calls generated below.
//
// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// actually aliases of each other, and we cannot have different interceptors for
// them, because they're actually the same function.  Thus, we have to stay
// conservative and treat the non-barrier versions as mo_acq_rel.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;
52 
// Generator macros for the OSAtomic* interceptors.  Each expands into a
// TSAN_INTERCEPTOR that performs the operation through the corresponding
// __tsan_atomic* primitive, so the memory effect is visible to TSan.

// f(x, ptr): returns the value of *ptr *before* the operation (used for the
// *Orig variants of the bitwise functions, which return the original value).
#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                 \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                 \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);                \
  }

// f(x, ptr): returns old + x, i.e. the new value after an addition.
#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                        \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                   \
  }

// f(ptr): increments *ptr by one and returns the new (incremented) value.
#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                             \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                           \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                   \
  }

// f(ptr): decrements *ptr by one and returns the new (decremented) value.
#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
  }
77 
78 #define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)                  \
79   m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,             \
80     kMacOrderNonBarrier)                                                       \
81   m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,    \
82     kMacOrderBarrier)                                                          \
83   m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,             \
84     kMacOrderNonBarrier)                                                       \
85   m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f,    \
86     kMacOrderBarrier)
87 
88 #define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
89   m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
90     kMacOrderNonBarrier)                                                       \
91   m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
92     kMacOrderBarrier)                                                          \
93   m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
94     kMacOrderNonBarrier)                                                       \
95   m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
96     __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
97 
98 OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
99                                  OSATOMIC_INTERCEPTOR_PLUS_X)
100 OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
101                                  OSATOMIC_INTERCEPTOR_PLUS_1)
102 OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
103                                  OSATOMIC_INTERCEPTOR_MINUS_1)
104 OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
105                               OSATOMIC_INTERCEPTOR)
106 OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
107                               OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
108 OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
109                               OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
110 
// Generates interceptors for an OSAtomicCompareAndSwap* pair (plain and
// *Barrier flavor).  Returns true iff *ptr was equal to old_value and was
// replaced by new_value.  Note that &old_value (a by-value parameter) is
// passed as the "expected" slot of the strong CAS; its local copy may be
// clobbered on failure, which is harmless.
#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)              \
  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) {    \
    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);                  \
    return tsan_atomic_f##_compare_exchange_strong(                         \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,    \
        kMacOrderNonBarrier, kMacOrderNonBarrier);                          \
  }                                                                         \
                                                                            \
  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,              \
                   t volatile *ptr) {                                       \
    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);         \
    return tsan_atomic_f##_compare_exchange_strong(                         \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,    \
        kMacOrderBarrier, kMacOrderNonBarrier);                             \
  }

// Pointer- and long-sized variants map onto the 64-bit atomic (LP64 Darwin).
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)
136 
// Generates an interceptor for OSAtomicTestAndSet/OSAtomicTestAndClear.
// `n` is a bit index using the OSAtomic convention: bit 0 is the MOST
// significant bit of the byte at `ptr` (hence the 0x80u >> (n & 7) mask).
// Returns the original value of the targeted bit.
#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)             \
  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {    \
    SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                          \
    volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
    char bit = 0x80u >> (n & 7);                                 \
    char mask = clear ? ~bit : bit;                              \
    char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);      \
    return orig_byte & bit;                                      \
  }

// Expands to the plain (non-barrier) and *Barrier flavors of the bit op.
#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)               \
  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)

// Test-and-set ORs the bit in; test-and-clear ANDs with the inverted mask.
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)
154 
// Intercepts the LIFO queue push.  The release must happen BEFORE the item
// becomes visible to other threads so a dequeuing thread's acquire pairs
// with it and sees all prior writes to the item.
TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}
161 
// Intercepts the LIFO queue pop.  On success, acquire on the item pairs with
// the release done in OSAtomicEnqueue; an empty queue returns null and needs
// no synchronization.
TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}
168 
169 // OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
170 #if !SANITIZER_IOS
171 
// FIFO variant of OSAtomicEnqueue (macOS only): release the item before it
// is published to the queue.
TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}
178 
// FIFO variant of OSAtomicDequeue (macOS only): acquire on the dequeued item
// pairs with the release in OSAtomicFifoEnqueue.
TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}
186 
187 #endif
188 
// Models OSSpinLockLock: after the real lock is taken, an Acquire on the lock
// address pairs with the Release done by the previous unlocker.
TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  // Before the TSan thread state is initialized, just forward the call.
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
198 
199 TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
200   CHECK(!cur_thread()->is_dead);
201   if (!cur_thread()->is_inited) {
202     return REAL(OSSpinLockTry)(lock);
203   }
204   SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
205   bool result = REAL(OSSpinLockTry)(lock);
206   if (result)
207     Acquire(thr, pc, (uptr)lock);
208   return result;
209 }
210 
// Models OSSpinLockUnlock: the Release must happen BEFORE the real unlock,
// so the next locker's Acquire observes it.
TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  // Before the TSan thread state is initialized, just forward the call.
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}
220 
// Models the (private) os_lock_lock API: Acquire after the real lock is held.
TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  // Before the TSan thread state is initialized, just forward the call.
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
230 
231 TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
232   CHECK(!cur_thread()->is_dead);
233   if (!cur_thread()->is_inited) {
234     return REAL(os_lock_trylock)(lock);
235   }
236   SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
237   bool result = REAL(os_lock_trylock)(lock);
238   if (result)
239     Acquire(thr, pc, (uptr)lock);
240   return result;
241 }
242 
// Models os_lock_unlock: Release before the real unlock makes the lock
// available to other threads.
TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  // Before the TSan thread state is initialized, just forward the call.
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}
252 
253 #if defined(__has_include) && __has_include(<os/lock.h>)
254 
// Models os_unfair_lock_lock.  Unlike the OSSpinLock interceptors above, a
// dead thread state also falls through to the real implementation instead of
// CHECK-failing.
TSAN_INTERCEPTOR(void, os_unfair_lock_lock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock, lock);
  REAL(os_unfair_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}
263 
// Same as os_unfair_lock_lock, but forwards the caller's options flags
// unchanged to the real implementation.
TSAN_INTERCEPTOR(void, os_unfair_lock_lock_with_options, os_unfair_lock_t lock,
                 u32 options) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_lock_with_options)(lock, options);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_lock_with_options, lock, options);
  REAL(os_unfair_lock_lock_with_options)(lock, options);
  Acquire(thr, pc, (uptr)lock);
}
273 
274 TSAN_INTERCEPTOR(bool, os_unfair_lock_trylock, os_unfair_lock_t lock) {
275   if (!cur_thread()->is_inited || cur_thread()->is_dead) {
276     return REAL(os_unfair_lock_trylock)(lock);
277   }
278   SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_trylock, lock);
279   bool result = REAL(os_unfair_lock_trylock)(lock);
280   if (result)
281     Acquire(thr, pc, (uptr)lock);
282   return result;
283 }
284 
// Models os_unfair_lock_unlock: Release before the real unlock.
TSAN_INTERCEPTOR(void, os_unfair_lock_unlock, os_unfair_lock_t lock) {
  if (!cur_thread()->is_inited || cur_thread()->is_dead) {
    return REAL(os_unfair_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_unfair_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_unfair_lock_unlock)(lock);
}
293 
294 #endif  // #if defined(__has_include) && __has_include(<os/lock.h>)
295 
296 #if defined(__has_include) && __has_include(<xpc/xpc.h>)
297 
// Wraps the user's XPC event handler so that the handler's later invocations
// (possibly on a different thread) Acquire on the connection, pairing with
// the Release performed here at installation time.
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      // Re-enter the runtime on the thread that runs the handler; this
      // shadows thr/pc with that thread's state for the Acquire.
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}
312 
// Wraps an XPC barrier block: Release here, Acquire inside the block when it
// eventually runs, so writes before send_barrier are visible to the barrier.
TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      // thr/pc here belong to the thread executing the barrier block.
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}
326 
// Wraps the reply handler of an XPC message send: Release at send time,
// Acquire in the reply handler (which runs later on replyq's thread).
TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      // thr/pc here belong to the thread executing the reply handler.
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}
343 
// Release on the connection before cancelling it, so in-flight handlers that
// Acquire on the connection see writes made before the cancel.
TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}
349 
350 #endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
351 
352 // Determines whether the Obj-C object pointer is a tagged pointer. Tagged
353 // pointers encode the object data directly in their pointer bits and do not
354 // have an associated memory allocation. The Obj-C runtime uses tagged pointers
355 // to transparently optimize small objects.
356 static bool IsTaggedObjCPointer(id obj) {
357   const uptr kPossibleTaggedBits = 0x8000000000000001ull;
358   return ((uptr)obj & kPossibleTaggedBits) != 0;
359 }
360 
// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a valid
// address in the process space. We do a small allocation here to obtain a
// stable address (the array backing the hash map can change). The memory is
// never free'd (leaked) and allocation and locking are slow, but this code only
// runs for @synchronized with tagged pointers, which is very rare.
static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
  typedef AddrHashMap<uptr, 5> Map;
  static Map Addresses;
  // The handle pins/locks the map entry for `addr` for this scope.
  Map::Handle h(&Addresses, addr);
  if (h.created()) {
    // First lookup for this tagged pointer: allocate one byte to obtain a
    // unique, stable address.  Accesses during this bookkeeping allocation
    // are ignored so they don't show up in reports.
    ThreadIgnoreBegin(thr, pc);
    *h = (uptr) user_alloc(thr, pc, /*size=*/1);
    ThreadIgnoreEnd(thr, pc);
  }
  return *h;
}
378 
379 // Returns an address on which we can synchronize given an Obj-C object pointer.
380 // For normal object pointers, this is just the address of the object in memory.
381 // Tagged pointers are not backed by an actual memory allocation, so we need to
382 // synthesize a valid address.
383 static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
384   if (IsTaggedObjCPointer(obj))
385     return GetOrCreateSyncAddress((uptr)obj, thr, pc);
386   return (uptr)obj;
387 }
388 
// Models @synchronized entry as a write-reentrant (recursive) mutex lock on
// the object's sync address.  A nil object is a documented no-op; forward it.
TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
  if (!obj) return REAL(objc_sync_enter)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  // Pre/Post bracket the real lock acquisition, mirroring pthread mutexes.
  MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
  int result = REAL(objc_sync_enter)(obj);
  CHECK_EQ(result, OBJC_SYNC_SUCCESS);
  MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
  return result;
}
399 
// Models @synchronized exit as a mutex unlock on the object's sync address.
// The unlock is reported BEFORE the real call; if the runtime then reports a
// failure (e.g. unlocking a lock the thread doesn't hold), flag the access.
TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
  if (!obj) return REAL(objc_sync_exit)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexUnlock(thr, pc, addr);
  int result = REAL(objc_sync_exit)(obj);
  if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
  return result;
}
409 
// Re-implementation of swapcontext() with interception suppressed around the
// actual context switch.
TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
  {
    // Only record the interceptor entry; the scope must end before the
    // context switch below, since setcontext() may never return here.
    SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
  }
  // Because of swapcontext() semantics we have no option but to copy its
  // implementation here.
  if (!oucp || !ucp) {
    errno = EINVAL;
    return -1;
  }
  ThreadState *thr = cur_thread();
  // NOTE(review): UCF_SWAPPED appears to mirror the flag Darwin's own
  // swapcontext() stashes in uc_onstack to tell the first (direct) return of
  // getcontext() apart from the return caused by a later setcontext(oucp) --
  // verify against Apple's libplatform sources.
  const int UCF_SWAPPED = 0x80000000;
  oucp->uc_onstack &= ~UCF_SWAPPED;
  // Suppress interception while saving/switching contexts.
  thr->ignore_interceptors++;
  int ret = getcontext(oucp);
  if (!(oucp->uc_onstack & UCF_SWAPPED)) {
    // Direct return from getcontext(): mark the context as swapped and jump
    // to the target context.  The swapped-in resume skips this branch.
    thr->ignore_interceptors--;
    if (!ret) {
      oucp->uc_onstack |= UCF_SWAPPED;
      ret = setcontext(ucp);
    }
  }
  return ret;
}
434 
435 // On macOS, libc++ is always linked dynamically, so intercepting works the
436 // usual way.
437 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
438 
namespace {
// Mirrors the in-memory layout of libc++'s std::__shared_weak_count so the
// interceptors below can touch its reference counts directly.  The _unused_*
// pure-virtual placeholders keep on_zero_shared() and on_zero_shared_weak()
// at the vtable offsets (0x10 and 0x20) used by libc++.dylib.
// NOTE(review): this layout must stay in sync with the deployed libc++ ABI.
struct fake_shared_weak_count {
  volatile a64 shared_owners;       // strong reference count storage
  volatile a64 shared_weak_owners;  // weak reference count storage
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
};
}  // namespace
450 
451 // The following code adds libc++ interceptors for:
452 //     void __shared_weak_count::__release_shared() _NOEXCEPT;
453 //     bool __shared_count::__release_shared() _NOEXCEPT;
454 // Shared and weak pointers in C++ maintain reference counts via atomics in
455 // libc++.dylib, which are TSan-invisible, and this leads to false positives in
destructor code. These interceptors re-implement the whole functions so that
457 // the mo_acq_rel semantics of the atomic decrement are visible.
458 //
459 // Unfortunately, the interceptors cannot simply Acquire/Release some sync
460 // object and call the original function, because it would have a race between
461 // the sync and the destruction of the object.  Calling both under a lock will
462 // not work because the destructor can invoke this interceptor again (and even
463 // in a different thread, so recursive locks don't help).
464 
// Re-implements std::__shared_weak_count::__release_shared() so the release
// decrement / acquire-before-destruction pattern is visible to TSan.
STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  // Flag-controlled: fall back to the original libc++ implementation.
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  // fetch_add returns the PREVIOUS value.  NOTE(review): libc++ keeps the
  // owner counts biased by -1 (0 means one owner), so a previous value of 0
  // means this call dropped the last reference -- verify against the libc++
  // sources matching the deployed dylib.
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    // Acquire pairs with the release decrements done by other owners, so the
    // destructor sees all their writes.
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}
482 
// Re-implements std::__shared_count::__release_shared(); returns true iff
// this call released the last strong reference (mirrors libc++'s contract).
STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  // Flag-controlled: fall back to the original libc++ implementation.
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  // See the __shared_weak_count interceptor above for the biased-count note.
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}
496 
497 namespace {
498 struct call_once_callback_args {
499   void (*orig_func)(void *arg);
500   void *orig_arg;
501   void *flag;
502 };
503 
504 void call_once_callback_wrapper(void *arg) {
505   call_once_callback_args *new_args = (call_once_callback_args *)arg;
506   new_args->orig_func(new_args->orig_arg);
507   __tsan_release(new_args->flag);
508 }
509 }  // namespace
510 
// This adds a libc++ interceptor for:
//     void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  // Pack the user callback and the flag; the wrapper releases `flag` after
  // running func(arg).  Stack lifetime is fine: __call_once invokes the
  // callback before returning.
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}
523 
524 }  // namespace __tsan
525 
526 #endif  // SANITIZER_MAC
527