//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard. A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
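//
// As a rough, illustrative sketch (not a statement of the exact lowering,
// which is up to the frontend): a compiler instrumenting user code such as
//   std::atomic<int> x;
//   int v = x.load(std::memory_order_acquire);
// is expected to emit a call along the lines of
//   __tsan_atomic32_load(reinterpret_cast<const volatile a32 *>(&x), mo_acquire);
// so the entry points defined below stand in for the program's atomic
// operations.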

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under the tsan internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}
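
// For example (descriptive note, inferred from the if-chain above rather than
// asserted elsewhere): SizeLog<a128>() falls through to kSizeLog8, so a
// 16-byte atomic is shadow-tracked as an 8-byte access, as the comment above
// describes.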

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
                    morder mo) NO_THREAD_SAFETY_ANALYSIS {
  CHECK(IsLoadOrder(mo));
  // This fast path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create a sync object if it does not exist yet. For example, an
  // atomic pointer is initialized to nullptr and then periodically
  // acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under the sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) NO_THREAD_SAFETY_ANALYSIS {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v,
                   morder mo) NO_THREAD_SAFETY_ANALYSIS {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) NO_THREAD_SAFETY_ANALYSIS {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  CHECK(IsLoadOrder(fmo));

  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = IsReleaseOrder(mo);

  if (mo != mo_relaxed || fmo != mo_relaxed)
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);

  T cc = *c;
  T pr = func_cas(a, cc, v);
  bool success = pr == cc;
  if (!success) {
    *c = pr;
    mo = fmo;
  }

  if (s) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

    if (success && IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (success && IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);

    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }

  return success;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics;
  // since we use __sync_ atomics for the actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
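
// Illustrative sketch, assuming the flag values documented above: an
// HLE-annotated builtin such as
//   __atomic_exchange_n(&x, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE);
// reaches the runtime with mo == (mo_acquire | (1 << 16)); masking with
// 0x7fff strips the HLE bit and leaves plain mo_acquire, i.e. elision is
// treated as if it always failed.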

#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  // SCOPED_ATOMIC references a variable named 'a'; a fence has no
  // associated address, so provide a dummy one.
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

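// Note (descriptive, inferred from the wrappers below rather than from a
// separate spec): the Go runtime passes arguments and results in one packed
// buffer 'a'. For example, __tsan_go_atomic32_fetch_add uses
//   a+0  : *a32  address of the atomic variable
//   a+8  : a32   addend
//   a+16 : a32   returned old value
// and __tsan_go_atomic64_compare_exchange uses a+0/a+8/a+16/a+24 for the
// address, expected value, new value and success flag respectively.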
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO