//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
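//
// Roughly speaking, compiler instrumentation (-fsanitize=thread) lowers C/C++
// atomic operations into calls to the __tsan_atomic* entry points defined at
// the bottom of this file; for example, an acquire load of a 32-bit atomic
// ends up as a call like __tsan_atomic32_load(addr, mo_acquire). (Illustrative
// mapping only, not an exhaustive description of the instrumentation pass.)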

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}
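
// Note on func_xchg above: GCC/Clang document __sync_lock_test_and_set as an
// acquire barrier only (it actually performs an atomic exchange), so the
// explicit __sync_synchronize() is what upgrades it to the full barrier
// expected of a sequentially consistent exchange.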

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under TSan's internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory access;
  // this leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
  return (morder)(mo & 0x7fff);
}
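
// For example, an HLE-annotated acquire that reaches this runtime as
// (mo_acquire | __ATOMIC_HLE_ACQUIRE) is masked back down to plain mo_acquire,
// since (mo & 0x7fff) clears every bit at or above 1 << 15.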

#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/

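// Each __tsan_atomicN_* entry point below expands SCOPED_ATOMIC: when the
// current thread ignores synchronization (or interceptors), the call falls
// through to the plain NoTsanAtomic* implementation; otherwise the operation
// is wrapped in a ScopedAtomic frame (FuncEntry/FuncExit plus pending-signal
// processing) and dispatched to the instrumented Atomic* implementation above.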
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

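// The Go entry points below receive all arguments and results in one packed
// buffer: as can be seen from the casts in each function, the address of the
// atomic variable is read from offset 0, operands (if any) follow starting at
// offset 8, and results are written back into the buffer after the operands.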
896 extern "C" {
897 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_load(ThreadState * thr,uptr cpc,uptr pc,u8 * a)898 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
899 ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
900 }
901
902 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_load(ThreadState * thr,uptr cpc,uptr pc,u8 * a)903 void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
904 ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
905 }
906
907 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_store(ThreadState * thr,uptr cpc,uptr pc,u8 * a)908 void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
909 ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
910 }
911
912 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_store(ThreadState * thr,uptr cpc,uptr pc,u8 * a)913 void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
914 ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
915 }
916
917 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_fetch_add(ThreadState * thr,uptr cpc,uptr pc,u8 * a)918 void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
919 ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
920 }
921
922 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_fetch_add(ThreadState * thr,uptr cpc,uptr pc,u8 * a)923 void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
924 ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
925 }
926
927 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)928 void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
929 ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
930 }
931
932 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)933 void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
934 ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
935 }
936
937 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_compare_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)938 void __tsan_go_atomic32_compare_exchange(
939 ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
940 a32 cur = 0;
941 a32 cmp = *(a32*)(a+8);
942 ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
943 *(bool*)(a+16) = (cur == cmp);
944 }
945
946 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_compare_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)947 void __tsan_go_atomic64_compare_exchange(
948 ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
949 a64 cur = 0;
950 a64 cmp = *(a64*)(a+8);
951 ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
952 *(bool*)(a+24) = (cur == cmp);
953 }
954 } // extern "C"
955 #endif // #if !SANITIZER_GO
956