1 //===-- tsan_interface_atomic.cc ------------------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file is a part of ThreadSanitizer (TSan), a race detector.
11 //
12 //===----------------------------------------------------------------------===//
13
14 // ThreadSanitizer atomic operations are based on C++11/C1x standards.
15 // For background see C++11 standard. A slightly older, publicly
16 // available draft of the standard (not entirely up-to-date, but close enough
17 // for casual browsing) is available here:
18 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
19 // The following page contains more background information:
20 // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
21
22 #include "sanitizer_common/sanitizer_placement_new.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_mutex.h"
25 #include "tsan_flags.h"
26 #include "tsan_interface.h"
27 #include "tsan_rtl.h"
28
29 using namespace __tsan; // NOLINT
30
31 #if !SANITIZER_GO && __TSAN_HAS_INT128
32 // Protects emulation of 128-bit atomic operations.
33 static StaticSpinMutex mutex128;
34 #endif
35
IsLoadOrder(morder mo)36 static bool IsLoadOrder(morder mo) {
37 return mo == mo_relaxed || mo == mo_consume
38 || mo == mo_acquire || mo == mo_seq_cst;
39 }
40
IsStoreOrder(morder mo)41 static bool IsStoreOrder(morder mo) {
42 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
43 }
44
IsReleaseOrder(morder mo)45 static bool IsReleaseOrder(morder mo) {
46 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
47 }
48
IsAcquireOrder(morder mo)49 static bool IsAcquireOrder(morder mo) {
50 return mo == mo_consume || mo == mo_acquire
51 || mo == mo_acq_rel || mo == mo_seq_cst;
52 }
53
IsAcqRelOrder(morder mo)54 static bool IsAcqRelOrder(morder mo) {
55 return mo == mo_acq_rel || mo == mo_seq_cst;
56 }
57
// Atomic exchange: stores op into *v and returns the previous value,
// with full-barrier semantics.
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}
64
// Fetch-and-op wrappers over the compiler __sync intrinsics.
// Each returns the previous value of *v.
template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
84
// Atomic fetch-nand emulated via a compare-and-swap loop; returns the
// previous value of *v.
// clang does not support __sync_fetch_and_nand.
template<typename T> T func_nand(volatile T *v, T op) {
  T expected = *v;
  for (;;) {
    T desired = ~(expected & op);
    T observed = __sync_val_compare_and_swap(v, expected, desired);
    if (observed == expected)
      return expected;
    expected = observed;
  }
}
96
// Strong compare-and-swap: returns the value that was observed in *v
// (equal to cmp iff the swap happened).
template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
100
101 // clang does not support 128-bit atomic ops.
102 // Atomic ops are executed under tsan internal mutex,
103 // here we assume that the atomic variables are not accessed
104 // from non-instrumented code.
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
// 128-bit overloads of the func_* helpers above, emulated under mutex128.
// Each returns the previous value of *v.
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

// Emulated strong CAS: returns the observed value; stores xch only on match.
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
164
165 template<typename T>
SizeLog()166 static int SizeLog() {
167 if (sizeof(T) <= 1)
168 return kSizeLog1;
169 else if (sizeof(T) <= 2)
170 return kSizeLog2;
171 else if (sizeof(T) <= 4)
172 return kSizeLog4;
173 else
174 return kSizeLog8;
175 // For 16-byte atomics we also use 8-byte memory access,
176 // this leads to false negatives only in very obscure cases.
177 }
178
#if !SANITIZER_GO
// Reinterprets a plain atomic cell as the sanitizer_common atomic type of
// the same width (const/volatile stripped so the atomic_* helpers accept it).
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}
196
// Converts the interface morder constant to the sanitizer_common
// memory_order enumeration. mo must already be normalized (see
// convert_morder); any other value trips the CHECK.
static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}
209
// Raw atomic load with no race-detector bookkeeping; used on ignore paths
// and on the non-acquire fast path of AtomicLoad.
template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
// 128-bit load emulated under mutex128 (see comment on mutex128 above).
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif
221
// Instrumented atomic load: records the read in shadow memory and, for
// acquire orders, acquires the clock stored in the address' sync object.
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
245
// Raw atomic store with no race-detector bookkeeping.
template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
// 128-bit store emulated under mutex128.
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif
257
// Instrumented atomic store: records the write in shadow memory and, for
// release orders, publishes the thread's clock through the address' sync
// object before performing the store (store happens under the sync mutex).
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}
280
// Common implementation of all instrumented read-modify-write operations.
// F performs the actual memory update; for non-relaxed orders the thread's
// clock is released to and/or acquired from the address' sync object, and F
// runs while the sync mutex is still held so the value and clock stay
// consistent. Returns the previous value (whatever F returns).
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}
302
// NoTsan* RMW variants forward straight to the func_* helpers; the memory
// order argument is unused.
template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

// Instrumented RMW variants route through AtomicRMW instantiated with the
// matching func_* helper.
template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
379
// Raw strong compare-exchange without race-detector bookkeeping.
// Returns true on success; on failure the observed value is written back
// through c.
template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

// Value-returning flavor (__sync-style): returns the value observed in *a.
template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}
402
// Instrumented strong compare-exchange. Returns true on success; on failure
// the observed value is written back through c. The func_cas runs while the
// sync mutex is held so the value and clock stay consistent.
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  // Pure acquire orders only read the sync object's clock, so a read lock
  // suffices for them.
  bool write_lock = mo != mo_acquire && mo != mo_consume;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  T cc = *c;
  T pr = func_cas(a, cc, v);
  if (s) {
    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
435
// Value-returning CAS flavor (__sync-style): returns the value observed
// in *a.
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}
442
#if !SANITIZER_GO
// Thread fences: both variants just issue a hardware full barrier; fences
// are not yet modeled by the race detector.
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif
453
454 // Interface functions follow.
455 #if !SANITIZER_GO
456
457 // C/C++
458
// Normalizes the memory order received from compiler-generated calls into
// one of the six plain morder values.
static morder convert_morder(morder mo) {
  // Optionally force every atomic to sequential consistency (runtime flag).
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics,
  // since we use __sync_ atomics for actual atomic operations,
  // we can safely ignore it as well. It also subtly affects semantics,
  // but we don't model the difference.
  return (morder)(mo & 0x7fff);
}
475
// Common prologue for every C/C++ interface entry point below:
// - bypasses instrumentation entirely when sync or interceptors are ignored,
// - normalizes mo, bumps the stat counters, opens a ScopedAtomic frame,
// - then dispatches to the instrumented Atomic##func implementation.
// Expects 'a' and 'mo' to be in scope at the expansion site.
#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (thr->ignore_sync || thr->ignore_interceptors) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
489
// RAII frame around an atomic interface call: pushes a function frame (for
// stack traces) on construction; processes pending signals and pops the
// frame on destruction.
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
505
AtomicStatInc(ThreadState * thr,uptr size,morder mo,StatType t)506 static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
507 StatInc(thr, StatAtomic);
508 StatInc(thr, t);
509 StatInc(thr, size == 1 ? StatAtomic1
510 : size == 2 ? StatAtomic2
511 : size == 4 ? StatAtomic4
512 : size == 8 ? StatAtomic8
513 : StatAtomic16);
514 StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
515 : mo == mo_consume ? StatAtomicConsume
516 : mo == mo_acquire ? StatAtomicAcquire
517 : mo == mo_release ? StatAtomicRelease
518 : mo == mo_acq_rel ? StatAtomicAcq_Rel
519 : StatAtomicSeq_Cst);
520 }
521
extern "C" {
// C/C++ interface: atomic loads, one entry point per access size.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
549
// C/C++ interface: atomic stores.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
576
// C/C++ interface: atomic exchange; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
603
// C/C++ interface: fetch-and-add; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif
630
// C/C++ interface: fetch-and-subtract; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
657
// C/C++ interface: fetch-and-AND; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif
684
// C/C++ interface: fetch-and-OR; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
711
// C/C++ interface: fetch-and-XOR; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif
738
// C/C++ interface: fetch-and-NAND; returns the previous value.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
765
// C/C++ interface: strong compare-exchange. Returns non-zero on success;
// on failure the observed value is written back through c.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
797
// C/C++ interface: weak compare-exchange. Maps to the same strong CAS
// implementation (spurious failures are simply never produced).
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
829
830 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_atomic8_compare_exchange_val(volatile a8 * a,a8 c,a8 v,morder mo,morder fmo)831 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
832 morder mo, morder fmo) {
833 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
834 }
835
836 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_atomic16_compare_exchange_val(volatile a16 * a,a16 c,a16 v,morder mo,morder fmo)837 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
838 morder mo, morder fmo) {
839 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
840 }
841
842 SANITIZER_INTERFACE_ATTRIBUTE
__tsan_atomic32_compare_exchange_val(volatile a32 * a,a32 c,a32 v,morder mo,morder fmo)843 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
844 morder mo, morder fmo) {
845 SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
846 }
847
SANITIZER_INTERFACE_ATTRIBUTE
// 64-bit "_val" compare-exchange: returns the previous value of *a
// (expected value c passed by value); see the 8-bit variant above.
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
853
#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
// 128-bit "_val" compare-exchange: returns the previous value of *a.
// Only compiled in when the target supports __int128.
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
861
SANITIZER_INTERFACE_ATTRIBUTE
// atomic_thread_fence(mo) interceptor.
void __tsan_atomic_thread_fence(morder mo) {
  // Dummy address: SCOPED_ATOMIC apparently expands to code that refers to a
  // variable named 'a' -- NOTE(review): confirm against the macro definition
  // earlier in this file; 'a' is otherwise unused here.
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}
867
SANITIZER_INTERFACE_ATTRIBUTE
// atomic_signal_fence(mo) interceptor.  Intentionally empty: a signal fence
// constrains only the compiler within a single thread, so there is no
// cross-thread synchronization for the race detector to model.
void __tsan_atomic_signal_fence(morder mo) {
}
871 } // extern "C"
872
873 #else // #if !SANITIZER_GO
874
875 // Go
876
// Dispatch helper for the Go entry points below (which receive an explicit
// ThreadState *thr plus two PCs: cpc for the synthetic call frame, pc for
// the atomic op itself).  If the thread is currently ignoring
// synchronization, perform the raw uninstrumented atomic; otherwise bracket
// the instrumented Atomic##func call with FuncEntry/FuncExit so the op gets
// a proper call frame in reports.
#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
886
// Same as ATOMIC, but stores the operation's result into 'ret' (an lvalue,
// typically a slot inside the Go argument frame -- see the callers below).
#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/
896
897 extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 32-bit atomic load.  'a' points at the caller's argument
// frame; from the casts below: a+0 holds the address of the atomic
// variable, a+8 receives the loaded value.
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}
902
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 64-bit atomic load.  Frame layout: a+0 = address of the atomic
// variable, a+8 = result slot.
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}
907
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 32-bit atomic store.  Frame layout: a+0 = address of the
// atomic variable, a+8 = value to store.
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}
912
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 64-bit atomic store.  Frame layout: a+0 = address of the
// atomic variable, a+8 = value to store.
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}
917
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 32-bit fetch-and-add.  Frame layout: a+0 = address of the
// atomic variable, a+8 = addend, a+16 = result slot.
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}
922
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 64-bit fetch-and-add.  Frame layout: a+0 = address of the
// atomic variable, a+8 = addend, a+16 = result slot.
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
927
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 32-bit atomic exchange.  Frame layout: a+0 = address of the
// atomic variable, a+8 = new value, a+16 = slot for the old value.
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}
932
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 64-bit atomic exchange.  Frame layout: a+0 = address of the
// atomic variable, a+8 = new value, a+16 = slot for the old value.
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
937
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 32-bit compare-and-swap.  Frame layout: a+0 = address of the
// atomic variable, a+8 = expected old value, a+12 = new value, a+16 = bool
// result (true iff the swap happened, i.e. the value read equals 'cmp').
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}
946
SANITIZER_INTERFACE_ATTRIBUTE
// Go runtime 64-bit compare-and-swap.  Frame layout: a+0 = address of the
// atomic variable, a+8 = expected old value, a+16 = new value, a+24 = bool
// result (true iff the swap happened, i.e. the value read equals 'cmp').
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
955 } // extern "C"
956 #endif // #if !SANITIZER_GO
957