1 //===-- tsan_interface_atomic.cc ------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is a part of ThreadSanitizer (TSan), a race detector.
9 //
10 //===----------------------------------------------------------------------===//
11
12 // ThreadSanitizer atomic operations are based on C++11/C1x standards.
// For background see C++11 standard.  A slightly older, publicly
14 // available draft of the standard (not entirely up-to-date, but close enough
15 // for casual browsing) is available here:
16 // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
17 // The following page contains more background information:
18 // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
19
20 #include "sanitizer_common/sanitizer_placement_new.h"
21 #include "sanitizer_common/sanitizer_stacktrace.h"
22 #include "tsan_interface_atomic.h"
23 #include "tsan_flags.h"
24 #include "tsan_rtl.h"
25
26 using namespace __tsan; // NOLINT
27
// Common prologue for every public atomic entry point below:
//  - captures the caller PC (for the function-entry event) and the current
//    PC rewound by one instruction (for memory-access attribution);
//  - normalizes legacy memory-order encodings (see ConvertOrder) and
//    optionally promotes the order to seq_cst under
//    flags()->force_seq_cst_atomics;
//  - bumps statistics and enters the runtime via the ScopedAtomic guard;
//  - tail-calls the Atomic##func implementation with the remaining args.
// Relies on 'a' and 'mo' being parameter names of the enclosing function.
#define SCOPED_ATOMIC(func, ...) \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = __sanitizer::StackTrace::GetCurrentPc(); \
    pc = __sanitizer::StackTrace::GetPreviousInstructionPc(pc); \
    mo = ConvertOrder(mo); \
    mo = flags()->force_seq_cst_atomics ? (morder)mo_seq_cst : mo; \
    ThreadState *const thr = cur_thread(); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, __FUNCTION__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
39
// RAII guard bracketing one atomic operation: processes pending signals,
// emits a function-entry event for the caller PC, and marks the thread as
// executing inside the runtime (in_rtl) for the duration of the operation.
class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const char *func)
      : thr_(thr) {
    // An interface call must not arrive while already inside the runtime.
    CHECK_EQ(thr_->in_rtl, 0);
    ProcessPendingSignals(thr);
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s\n", thr_->tid, func);
    thr_->in_rtl++;
  }
  ~ScopedAtomic() {
    // Mirror of the constructor: leave the runtime, then pop the frame.
    thr_->in_rtl--;
    CHECK_EQ(thr_->in_rtl, 0);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};
58
// Short aliases for the public interface types and memory-order constants,
// used throughout this file.
typedef __tsan_memory_order morder;
typedef __tsan_atomic8 a8;
typedef __tsan_atomic16 a16;
typedef __tsan_atomic32 a32;
typedef __tsan_atomic64 a64;
typedef __tsan_atomic128 a128;
const morder mo_relaxed = __tsan_memory_order_relaxed;
const morder mo_consume = __tsan_memory_order_consume;
const morder mo_acquire = __tsan_memory_order_acquire;
const morder mo_release = __tsan_memory_order_release;
const morder mo_acq_rel = __tsan_memory_order_acq_rel;
const morder mo_seq_cst = __tsan_memory_order_seq_cst;
72
AtomicStatInc(ThreadState * thr,uptr size,morder mo,StatType t)73 static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
74 StatInc(thr, StatAtomic);
75 StatInc(thr, t);
76 StatInc(thr, size == 1 ? StatAtomic1
77 : size == 2 ? StatAtomic2
78 : size == 4 ? StatAtomic4
79 : size == 8 ? StatAtomic8
80 : StatAtomic16);
81 StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
82 : mo == mo_consume ? StatAtomicConsume
83 : mo == mo_acquire ? StatAtomicAcquire
84 : mo == mo_release ? StatAtomicRelease
85 : mo == mo_acq_rel ? StatAtomicAcq_Rel
86 : StatAtomicSeq_Cst);
87 }
88
IsLoadOrder(morder mo)89 static bool IsLoadOrder(morder mo) {
90 return mo == mo_relaxed || mo == mo_consume
91 || mo == mo_acquire || mo == mo_seq_cst;
92 }
93
IsStoreOrder(morder mo)94 static bool IsStoreOrder(morder mo) {
95 return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
96 }
97
IsReleaseOrder(morder mo)98 static bool IsReleaseOrder(morder mo) {
99 return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
100 }
101
IsAcquireOrder(morder mo)102 static bool IsAcquireOrder(morder mo) {
103 return mo == mo_consume || mo == mo_acquire
104 || mo == mo_acq_rel || mo == mo_seq_cst;
105 }
106
IsAcqRelOrder(morder mo)107 static bool IsAcqRelOrder(morder mo) {
108 return mo == mo_acq_rel || mo == mo_seq_cst;
109 }
110
ConvertOrder(morder mo)111 static morder ConvertOrder(morder mo) {
112 if (mo > (morder)100500) {
113 mo = morder(mo - 100500);
114 if (mo == morder(1 << 0))
115 mo = mo_relaxed;
116 else if (mo == morder(1 << 1))
117 mo = mo_consume;
118 else if (mo == morder(1 << 2))
119 mo = mo_acquire;
120 else if (mo == morder(1 << 3))
121 mo = mo_release;
122 else if (mo == morder(1 << 4))
123 mo = mo_acq_rel;
124 else if (mo == morder(1 << 5))
125 mo = mo_seq_cst;
126 }
127 CHECK_GE(mo, mo_relaxed);
128 CHECK_LE(mo, mo_seq_cst);
129 return mo;
130 }
131
// Primitive atomic operations built on the compiler's __sync builtins.
// Each returns the value of *v observed before the operation.

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  // Emulate it with a compare-and-swap loop.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;  // Lost the race; retry with the freshly observed value.
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
174
// clang does not support 128-bit atomic ops.
// Atomic ops are executed under tsan internal mutex,
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
// Hence all of the 128-bit overloads below are plain (non-atomic)
// read-modify-write sequences; each returns the previous value of *v.
#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16
a128 func_xchg(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif
229
230 template<typename T>
SizeLog()231 static int SizeLog() {
232 if (sizeof(T) <= 1)
233 return kSizeLog1;
234 else if (sizeof(T) <= 2)
235 return kSizeLog2;
236 else if (sizeof(T) <= 4)
237 return kSizeLog4;
238 else
239 return kSizeLog8;
240 // For 16-byte atomics we also use 8-byte memory access,
241 // this leads to false negatives only in very obscure cases.
242 }
243
// Atomic load: on the fast path performs a plain read; on the acquire
// (or oversized) path also acquires the sync object's vector clock.
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
    morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Note: sizeof(a) is the size of the pointer, so the fast path admits
  // types up to the native word width.
  if (!IsAcquireOrder(mo) && sizeof(T) <= sizeof(a)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return *a;
  }
  // Slow path: acquire the clock and read the value while read-holding
  // the sync object's mutex, so the read pairs with a concurrent
  // release-store done under the same lock.
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, false);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.acquire(&s->clock);
  T v = *a;
  s->mtx.ReadUnlock();
  __sync_synchronize();
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}
263
// Atomic store: on the fast path performs a plain write; on the release
// (or oversized) path release-stores the vector clock under the sync
// object's mutex before writing the value.
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  // Note: sizeof(a) is the size of the pointer (fast path covers types
  // up to the native word width).
  if (!IsReleaseOrder(mo) && sizeof(T) <= sizeof(a)) {
    *a = v;
    return;
  }
  __sync_synchronize();
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  thr->clock.ReleaseStore(&s->clock);
  *a = v;
  s->mtx.Unlock();
  // Trailing memory barrier to provide sequential consistency
  // for Dekker-like store-load synchronization.
  __sync_synchronize();
}
287
// Generic read-modify-write: transfers vector clocks according to the
// memory order, then applies primitive F under the sync object's lock.
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  v = F(a, v);  // F returns the previous value (fetch-and-op semantics).
  s->mtx.Unlock();
  return v;
}
303
// Thin wrappers mapping each read-modify-write operation onto AtomicRMW
// instantiated with the corresponding primitive; each returns the
// previous value of *a.

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}
345
// Compare-and-swap. Returns true on success; on failure stores the value
// actually observed in memory through *c (C11 compare_exchange contract).
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  (void)fmo;  // Unused because llvm does not pass it yet.
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = CTX()->synctab.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->clock.set(thr->tid, thr->fast_state.epoch());
  // Clock transfer by memory order; note it is applied before the CAS,
  // i.e. even when the CAS subsequently fails.
  if (IsAcqRelOrder(mo))
    thr->clock.acq_rel(&s->clock);
  else if (IsReleaseOrder(mo))
    thr->clock.release(&s->clock);
  else if (IsAcquireOrder(mo))
    thr->clock.acquire(&s->clock);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  s->mtx.Unlock();
  if (pr == cc)
    return true;
  *c = pr;
  return false;
}
367
// Value-returning CAS variant: runs the bool CAS on a local copy of the
// comparand and returns the value observed in memory (== c on success).
template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

// Thread fence: currently only a hardware barrier, with no happens-before
// modeling in the race detector.
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
379
// Public entry points: atomic loads for each supported width.
// All bodies are generated by SCOPED_ATOMIC, which dispatches to
// AtomicLoad<T>.

a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif
401
// Public entry points: atomic stores for each supported width
// (dispatch to AtomicStore<T>).

void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif
423
// Public entry points: atomic exchange for each supported width
// (dispatch to AtomicExchange<T>; return the previous value).

a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif
445
// Public entry points: fetch_add / fetch_sub for each supported width
// (return the previous value).

a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif
489
// Public entry points: fetch_and / fetch_or for each supported width
// (return the previous value).

a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif
533
// Public entry points: fetch_xor / fetch_nand for each supported width
// (return the previous value).

a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif
577
// Public entry points: compare_exchange (strong and weak flavors).
// Both flavors dispatch to the same AtomicCAS implementation — the
// underlying CAS never fails spuriously, which satisfies the weak
// contract as well. Return non-zero on success; on failure *c receives
// the value observed in memory.

int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
631
// Public entry points: value-returning compare_exchange (GNU
// __sync_val_compare_and_swap style). Returns the value observed in
// memory: equal to c iff the swap happened.

a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
650
#if __TSAN_HAS_INT128
// 128-bit value-returning compare_exchange. Was mistakenly named
// __tsan_atomic64_compare_exchange_val, which clashed with the 64-bit
// entry point's C-linkage symbol and left the 128-bit interface symbol
// (the naming pattern every other 128-bit op here follows) undefined.
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif
657
void __tsan_atomic_thread_fence(morder mo) {
  // Dummy variable required by SCOPED_ATOMIC, whose stat accounting uses
  // sizeof(*a) — an unevaluated context, so 'a' is never dereferenced.
  char* a;
  SCOPED_ATOMIC(Fence, mo);
}
662
void __tsan_atomic_signal_fence(morder mo) {
  // Intentionally empty: a signal fence only constrains the compiler, and
  // this function being a call already acts as a compiler barrier.
}
665