Lines Matching defs:a

Definitions of the identifier a in ThreadSanitizer's atomic interface (tsan_interface_atomic.cpp in LLVM's compiler-rt). Each entry gives the source line number, the defining line, and the enclosing function.

181 static atomic_uint8_t *to_atomic(const volatile a8 *a) {  in to_atomic()
185 static atomic_uint16_t *to_atomic(const volatile a16 *a) { in to_atomic()
190 static atomic_uint32_t *to_atomic(const volatile a32 *a) { in to_atomic()
194 static atomic_uint64_t *to_atomic(const volatile a64 *a) { in to_atomic()
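
The four to_atomic() overloads (lines 181-194) most plausibly just reinterpret the plain integer typedefs a8..a64 as the corresponding sanitizer_common atomic wrapper types, so one set of generic atomic helpers can serve every width. A minimal sketch for the 8-bit case, with both typedefs stubbed locally (the real definitions live elsewhere in the runtime, so the layouts here are assumptions):

  typedef unsigned char a8;                              // assumed: plain 8-bit integer
  struct atomic_uint8_t { volatile a8 val_dont_use; };   // assumed: wrapper of identical layout

  static atomic_uint8_t *to_atomic(const volatile a8 *a) {
    // Strip const/volatile, then reinterpret as the wrapper type;
    // sound only because both types share size and alignment.
    return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
  }
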
212 static T NoTsanAtomicLoad(const volatile T *a, morder mo) { in NoTsanAtomicLoad()
217 static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) { in NoTsanAtomicLoad()
224 static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) { in AtomicLoad()
250 static void NoTsanAtomicStore(volatile T *a, T v, morder mo) { in NoTsanAtomicStore()
255 static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) { in NoTsanAtomicStore()
262 static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicStore()
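
Each operation comes in two layers: a NoTsan* function that performs only the raw atomic operation (for paths where instrumentation must be bypassed), and an instrumented wrapper that also receives the ThreadState and caller pc so the access can be reported to the race detector and acquire/release semantics propagated. A hedged sketch of that split, with GCC/Clang __atomic builtins standing in for the sanitizer_common atomics; ReportAtomicRead is a hypothetical name for the bookkeeping, not from the listing:

  struct ThreadState;                  // opaque runtime type, as in the listing
  typedef unsigned long uptr;
  typedef int morder;                  // memory-order enum, stubbed as int

  static void ReportAtomicRead(ThreadState *, uptr, const volatile void *) {
    // hypothetical stand-in for the runtime's shadow-memory bookkeeping
  }

  template <typename T>
  static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
    return __atomic_load_n(a, mo);     // just the hardware load
  }

  template <typename T>
  static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
    ReportAtomicRead(thr, pc, a);      // tell the detector about the read
    return NoTsanAtomicLoad(a, mo);    // then perform the real load
  }
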
285 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicRMW()
307 static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) { in NoTsanAtomicExchange()
312 static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchAdd()
317 static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchSub()
322 static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchAnd()
327 static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchOr()
332 static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchXor()
337 static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) { in NoTsanAtomicFetchNand()
342 static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicExchange()
348 static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchAdd()
354 static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchSub()
360 static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchAnd()
366 static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchOr()
372 static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchXor()
378 static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) { in AtomicFetchNand()
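
The seven instrumented RMW wrappers (lines 342-378) share one signature, and given the single AtomicRMW helper at line 285 they most likely all funnel through it, with the concrete raw operation passed in. A sketch of that structure, under the assumption that the raw op travels as a function-pointer template argument (types stubbed as before):

  struct ThreadState;
  typedef unsigned long uptr;
  typedef int morder;

  template <typename T>
  static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
    return __atomic_fetch_add(a, v, mo);   // raw RMW, returns the old value
  }

  // One shared path: do the detector bookkeeping once, then dispatch to
  // whichever raw operation was requested.
  template <typename T, T (*F)(volatile T *a, T v, morder mo)>
  static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
    // MemoryAccess() plus release/acquire of the location's sync clock
    // would happen here in the real runtime.
    return F(a, v, mo);
  }

  template <typename T>
  static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
    return AtomicRMW<T, NoTsanAtomicFetchAdd<T> >(thr, pc, a, v, mo);
  }
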
384 static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) { in NoTsanAtomicCAS()
389 static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { in NoTsanAtomicCAS()
401 static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) { in NoTsanAtomicCAS()
407 static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v, morder mo, morder fmo) { in AtomicCAS()
450 static T AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v, morder mo, morder fmo) { in AtomicCAS()
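
Two CAS shapes appear: a C11-style form (line 407) that takes a pointer to the expected value and returns whether the swap succeeded, and a value-returning form (line 450) in the style of __sync_val_compare_and_swap. The latter can plausibly be a thin wrapper over the former, because a failed C11 CAS writes the observed value back through the expected pointer. A self-contained sketch with the detector bookkeeping elided (stubbed types; raw CAS via a compiler builtin):

  struct ThreadState;
  typedef unsigned long uptr;
  typedef int morder;

  template <typename T>
  static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                        morder mo, morder fmo) {
    (void)thr; (void)pc; (void)mo; (void)fmo;   // bookkeeping elided in sketch
    return __atomic_compare_exchange_n(a, c, v, /*weak=*/false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  }

  template <typename T>
  static T AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T c, T v,
                     morder mo, morder fmo) {
    // On failure, 'c' now holds the value actually observed in *a; on
    // success it still holds the expected (old) value. Either way that
    // is exactly what the value-returning form must return.
    AtomicCAS(thr, pc, a, &c, v, mo, fmo);
    return c;
  }
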
498 a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) { in __tsan_atomic8_load()
503 a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) { in __tsan_atomic16_load()
508 a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) { in __tsan_atomic32_load()
513 a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) { in __tsan_atomic64_load()
519 a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) { in __tsan_atomic128_load()
525 void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_store()
530 void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_store()
535 void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_store()
540 void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_store()
546 void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_store()
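
Lines 498-546 are the public C entry points that the compiler emits in place of atomic loads and stores when building with -fsanitize=thread. An illustrative caller's-eye view; the typedefs and the numeric morder values are assumptions based on the usual C++ memory-order ordering (relaxed = 0 through seq_cst = 5):

  typedef int a32;                 // assumed: 32-bit atomic operand type
  typedef int morder;              // assumed: 2 = acquire, 3 = release

  extern "C" a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
  extern "C" void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);

  int flag;

  // Roughly what instrumented code does for:
  //   flag.store(1, memory_order_release); ... flag.load(memory_order_acquire);
  void publish() { __tsan_atomic32_store(&flag, 1, /*mo_release=*/3); }
  int observe() { return __tsan_atomic32_load(&flag, /*mo_acquire=*/2); }
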
552 a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_exchange()
557 a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_exchange()
562 a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_exchange()
567 a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_exchange()
573 a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_exchange()
579 a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_add()
584 a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_add()
589 a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_add()
594 a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_add()
600 a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_add()
606 a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_sub()
611 a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_sub()
616 a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_sub()
621 a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_sub()
627 a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_sub()
633 a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_and()
638 a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_and()
643 a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_and()
648 a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_and()
654 a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_and()
660 a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_or()
665 a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_or()
670 a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_or()
675 a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_or()
681 a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_or()
687 a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_xor()
692 a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_xor()
697 a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_xor()
702 a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_xor()
708 a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_xor()
714 a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) { in __tsan_atomic8_fetch_nand()
719 a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) { in __tsan_atomic16_fetch_nand()
724 a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) { in __tsan_atomic32_fetch_nand()
729 a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) { in __tsan_atomic64_fetch_nand()
735 a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) { in __tsan_atomic128_fetch_nand()
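
Of the fetch operations above, fetch_nand is the one without a C++ standard-library counterpart: it atomically replaces the value with ~(old & v) and returns the old value, matching GCC's __atomic_fetch_nand builtin. A self-contained sketch of that semantic via a CAS loop (the runtime's actual implementation strategy may differ):

  template <typename T>
  static T FetchNandSketch(volatile T *a, T v) {
    T old = __atomic_load_n(a, __ATOMIC_RELAXED);
    // Weak CAS in a loop: on failure 'old' is refreshed with the current
    // value, so we simply recompute ~(old & v) and retry.
    while (!__atomic_compare_exchange_n(a, &old, (T)~(old & v),
                                        /*weak=*/true, __ATOMIC_ACQ_REL,
                                        __ATOMIC_RELAXED)) {
    }
    return old;
  }
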
741 int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_strong()
747 int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_strong()
753 int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_strong()
759 int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_strong()
766 int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_strong()
773 int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_weak()
779 int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v, morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_weak()
785 int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v, morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_weak()
791 int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v, morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_weak()
798 int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v, morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_weak()
805 a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo, morder fmo) { in __tsan_atomic8_compare_exchange_val()
811 a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v, morder mo, morder fmo) { in __tsan_atomic16_compare_exchange_val()
817 a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v, morder mo, morder fmo) { in __tsan_atomic32_compare_exchange_val()
823 a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v, morder mo, morder fmo) { in __tsan_atomic64_compare_exchange_val()
830 a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v, morder mo, morder fmo) { in __tsan_atomic128_compare_exchange_val()
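
The three CAS families differ only in calling convention: ..._compare_exchange_strong and ..._weak mirror the C11 forms (nonzero result on success; on failure *c is updated with the observed value, and fmo is the memory order for the failure path), while ..._compare_exchange_val mirrors __sync_val_compare_and_swap and returns the previously stored value. A caller's-eye illustration for the 8-bit width; typedefs and numeric orders are assumptions, as before:

  typedef char a8;                 // assumed: char-sized operand type
  typedef int morder;              // assumed: 2 = acquire, 4 = acq_rel

  extern "C" int __tsan_atomic8_compare_exchange_strong(
      volatile a8 *a, a8 *c, a8 v, morder mo, morder fmo);
  extern "C" a8 __tsan_atomic8_compare_exchange_val(
      volatile a8 *a, a8 c, a8 v, morder mo, morder fmo);

  void demo(volatile a8 *lock) {
    a8 expected = 0;
    // C11 style: nonzero result means the 0 -> 1 transition happened;
    // otherwise 'expected' now holds the value that was actually seen.
    int ok = __tsan_atomic8_compare_exchange_strong(lock, &expected, 1,
                                                    /*mo=*/4, /*fmo=*/2);
    // __sync style: compare against 0 directly, get the old value back.
    a8 old = __tsan_atomic8_compare_exchange_val(lock, 0, 1, /*mo=*/4, /*fmo=*/2);
    (void)ok;
    (void)old;
  }
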
868 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_load()
873 void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_load()
878 void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_store()
883 void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_store()
888 void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_fetch_add()
893 void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_fetch_add()
898 void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_fetch_and()
904 void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_fetch_and()
910 void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_fetch_or()
916 void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_fetch_or()
922 void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_exchange()
927 void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_exchange()
933 void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic32_compare_exchange()
942 void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) { in __tsan_go_atomic64_compare_exchange()
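
The __tsan_go_* entry points (lines 868 onward) serve the Go race runtime, which does not pass operands through the C calling convention; instead every call receives two program counters plus a single buffer a into which the Go side has packed the operands, and the result is written back into the same buffer. A hedged sketch of a 32-bit load under an assumed layout (pointer to the atomic variable at offset 0, result slot immediately after it); the types and the helper are stand-ins:

  struct ThreadState;
  typedef unsigned long uptr;
  typedef unsigned char u8;
  typedef int a32;

  // Stub standing in for the instrumented AtomicLoad shown earlier.
  static a32 AtomicLoadAcquire(ThreadState *, uptr, const volatile a32 *a) {
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
  }

  void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
    // Assumed buffer layout: [0, 8) pointer to the variable,
    // [8, 12) result slot. Go atomic loads have acquire semantics.
    a32 *addr = *reinterpret_cast<a32 **>(a);
    a32 *res = reinterpret_cast<a32 *>(a + sizeof(a32 *));
    *res = AtomicLoadAcquire(thr, pc, addr);
    (void)cpc;
  }
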