//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_H
#define SANITIZER_ATOMIC_CLANG_H

#if defined(__i386__) || defined(__x86_64__)
# include "sanitizer_atomic_clang_x86.h"
#else
# include "sanitizer_atomic_clang_other.h"
#endif

namespace __sanitizer {

// We would like to just use compiler builtin atomic operations
// for loads and stores, but they are mostly broken in clang:
// - they lead to vastly inefficient code generation
//   (http://llvm.org/bugs/show_bug.cgi?id=17281)
// - 64-bit atomic operations are not implemented on x86_32
//   (http://llvm.org/bugs/show_bug.cgi?id=15034)
// - they are not implemented on ARM, where linking fails with:
//   error: undefined reference to '__atomic_load_4'

// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
// for mappings of the memory model to different processors.

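// As a concrete instance of those mappings (added commentary, not part of the
// original header): on x86/x86_64, atomic loads (even seq_cst) and stores up
// to memory_order_release compile to plain MOVs, while a memory_order_seq_cst
// store requires MOV+MFENCE or an implicitly locked XCHG.
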
INLINE void atomic_signal_fence(memory_order) {
  __asm__ __volatile__("" ::: "memory");
}

INLINE void atomic_thread_fence(memory_order) {
  __sync_synchronize();
}

template<typename T>
INLINE typename T::Type atomic_fetch_add(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, v);
}

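// Illustrative usage sketch (added commentary; the atomic_uint32_t/u32 types
// come from sanitizer_atomic.h):
//   static atomic_uint32_t counter;
//   u32 prev = atomic_fetch_add(&counter, 1, memory_order_relaxed);
// The __sync builtins act as full barriers regardless of the requested
// memory order, which is why 'mo' can safely be ignored here.
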
template<typename T>
INLINE typename T::Type atomic_fetch_sub(volatile T *a,
    typename T::Type v, memory_order mo) {
  (void)mo;
  DCHECK(!((uptr)a % sizeof(*a)));
  return __sync_fetch_and_add(&a->val_dont_use, -v);
}

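// Note on the fences in atomic_exchange below (added commentary): per the GCC
// builtin documentation, __sync_lock_test_and_set is only an acquire barrier,
// so the explicit __sync_synchronize before it upgrades release-or-stronger
// orderings, and the one after it provides the trailing full barrier that
// memory_order_seq_cst requires.
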
template<typename T>
INLINE typename T::Type atomic_exchange(volatile T *a,
    typename T::Type v, memory_order mo) {
  DCHECK(!((uptr)a % sizeof(*a)));
  if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
    __sync_synchronize();
  v = __sync_lock_test_and_set(&a->val_dont_use, v);
  if (mo == memory_order_seq_cst)
    __sync_synchronize();
  return v;
}

template <typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv) return true;
  *cmp = prev;
  return false;
}

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

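// Illustrative CAS loop (added commentary; kDirtyBit is a hypothetical
// constant) showing the intended use of the compare-exchange primitives:
//   atomic_uint32_t flags;
//   u32 old = atomic_load(&flags, memory_order_relaxed);
//   while (!atomic_compare_exchange_weak(&flags, &old, old | kDirtyBit,
//                                        memory_order_acq_rel)) {
//     // On failure 'old' has been updated to the current value; retry.
//   }
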
}  // namespace __sanitizer

// This include provides explicit template instantiations for atomic_uint64_t
// on MIPS32, which does not directly support 8 byte atomics. It has to
// follow the template definitions above.
#if defined(_MIPS_SIM) && defined(_ABIO32)
# include "sanitizer_atomic_clang_mips.h"
#endif

#undef ATOMIC_ORDER

#endif  // SANITIZER_ATOMIC_CLANG_H