//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
#define SANITIZER_ATOMIC_CLANG_OTHER_H

namespace __sanitizer {

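// Processor yield for spin-wait loops. This generic fallback has no
// architecture-specific pause instruction; the empty asm acts only as a
// compiler barrier, and the iteration count is ignored.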
inline void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
}

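// Atomic load with explicit memory ordering, implemented with a plain load
// plus compiler/hardware barriers (rather than the __atomic builtins) where
// an aligned native load is assumed to be atomic.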
template<typename T>
inline typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));
  typename T::Type v;

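  // Accesses smaller than 8 bytes, and any access on a 64-bit target, are
  // assumed atomic when aligned; only 64-bit accesses on 32-bit targets take
  // the __atomic fallback below.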
  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that the processor respects data dependencies
      // (and that the compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __sync_synchronize();
    } else {  // seq_cst
      // E.g. on POWER we need a hw fence even before the load.
      __sync_synchronize();
      v = a->val_dont_use;
      __sync_synchronize();
    }
  } else {
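    // 64-bit load on a 32-bit target: a plain load is not atomic, so use the
    // __atomic builtin, conservatively upgraded to seq_cst for any mo.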
    __atomic_load(const_cast<typename T::Type volatile *>(&a->val_dont_use), &v,
                  __ATOMIC_SEQ_CST);
  }
  return v;
}

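// Atomic store with explicit memory ordering; mirrors atomic_load above,
// placing barriers around a plain store where an aligned native store is
// assumed to be atomic.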
template<typename T>
inline void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  DCHECK(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  DCHECK(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      __sync_synchronize();
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      __sync_synchronize();
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
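    // As in atomic_load: 64-bit stores on 32-bit targets go through the
    // __atomic builtin, conservatively upgraded to seq_cst.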
    __atomic_store(&a->val_dont_use, &v, __ATOMIC_SEQ_CST);
  }
}

}  // namespace __sanitizer

#endif  // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H