//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Not intended for direct inclusion. Include sanitizer_atomic.h.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
#define SANITIZER_ATOMIC_CLANG_MIPS_H

namespace __sanitizer {

// MIPS32 does not support atomics > 4 bytes. To address this lack of
// functionality, the sanitizer library provides helper methods which use an
// internal spin lock mechanism to emulate atomic operations when the size is
// 8 bytes.
static void __spin_lock(volatile int *lock) {
  while (__sync_lock_test_and_set(lock, 1))
    while (*lock) {
    }
}

static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }

// Make sure the lock is on its own cache line to prevent false sharing.
// Put it inside a struct that is aligned and padded to the typical MIPS
// cacheline which is 32 bytes.
static struct {
  int lock;
  char pad[32 - sizeof(int)];
} __attribute__((aligned(32))) lock = {0, {0}};
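
// Illustrative usage sketch (not part of the interface): callers include
// sanitizer_atomic.h and use the generic API; on MIPS32 the 64-bit
// specializations below are selected transparently, e.g.
//
//   atomic_uint64_t counter;
//   atomic_store(&counter, 0, memory_order_relaxed);
//   u64 prev = atomic_fetch_add(&counter, 1, memory_order_relaxed);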

template <>
INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type ret;

  __spin_lock(&lock.lock);
  ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
  ptr->val_dont_use = ret + val;
  __spin_unlock(&lock.lock);

  return ret;
}

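// Subtraction reuses the locked fetch_add above; negating the unsigned value
// relies on well-defined modular wraparound.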
template <>
INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
                                              atomic_uint64_t::Type val,
                                              memory_order mo) {
  return atomic_fetch_add(ptr, -val, mo);
}

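// Compare-and-exchange is emulated under the same spin lock: the current
// value is read, compared against *cmp, and replaced with xchg only on a
// match.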
template <>
INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
                                           atomic_uint64_t::Type *cmp,
                                           atomic_uint64_t::Type xchg,
                                           memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  typedef atomic_uint64_t::Type Type;
  Type cmpv = *cmp;
  Type prev;
  bool ret = false;

  __spin_lock(&lock.lock);
  prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
  if (prev == cmpv) {
    ret = true;
    ptr->val_dont_use = xchg;
  } else {
    // On failure, report the value actually observed back to the caller,
    // matching the usual compare-exchange contract.
    *cmp = prev;
  }
  __spin_unlock(&lock.lock);

  return ret;
}

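// A 64-bit load is emulated as atomic_fetch_add of zero so that it takes the
// same spin lock and cannot observe a torn value.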
template <>
INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
                                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  atomic_uint64_t::Type zero = 0;
  volatile atomic_uint64_t *Newptr =
      const_cast<volatile atomic_uint64_t *>(ptr);
  return atomic_fetch_add(Newptr, zero, mo);
}

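// A plain 64-bit store would not be atomic on MIPS32, so the store also goes
// through the spin lock to stay consistent with the read-modify-write
// operations above.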
template <>
INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
                         memory_order mo) {
  DCHECK(mo &
         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
  DCHECK(!((uptr)ptr % sizeof(*ptr)));

  __spin_lock(&lock.lock);
  ptr->val_dont_use = v;
  __spin_unlock(&lock.lock);
}

}  // namespace __sanitizer

#endif  // SANITIZER_ATOMIC_CLANG_MIPS_H