/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For documentation, see jit/AtomicOperations.h */

#ifndef jit_arm_AtomicOperations_arm_h
#define jit_arm_AtomicOperations_arm_h

#include <string.h>              // for memcpy, memmove

#include "jit/arm/Architecture-arm.h"

#if defined(__clang__) || defined(__GNUC__)

// The default implementation tactic for gcc/clang is to use the newer
// __atomic intrinsics added for use in C++11 <atomic>.  Where those
// aren't available, we use GCC's older __sync functions instead.
//
// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a
// backward-compatible option for older compilers: enable it to use
// GCC's old __sync functions instead of the newer __atomic functions.
// This will be required for GCC 4.6.x and earlier, and probably for
// Clang 3.1, should we need to use those versions.

//#define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
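
// For orientation, the two intrinsic families express the same seq-cst
// read-modify-write as follows (a sketch only; `cell` is illustrative and
// not part of this header):
//
//   uint32_t cell = 0;
//   uint32_t prev1 = __atomic_fetch_add(&cell, 1, __ATOMIC_SEQ_CST); // __atomic form
//   uint32_t prev2 = __sync_fetch_and_add(&cell, 1);                 // __sync form, full barrier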

inline bool
js::jit::AtomicOperations::isLockfree8()
{
    // The JIT and the C++ compiler must agree on whether to use atomics
    // for 64-bit accesses.  There are two ways to do this: either the
    // JIT defers to the C++ compiler (so if the C++ code is compiled
    // for ARMv6, say, and __atomic_always_lock_free(8) is false, then the
    // JIT ignores the fact that the program is running on ARMv7 or newer);
    // or the C++ code in this file calls out to run-time generated code
    // to do whatever the JIT does.
    //
    // For now, make the JIT defer to the C++ compiler when we know what
    // the C++ compiler will do, and otherwise assume a lock is needed.
# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
    return HasLDSTREXBHD() && __atomic_always_lock_free(sizeof(int64_t), 0);
# else
    return false;
# endif
}
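
// A hedged caller-side sketch (function names are illustrative, not part
// of this header): callers gate 8-byte atomic paths on this predicate and
// fall back to a lock (e.g. js::jit::RegionLock) when it is false.
//
//   if (js::jit::AtomicOperations::isLockfree8())
//       useLockFree8ByteAtomics();
//   else
//       useRegionLockFallback();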

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_synchronize();
# else
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_synchronize();
    T v = *addr;
    __sync_synchronize();
# else
    T v;
    __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
# endif
    return v;
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_synchronize();
    *addr = val;
    __sync_synchronize();
# else
    __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    // The __sync family has no plain exchange with full-barrier semantics,
    // so synthesize one from a compare-and-swap loop.
    T v;
    __sync_synchronize();
    do {
        v = *addr;
    } while (__sync_val_compare_and_swap(addr, v, val) != v);
    return v;
# else
    T v;
    __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
    return v;
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_val_compare_and_swap(addr, oldval, newval);
# else
    // On failure, __atomic_compare_exchange writes the observed value back
    // into oldval, so returning oldval yields the previous cell value in
    // either case.
    __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
# endif
}
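
// A hedged caller-side sketch of a CAS loop built on the primitive above
// (`cell` is illustrative and not part of this header):
//
//   int32_t old = js::jit::AtomicOperations::loadSeqCst(&cell);
//   for (;;) {
//       int32_t prev = js::jit::AtomicOperations::compareExchangeSeqCst(&cell, old, old * 2);
//       if (prev == old)
//           break;            // the doubling took effect atomically
//       old = prev;           // lost the race; retry with the fresh value
//   }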

template<typename T>
inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_add(addr, val);
# else
    return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_sub(addr, val);
# else
    return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_and(addr, val);
# else
    return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_or(addr, val);
# else
    return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_xor(addr, val);
# else
    return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
# endif
}
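
// A hedged caller-side sketch (`counter` is illustrative): the fetch ops
// above return the value observed before the update, and the static_assert
// restricts them to 1-, 2-, and 4-byte cells for now.
//
//   int32_t counter = 0;
//   int32_t before = js::jit::AtomicOperations::fetchAddSeqCst(&counter, int32_t(1));
//   MOZ_ASSERT(before == 0 && counter == 1);   // single-threaded view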

template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
    return *addr;               // FIXME (1208663): not yet safe
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
    *addr = val;                // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
        ;
# else
    uint32_t zero = 0;
    uint32_t one = 1;
    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
        // On failure the intrinsic writes the observed value into zero,
        // so reset it before retrying.
        zero = 0;
    }
# endif
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_sub_and_fetch(&spinlock, 1);
# else
    uint32_t zero = 0;
    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
# endif
}
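
// A hedged usage sketch (assuming a RegionLock `lock` reachable by the
// caller and a shared 8-byte cell at `addr`; the names are illustrative):
//
//   lock.acquire<8>(addr);    // spin until this thread owns the region
//   /* ... perform the non-lock-free 8-byte access ... */
//   lock.release<8>(addr);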

# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS

#elif defined(ENABLE_SHARED_ARRAY_BUFFER)

# error "Either disable JS shared memory at compile time, use GCC or Clang, or add code here"

#endif

#endif // jit_arm_AtomicOperations_arm_h