/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For documentation, see jit/AtomicOperations.h */

// NOTE: Unlike MIPS64, MIPS32 does not provide hardware support for lock-free
// 64-bit atomics. Below we lie about 8-byte atomics always being lock-free in
// order to support the wasm JIT. The 64-bit atomics for MIPS32 do not use the
// __atomic intrinsics and therefore do not rely on -latomic.
// Access to a specific 64-bit variable in memory is protected by an
// AddressLock whose instance is shared between the JIT and AtomicOperations.

#ifndef jit_mips_shared_AtomicOperations_mips_shared_h
#define jit_mips_shared_AtomicOperations_mips_shared_h

#include "mozilla/Assertions.h"
#include "mozilla/Types.h"

#include "builtin/AtomicsObject.h"
#include "vm/Uint8Clamped.h"

#if !defined(__clang__) && !defined(__GNUC__)
#  error "This file is only for gcc-compatible compilers"
#endif

#if defined(JS_SIMULATOR_MIPS32) && !defined(__i386__)
#  error "The MIPS32 simulator atomics assume x86"
#endif

namespace js {
namespace jit {

#if !defined(JS_64BIT)

struct AddressLock {
 public:
  void acquire();
  void release();

 private:
  uint32_t spinlock;
};

static_assert(sizeof(AddressLock) == sizeof(uint32_t),
              "AddressLock must be 4 bytes for it to be consumed by jit");

// For now use a single global AddressLock.
static AddressLock gAtomic64Lock;
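// Note that with a single global lock, 64-bit atomic accesses to *different*
// addresses also serialize against each other; the addr argument taken by
// AddressGuard below is therefore currently unused.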

struct MOZ_RAII AddressGuard {
  explicit AddressGuard(void* addr) { gAtomic64Lock.acquire(); }

  ~AddressGuard() { gAtomic64Lock.release(); }
};
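// The lock-based 64-bit specializations below all follow the same RAII
// pattern, for example:
//
//   AddressGuard guard(addr);  // takes gAtomic64Lock
//   int64_t old = *addr;       // plain accesses are race-free under the lock
//   *addr = old + val;
//   return old;                // the guard releases the lock on scope exit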

#endif

}  // namespace jit
}  // namespace js

inline bool js::jit::AtomicOperations::Initialize() {
  // Nothing
  return true;
}

inline void js::jit::AtomicOperations::ShutDown() {
  // Nothing
}

inline bool js::jit::AtomicOperations::hasAtomic8() { return true; }

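// On MIPS32 this returns true even though 64-bit atomics are implemented
// with a spinlock; see the NOTE at the top of this file. The assertions
// below cover only the sizes for which the __atomic intrinsics are used.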
inline bool js::jit::AtomicOperations::isLockfree8() {
  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
#if defined(JS_64BIT)
  MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
#endif
  return true;
}

inline void js::jit::AtomicOperations::fenceSeqCst() {
  __atomic_thread_fence(__ATOMIC_SEQ_CST);
}

template <typename T>
inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  T v;
  __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
  return v;
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::loadSeqCst(int64_t* addr) {
  AddressGuard guard(addr);
  return *addr;
}

template <>
inline uint64_t js::jit::AtomicOperations::loadSeqCst(uint64_t* addr) {
  AddressGuard guard(addr);
  return *addr;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline void js::jit::AtomicOperations::storeSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline void js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val) {
  AddressGuard guard(addr);
  *addr = val;
}

template <>
inline void js::jit::AtomicOperations::storeSeqCst(uint64_t* addr,
                                                   uint64_t val) {
  AddressGuard guard(addr);
  *addr = val;
}

#endif

}  // namespace jit
}  // namespace js

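// On failure, __atomic_compare_exchange writes the value actually observed
// at addr back into oldval, so returning oldval yields the witnessed value
// whether or not the exchange succeeded.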
template <typename T>
inline T js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval,
                                                          T newval) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return oldval;
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::compareExchangeSeqCst(
    int64_t* addr, int64_t oldval, int64_t newval) {
  AddressGuard guard(addr);
  int64_t val = *addr;
  if (val == oldval) {
    *addr = newval;
  }
  return val;
}

template <>
inline uint64_t js::jit::AtomicOperations::compareExchangeSeqCst(
    uint64_t* addr, uint64_t oldval, uint64_t newval) {
  AddressGuard guard(addr);
  uint64_t val = *addr;
  if (val == oldval) {
    *addr = newval;
  }
  return val;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::fetchAddSeqCst(int64_t* addr,
                                                         int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = old + val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::fetchAddSeqCst(uint64_t* addr,
                                                          uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = old + val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::fetchSubSeqCst(int64_t* addr,
                                                         int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = old - val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::fetchSubSeqCst(uint64_t* addr,
                                                          uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = old - val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::fetchAndSeqCst(int64_t* addr,
                                                         int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = old & val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::fetchAndSeqCst(uint64_t* addr,
                                                          uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = old & val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::fetchOrSeqCst(int64_t* addr,
                                                        int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = old | val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::fetchOrSeqCst(uint64_t* addr,
                                                         uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = old | val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::fetchXorSeqCst(int64_t* addr,
                                                         int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = old ^ val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::fetchXorSeqCst(uint64_t* addr,
                                                          uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = old ^ val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

template <typename T>
inline T js::jit::AtomicOperations::loadSafeWhenRacy(T* addr) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  T v;
  __atomic_load(addr, &v, __ATOMIC_RELAXED);
  return v;
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

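// The plain 64-bit loads and stores below need not be access-atomic on
// 32-bit platforms and may tear; that is permitted for the SafeWhenRacy
// operations (see jit/AtomicOperations.h).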
template <>
inline int64_t js::jit::AtomicOperations::loadSafeWhenRacy(int64_t* addr) {
  return *addr;
}

template <>
inline uint64_t js::jit::AtomicOperations::loadSafeWhenRacy(uint64_t* addr) {
  return *addr;
}

#endif

template <>
inline uint8_clamped js::jit::AtomicOperations::loadSafeWhenRacy(
    uint8_clamped* addr) {
  uint8_t v;
  __atomic_load(&addr->val, &v, __ATOMIC_RELAXED);
  return uint8_clamped(v);
}

template <>
inline float js::jit::AtomicOperations::loadSafeWhenRacy(float* addr) {
  return *addr;
}

template <>
inline double js::jit::AtomicOperations::loadSafeWhenRacy(double* addr) {
  return *addr;
}

}  // namespace jit
}  // namespace js

template <typename T>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  __atomic_store(addr, &val, __ATOMIC_RELAXED);
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(int64_t* addr,
                                                         int64_t val) {
  *addr = val;
}

template <>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint64_t* addr,
                                                         uint64_t val) {
  *addr = val;
}

#endif

template <>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(uint8_clamped* addr,
                                                         uint8_clamped val) {
  __atomic_store(&addr->val, &val.val, __ATOMIC_RELAXED);
}

template <>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(float* addr,
                                                         float val) {
  *addr = val;
}

template <>
inline void js::jit::AtomicOperations::storeSafeWhenRacy(double* addr,
                                                         double val) {
  *addr = val;
}

}  // namespace jit
}  // namespace js

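// The asserts below check that [src, src + nbytes) and [dest, dest + nbytes)
// do not overlap in either direction; overlapping copies must go through
// memmoveSafeWhenRacy instead.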
inline void js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest,
                                                          const void* src,
                                                          size_t nbytes) {
  MOZ_ASSERT(!((char*)dest <= (char*)src && (char*)src < (char*)dest + nbytes));
  MOZ_ASSERT(!((char*)src <= (char*)dest && (char*)dest < (char*)src + nbytes));
  ::memcpy(dest, src, nbytes);
}

inline void js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest,
                                                           const void* src,
                                                           size_t nbytes) {
  ::memmove(dest, src, nbytes);
}

template <typename T>
inline T js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) {
  static_assert(sizeof(T) <= sizeof(void*),
                "atomics supported up to pointer size only");
  T v;
  __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
  return v;
}

namespace js {
namespace jit {

#if !defined(JS_64BIT)

template <>
inline int64_t js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr,
                                                         int64_t val) {
  AddressGuard guard(addr);
  int64_t old = *addr;
  *addr = val;
  return old;
}

template <>
inline uint64_t js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr,
                                                          uint64_t val) {
  AddressGuard guard(addr);
  uint64_t old = *addr;
  *addr = val;
  return old;
}

#endif

}  // namespace jit
}  // namespace js

#if !defined(JS_64BIT)

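// A sequentially consistent test-and-set spinlock. The compare-exchange is
// "weak" (it may fail spuriously) since we retry in a loop anyway, and zero
// must be reset on every iteration because a failed
// __atomic_compare_exchange writes the observed value back into it.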
inline void js::jit::AddressLock::acquire() {
  uint32_t zero = 0;
  uint32_t one = 1;
  while (!__atomic_compare_exchange(&spinlock, &zero, &one, true,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
    zero = 0;
  }
}

inline void js::jit::AddressLock::release() {
  uint32_t zero = 0;
  __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
}

#endif

#endif  // jit_mips_shared_AtomicOperations_mips_shared_h