/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/* For overall documentation, see jit/AtomicOperations.h */

#ifndef jit_shared_AtomicOperations_x86_shared_h
#define jit_shared_AtomicOperations_x86_shared_h

#include "mozilla/Assertions.h"
#include "mozilla/Types.h"

#include <string.h>             // for memcpy, memmove

// Lock-freedom on x86 and x64:
//
// On x86 and x64 there are atomic instructions for 8-byte accesses:
//
// Loads and stores:
// - Loads and stores are single-copy atomic for up to 8 bytes
//   starting with the Pentium; the store requires a post-fence for
//   sequential consistency
//
// CompareExchange:
// - On x64 CMPXCHGQ can always be used
// - On x86 CMPXCHG8B can be used starting with the first Pentium
//
// Exchange:
// - On x64 XCHGQ can always be used
// - On x86 one has to use a CompareExchange loop
//
// Observe also that the JIT will not be enabled unless we have SSE2,
// which was introduced with the Pentium 4.  Ergo the JIT will be able
// to use atomic instructions for up to 8 bytes on all x86 platforms
// for the primitives we care about.
//
// However, C++ compilers and libraries may not provide access to
// those 8-byte instructions directly.  Clang in 32-bit mode does not
// provide 8-byte atomic primitives at all (even with eg -arch i686
// specified).  On Windows 32-bit, MSVC does not provide
// _InterlockedExchange64 since it does not map directly to an
// instruction.
//
// There are thus sundry workarounds below to handle known corner
// cases.
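//
// As an illustration only (a sketch, not code from this header): on
// 32-bit x86 a 64-bit exchange can be synthesized from a
// compare-exchange loop, which is essentially what the MSVC-specific
// code further down does.  Here compareExchange64 is a hypothetical
// CAS primitive that returns true when the exchange succeeded:
//
//   int64_t exchange64(int64_t* addr, int64_t val) {
//       int64_t old;
//       do {
//           old = *addr;
//       } while (!compareExchange64(addr, old, val));
//       return old;
//   }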

#if defined(__clang__) || defined(__GNUC__)

// The default implementation tactic for gcc/clang is to use the newer
// __atomic intrinsics added for use in C++11 <atomic>.  Where that
// isn't available, we use GCC's older __sync functions instead.
//
// ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS is kept as a backward
// compatible option for older compilers: enable this to use GCC's old
// __sync functions instead of the newer __atomic functions.  This
// will be required for GCC 4.6.x and earlier, and probably for Clang
// 3.1, should we need to use those versions.

// #define ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS

// Lock-free 8-byte atomics are assumed on x86 but must be disabled in
// corner cases, see comments below and in isLockfree8().

# define LOCKFREE8

// This pertains to Clang compiling with -m32; in this case the 64-bit
// __atomic builtins are not available (observed on various Mac OS X
// versions with Apple Clang and on Linux with Clang 3.5).
//
// For now just punt: disable lock-free 8-byte data.  The JIT will
// call isLockfree8() to determine what to do and will stay in sync.
// (Bug 1146817 tracks the work to improve on this.)

# if defined(__clang__) && defined(__i386)
#  undef LOCKFREE8
# endif

inline bool
js::jit::AtomicOperations::isLockfree8()
{
# ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int8_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int16_t), 0));
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int32_t), 0));
# endif
# ifdef LOCKFREE8
#  ifndef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    MOZ_ASSERT(__atomic_always_lock_free(sizeof(int64_t), 0));
#  endif
    return true;
# else
    return false;
# endif
}
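
// For illustration only (a hypothetical caller, not code from this
// header): code that needs 8-byte atomics is expected to consult
// isLockfree8() and fall back to some lock-based path when it returns
// false, roughly:
//
//   if (js::jit::AtomicOperations::isLockfree8())
//       v = js::jit::AtomicOperations::loadSeqCst(addr64);
//   else
//       v = loadUnderLock(addr64);   // hypothetical lock-based fallback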

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
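    // Both fences below are expected to compile to MFENCE (or an
    // equivalent LOCK-prefixed instruction) on x86 and x64.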
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_synchronize();
# else
    __atomic_thread_fence(__ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    // Inhibit compiler reordering with a volatile load.  The x86 does
    // not reorder loads with respect to subsequent loads or stores
    // and no ordering barrier is required here.  See more elaborate
    // comments in storeSeqCst.
    T v = *static_cast<T volatile*>(addr);
# else
    T v;
    __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
# endif
    return v;
}

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::loadSeqCst(int64_t* addr)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::loadSeqCst(uint64_t* addr)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    // Inhibit compiler reordering with a volatile store.  The x86 may
    // reorder a store with respect to a subsequent load from a
    // different location, hence there is an ordering barrier here to
    // prevent that.
    //
    // By way of background, look to eg
    // http://bartoszmilewski.com/2008/11/05/who-ordered-memory-fences-on-an-x86/
    //
    // Consider:
    //
    //   uint8_t x = 0, y = 0; // to start
    //
    // thread1:
    //   sx: AtomicOperations::store(&x, 1);
    //   gy: uint8_t obs1 = AtomicOperations::loadSeqCst(&y);
    //
    // thread2:
    //   sy: AtomicOperations::store(&y, 1);
    //   gx: uint8_t obs2 = AtomicOperations::loadSeqCst(&x);
    //
    // Sequential consistency requires a total global ordering of
    // operations: sx-gy-sy-gx, sx-sy-gx-gy, sx-sy-gy-gx, sy-gx-sx-gy,
    // sy-sx-gy-gx, or sy-sx-gx-gy.  In every ordering at least one of
    // sx-before-gx or sy-before-gy happens, so *at least one* of
    // obs1/obs2 is 1.
    //
    // If AtomicOperations::{load,store}SeqCst were just volatile
    // {load,store}, x86 could reorder gx/gy before each thread's
    // prior store.  That would permit gx-gy-sx-sy: both loads would
    // be 0!  Thus after a volatile store we must synchronize to
    // ensure the store happens before the load.
    *static_cast<T volatile*>(addr) = val;
    __sync_synchronize();
# else
    __atomic_store(addr, &val, __ATOMIC_SEQ_CST);
# endif
}

# ifndef LOCKFREE8
template<>
inline void
js::jit::AtomicOperations::storeSeqCst(int64_t* addr, int64_t val)
{
    MOZ_CRASH();
}

template<>
inline void
js::jit::AtomicOperations::storeSeqCst(uint64_t* addr, uint64_t val)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline T
js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    T v;
    do {
        // Here I assume the compiler will not hoist the load.  It
        // shouldn't, because the CAS could affect *addr.
        v = *addr;
    } while (!__sync_bool_compare_and_swap(addr, v, val));
    return v;
# else
    T v;
    __atomic_exchange(addr, &val, &v, __ATOMIC_SEQ_CST);
    return v;
# endif
}

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::exchangeSeqCst(int64_t* addr, int64_t val)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::exchangeSeqCst(uint64_t* addr, uint64_t val)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline T
js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_val_compare_and_swap(addr, oldval, newval);
# else
    __atomic_compare_exchange(addr, &oldval, &newval, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return oldval;
# endif
}
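
// For illustration only (a sketch, not code from this header): a typical
// retry loop built on compareExchangeSeqCst, here computing an atomic
// maximum of *addr and val for some given uint32_t* addr and uint32_t val.
// The function returns the value observed in memory, so the loop is done
// once that value equals the expected old value:
//
//   uint32_t old = AtomicOperations::loadSeqCst(addr);
//   for (;;) {
//       uint32_t want = old > val ? old : val;
//       uint32_t seen = AtomicOperations::compareExchangeSeqCst(addr, old, want);
//       if (seen == old)
//           break;       // the exchange took effect
//       old = seen;      // lost a race; retry with the value we saw
//   }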

# ifndef LOCKFREE8
template<>
inline int64_t
js::jit::AtomicOperations::compareExchangeSeqCst(int64_t* addr, int64_t oldval, int64_t newval)
{
    MOZ_CRASH();
}

template<>
inline uint64_t
js::jit::AtomicOperations::compareExchangeSeqCst(uint64_t* addr, uint64_t oldval, uint64_t newval)
{
    MOZ_CRASH();
}
# endif // LOCKFREE8

template<typename T>
inline T
js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_add(addr, val);
# else
    return __atomic_fetch_add(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_sub(addr, val);
# else
    return __atomic_fetch_sub(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_and(addr, val);
# else
    return __atomic_fetch_and(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_or(addr, val);
# else
    return __atomic_fetch_or(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val)
{
    static_assert(sizeof(T) <= 4, "not available for 8-byte values yet");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    return __sync_fetch_and_xor(addr, val);
# else
    return __atomic_fetch_xor(addr, val, __ATOMIC_SEQ_CST);
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
    return *addr;               // FIXME (1208663): not yet safe
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
    *addr = val;                // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    while (!__sync_bool_compare_and_swap(&spinlock, 0, 1))
        continue;
# else
    uint32_t zero = 0;
    uint32_t one = 1;
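    // Note that on failure __atomic_compare_exchange writes the observed
    // value back into |zero|, which is why |zero| is reset on every
    // iteration of the loop below.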
    while (!__atomic_compare_exchange(&spinlock, &zero, &one, false, __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE)) {
        zero = 0;
        continue;
    }
# endif
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
# ifdef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
    __sync_sub_and_fetch(&spinlock, 1); // Should turn into LOCK XADD
# else
    uint32_t zero = 0;
    __atomic_store(&spinlock, &zero, __ATOMIC_SEQ_CST);
# endif
}
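
// For illustration only (not code from this header): RegionLock is meant
// to bracket a non-lock-free access to a memory region, with acquire and
// release surrounding the access:
//
//   // given a suitably initialized RegionLock |lock|:
//   lock.acquire<8>(addr);
//   // ... access the 8-byte datum at addr ...
//   lock.release<8>(addr);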

# undef ATOMICS_IMPLEMENTED_WITH_SYNC_INTRINSICS
# undef LOCKFREE8

#elif defined(_MSC_VER)

// On 32-bit CPUs there is no 64-bit XCHG instruction; one must
// instead use a loop with CMPXCHG8B.  Since MSVC provides
// _InterlockedExchange64 only if it maps directly to XCHG, the
// workaround must be manual.

# define HAVE_EXCHANGE64

# if !_WIN64
#  undef HAVE_EXCHANGE64
# endif

// Below, _ReadWriteBarrier is a compiler directive, preventing
// reordering of instructions and reuse of memory values across it.

inline bool
js::jit::AtomicOperations::isLockfree8()
{
    // See general comments at the start of this file.
    //
    // The MSDN docs suggest very strongly that if code is compiled for
    // Pentium or better the 64-bit primitives will be lock-free; see
    // eg the "Remarks" section of the page for _InterlockedCompareExchange64,
    // currently here:
    // https://msdn.microsoft.com/en-us/library/ttk2z1ws%28v=vs.85%29.aspx
    //
    // But I've found no way to assert that at compile time or run time;
    // there appears to be no WinAPI is_lock_free() test.
    return true;
}

inline void
js::jit::AtomicOperations::fenceSeqCst()
{
    _ReadWriteBarrier();
# if JS_BITS_PER_WORD == 32
    // If configured for SSE2+ we can use the MFENCE instruction, available
    // through the _mm_mfence intrinsic.  But for non-SSE2 systems we have
    // to do something else.  Linux uses "lock add [esp], 0", so why not?
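    // (A LOCK-prefixed read-modify-write such as this acts as a full
    // memory barrier on x86, so it serves the same purpose as MFENCE.)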
    __asm lock add [esp], 0;
# else
    _mm_mfence();
# endif
}

template<typename T>
inline T
js::jit::AtomicOperations::loadSeqCst(T* addr)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
    _ReadWriteBarrier();
    T v = *addr;
    _ReadWriteBarrier();
    return v;
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSeqCst(T* addr, T val)
{
    MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());
    _ReadWriteBarrier();
    *addr = val;
    fenceSeqCst();
}

# define MSC_EXCHANGEOP(T, U, xchgop)                           \
    template<> inline T                                         \
    js::jit::AtomicOperations::exchangeSeqCst(T* addr, T val) { \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());             \
        return (T)xchgop((U volatile*)addr, (U)val);            \
    }

// The CAS-based exchange loop below retries until the compare-exchange
// observes the value we just read, ie until the exchange has actually
// taken effect; the Interlocked CAS primitives return the value that
// was initially in memory.

# define MSC_EXCHANGEOP_CAS(T, U, cmpxchg)                                        \
    template<> inline T                                                           \
    js::jit::AtomicOperations::exchangeSeqCst(T* addr, T newval) {                \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());                               \
        T oldval;                                                                 \
        do {                                                                      \
            _ReadWriteBarrier();                                                  \
            oldval = *addr;                                                       \
        } while ((T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval) != oldval);  \
        return oldval;                                                            \
    }

MSC_EXCHANGEOP(int8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(uint8_t, char, _InterlockedExchange8)
MSC_EXCHANGEOP(int16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(uint16_t, short, _InterlockedExchange16)
MSC_EXCHANGEOP(int32_t, long, _InterlockedExchange)
MSC_EXCHANGEOP(uint32_t, long, _InterlockedExchange)
# ifdef HAVE_EXCHANGE64
MSC_EXCHANGEOP(int64_t, __int64, _InterlockedExchange64)
MSC_EXCHANGEOP(uint64_t, __int64, _InterlockedExchange64)
# else
MSC_EXCHANGEOP_CAS(int64_t, __int64, _InterlockedCompareExchange64)
MSC_EXCHANGEOP_CAS(uint64_t, __int64, _InterlockedCompareExchange64)
# endif

# undef MSC_EXCHANGEOP
# undef MSC_EXCHANGEOP_CAS

# define MSC_CAS(T, U, cmpxchg)                                                     \
    template<> inline T                                                             \
    js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval) { \
        MOZ_ASSERT(sizeof(T) < 8 || isLockfree8());                                 \
        return (T)cmpxchg((U volatile*)addr, (U)newval, (U)oldval);                 \
    }

MSC_CAS(int8_t, char, _InterlockedCompareExchange8)
MSC_CAS(uint8_t, char, _InterlockedCompareExchange8)
MSC_CAS(int16_t, short, _InterlockedCompareExchange16)
MSC_CAS(uint16_t, short, _InterlockedCompareExchange16)
MSC_CAS(int32_t, long, _InterlockedCompareExchange)
MSC_CAS(uint32_t, long, _InterlockedCompareExchange)
MSC_CAS(int64_t, __int64, _InterlockedCompareExchange64)
MSC_CAS(uint64_t, __int64, _InterlockedCompareExchange64)

# undef MSC_CAS

# define MSC_FETCHADDOP(T, U, xadd)                                           \
    template<> inline T                                                       \
    js::jit::AtomicOperations::fetchAddSeqCst(T* addr, T val) {               \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xadd((U volatile*)addr, (U)val);                            \
    }                                                                         \
    template<> inline T                                                       \
    js::jit::AtomicOperations::fetchSubSeqCst(T* addr, T val) {               \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xadd((U volatile*)addr, -(U)val);                           \
    }

MSC_FETCHADDOP(int8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(uint8_t, char, _InterlockedExchangeAdd8)
MSC_FETCHADDOP(int16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(uint16_t, short, _InterlockedExchangeAdd16)
MSC_FETCHADDOP(int32_t, long, _InterlockedExchangeAdd)
MSC_FETCHADDOP(uint32_t, long, _InterlockedExchangeAdd)

# undef MSC_FETCHADDOP

# define MSC_FETCHBITOP(T, U, andop, orop, xorop)                             \
    template<> inline T                                                       \
    js::jit::AtomicOperations::fetchAndSeqCst(T* addr, T val) {               \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)andop((U volatile*)addr, (U)val);                           \
    }                                                                         \
    template<> inline T                                                       \
    js::jit::AtomicOperations::fetchOrSeqCst(T* addr, T val) {                \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)orop((U volatile*)addr, (U)val);                            \
    }                                                                         \
    template<> inline T                                                       \
    js::jit::AtomicOperations::fetchXorSeqCst(T* addr, T val) {               \
        static_assert(sizeof(T) <= 4, "not available for 8-byte values yet"); \
        return (T)xorop((U volatile*)addr, (U)val);                           \
    }

MSC_FETCHBITOP(int8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
MSC_FETCHBITOP(uint8_t, char, _InterlockedAnd8, _InterlockedOr8, _InterlockedXor8)
MSC_FETCHBITOP(int16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
MSC_FETCHBITOP(uint16_t, short, _InterlockedAnd16, _InterlockedOr16, _InterlockedXor16)
MSC_FETCHBITOP(int32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)
MSC_FETCHBITOP(uint32_t, long, _InterlockedAnd, _InterlockedOr, _InterlockedXor)

# undef MSC_FETCHBITOP

template<typename T>
inline T
js::jit::AtomicOperations::loadSafeWhenRacy(T* addr)
{
    return *addr;               // FIXME (1208663): not yet safe
}

template<typename T>
inline void
js::jit::AtomicOperations::storeSafeWhenRacy(T* addr, T val)
{
    *addr = val;                // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memcpy(dest, src, nbytes); // FIXME (1208663): not yet safe
}

inline void
js::jit::AtomicOperations::memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes)
{
    ::memmove(dest, src, nbytes); // FIXME (1208663): not yet safe
}

template<size_t nbytes>
inline void
js::jit::RegionLock::acquire(void* addr)
{
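    // _InterlockedCompareExchange returns the value initially at the
    // destination, so the loop below spins for as long as the lock word
    // was already 1, ie held by another thread.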
    while (_InterlockedCompareExchange((long*)&spinlock, /*newval=*/1, /*oldval=*/0) == 1)
        continue;
}

template<size_t nbytes>
inline void
js::jit::RegionLock::release(void* addr)
{
    MOZ_ASSERT(AtomicOperations::loadSeqCst(&spinlock) == 1, "releasing unlocked region lock");
    _InterlockedExchange((long*)&spinlock, 0);
}

# undef HAVE_EXCHANGE64

#elif defined(ENABLE_SHARED_ARRAY_BUFFER)

# error "Either disable JS shared memory at compile time, use GCC, Clang, or MSVC, or add code here"

#endif // platform

#endif // jit_shared_AtomicOperations_x86_shared_h