// -*- C++ -*-

// Copyright (C) 2007, 2008, 2009, 2010, 2012 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file parallel/compatibility.h
 *  @brief Compatibility layer, mostly concerned with atomic operations.
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Felix Putze.

#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1

#include <parallel/types.h>
#include <parallel/base.h>

#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif

#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif

#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#undef max
#undef min
#endif
#ifdef __MINGW32__
// Including <windows.h> will drag in all the windows32 names.  Since
// that can cause user code portability problems, we just declare the
// one needed function here.
extern "C"
__attribute__((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif

namespace __gnu_parallel
{
#if defined(__ICC)
  template<typename _MustBeInt = int>
  int32_t __faa32(int32_t* __x, int32_t __inc)
  {
    // Atomic fetch-and-add via lock xadd; leaves the previous value
    // of *__x in __inc.
    asm volatile("lock xadd %0,%1"
                 : "=r" (__inc), "+m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
#if defined(__x86_64)
  template<typename _MustBeInt = int>
  int64_t __faa64(int64_t* __x, int64_t __inc)
  {
    // 64-bit variant of __faa32.
    asm volatile("lock xadd %0,%1"
                 : "=r" (__inc), "+m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
#endif
#endif

  // atomic functions only work on integers

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a 32-bit signed integer.
   *  @param __addend Value to add.
   */
  inline int32_t
  __fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr),
                                   __addend);
#elif defined(__GNUC__)
    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int32_t __before, __after;
    do
      {
        __before = *__ptr;
        __after = __before + __addend;
      } while (atomic_cas_32((volatile unsigned int*)__ptr, __before,
                             __after) != __before);
    return __before;
#else   //fallback, slow
#pragma message("slow __fetch_and_add_32")
    int32_t __res;
#pragma omp critical
    {
      __res = *__ptr;
      *(__ptr) += __addend;
    }
    return __res;
#endif
  }
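
  // Illustrative sketch, not part of the library interface: the return
  // value is the value *before* the addition, so concurrent callers
  // each obtain a distinct index from a shared counter.
  //
  //   static volatile int32_t __counter = 0;   // shared between threads
  //   int32_t __my_slot = __fetch_and_add_32(&__counter, 1);
  //   // __my_slot is unique per call; __counter has advanced by 1.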

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a 64-bit signed integer.
   *  @param __addend Value to add.
   */
  inline int64_t
  __fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return __faa64<int>((int64_t*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;
#else
    return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
    return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int64_t __before, __after;
    do
      {
        __before = *__ptr;
        __after = __before + __addend;
      } while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
                             __after) != __before);
    return __before;
#else   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __fetch_and_add_64")
    int64_t __res;
#pragma omp critical
    {
      __res = *__ptr;
      *(__ptr) += __addend;
    }
    return __res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a signed integer.
   *  @param __addend Value to add.
   */
  template<typename _Tp>
  inline _Tp
  __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return
        (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr, (int32_t)__addend);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return
        (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr, (int64_t)__addend);
    else
      {
        _GLIBCXX_PARALLEL_ASSERT(false);
        return _Tp(0);  // unreachable; avoids falling off a non-void function
      }
  }
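
  // Illustrative sketch, not part of the library interface: the
  // generic wrapper dispatches on sizeof(_Tp), so any 32- or 64-bit
  // integral type works.
  //
  //   volatile uint64_t __processed = 0;
  //   __fetch_and_add(&__processed, uint64_t(16)); // claim a whole chunk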

#if defined(__ICC)

  template<typename _MustBeInt = int>
  inline int32_t
  __cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw)
  {
    // Atomic compare-and-swap via lock cmpxchg; returns the value
    // *__ptr held before the operation.
    int32_t __before;
    __asm__ __volatile__("lock; cmpxchgl %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "m"(*__ptr), "0"(__old)
                         : "memory");
    return __before;
  }

#if defined(__x86_64)
  template<typename _MustBeInt = int>
  inline int64_t
  __cas64(volatile int64_t *__ptr, int64_t __old, int64_t __nw)
  {
    // 64-bit variant of __cas32.
    int64_t __before;
    __asm__ __volatile__("lock; cmpxchgq %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "m"(*__ptr), "0"(__old)
                         : "memory");
    return __before;
  }
#endif

#endif

  /** @brief Compare @c *__ptr and @c __comparand.  If they are equal,
   *  assign @c __replacement to @c *__ptr and return @c true;
   *  otherwise return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to 32-bit signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   */
  inline bool
  __compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
                        int32_t __replacement)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedCompareExchange(
               reinterpret_cast<volatile long*>(__ptr),
               __replacement, __comparand)
             == __comparand;
#elif defined(__GNUC__)
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
                         __replacement) == __comparand;
#else
#pragma message("slow __compare_and_swap_32")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }
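
  // Illustrative sketch, not part of the library interface:
  //
  //   volatile int32_t __flag = 0;
  //   if (__compare_and_swap_32(&__flag, 0, 1))
  //     { /* this thread flipped the flag from 0 to 1 */ }
  //   else
  //     { /* another thread changed __flag first */ }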

  /** @brief Compare @c *__ptr and @c __comparand.  If they are equal,
   *  assign @c __replacement to @c *__ptr and return @c true;
   *  otherwise return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to 64-bit signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   */
  inline bool
  __compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
                        int64_t __replacement)
  {
#if defined(__ICC) && defined(__x86_64) //x86 version
    return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange64((void*)__ptr, __replacement,
                                         __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return false;
#else
    return _InterlockedCompareExchange64(__ptr, __replacement,
                                         __comparand) == __comparand;
#endif

#elif defined(__GNUC__) && defined(__x86_64)
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
    return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                       false, __ATOMIC_ACQ_REL,
                                       __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_64((volatile unsigned long long*)__ptr,
                         __comparand, __replacement) == __comparand;
#else
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __compare_and_swap_64")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }

  /** @brief Compare @c *__ptr and @c __comparand.  If they are equal,
   *  assign @c __replacement to @c *__ptr and return @c true;
   *  otherwise return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value. */
  template<typename _Tp>
  inline bool
  __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return __compare_and_swap_32((volatile int32_t*) __ptr,
                                   (int32_t)__comparand,
                                   (int32_t)__replacement);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return __compare_and_swap_64((volatile int64_t*) __ptr,
                                   (int64_t)__comparand,
                                   (int64_t)__replacement);
    else
      {
        _GLIBCXX_PARALLEL_ASSERT(false);
        return false;   // unreachable; avoids falling off a non-void function
      }
  }
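
  // Illustrative sketch, not part of the library interface: a typical
  // compare-and-swap retry loop, here maintaining a shared maximum.
  // The CAS fails if the value changed concurrently, in which case it
  // is re-read and the update retried.
  //
  //   template<typename _Tp>
  //   void __atomic_max(volatile _Tp* __shared, _Tp __candidate)
  //   {
  //     _Tp __old;
  //     do
  //       {
  //         __old = *__shared;
  //         if (__old >= __candidate)
  //           return;                   // nothing to update
  //       }
  //     while (!__compare_and_swap(__shared, __old, __candidate));
  //   }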

  /** @brief Yield control to another thread, without waiting for
   *  the end of the current time slice. */
  inline void
  __yield()
  {
#if defined (_WIN32) && !defined (__CYGWIN__)
    Sleep(0);
#else
    sched_yield();
#endif
  }
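
  // Illustrative sketch, not part of the library interface (assumes a
  // shared int32_t __lock initialized to 0): __yield() is intended for
  // spin-wait loops, giving up the processor instead of burning cycles
  // while another thread makes progress.
  //
  //   while (!__compare_and_swap_32(&__lock, 0, 1))
  //     __yield();                      // spin politely until acquired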
} // end namespace __gnu_parallel

#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */