/*
 * Copyright (c) 1997-1999
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.


#ifndef _STLP_INTERNAL_THREADS_H
#define _STLP_INTERNAL_THREADS_H

// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the Posix
// threads standard), and Win32 threads.  Uithread support by Jochen
// Schlick, 1999, and Solaris threads generalized to them.

#ifndef _STLP_INTERNAL_CSTDDEF
#  include <stl/_cstddef.h>
#endif

#ifndef _STLP_INTERNAL_CSTDLIB
#  include <stl/_cstdlib.h>
#endif

// On SUN and Mac OS X gcc, zero-initialization works just fine...
#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
#  define _STLP_MUTEX_INITIALIZER
#endif

/* This header defines the following atomic operations, which each platform
 * should try to support as much as possible. Atomic operations are exposed as
 * macros in order to easily test for their existence. They are:
 * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
 * increments *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
 * decrements *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * assigns __val to *__target and returns the former *__target value
 * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
 * assigns __ptr to *__target and returns the former *__target value
 */
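/* Illustrative sketch only (not part of this interface): on a hypothetical
 * port whose compiler offers the GCC __sync builtins, the macros above could
 * be mapped roughly as follows.  Real ports define them in the platform
 * sections below.
 *
 *   typedef long __stl_atomic_t;
 *   #define _STLP_ATOMIC_INCREMENT(__x)         __sync_add_and_fetch((__x), 1)
 *   #define _STLP_ATOMIC_DECREMENT(__x)         __sync_sub_and_fetch((__x), 1)
 *   #define _STLP_ATOMIC_EXCHANGE(__x, __v)     __sync_lock_test_and_set((__x), (__v))
 *   #define _STLP_ATOMIC_EXCHANGE_PTR(__x, __p) __sync_lock_test_and_set((__x), (__p))
 */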

#if defined (_STLP_THREADS)

#  if defined (_STLP_SGI_THREADS)

#    include <mutex.h>
// Hack for SGI o32 compilers.
#    if !defined(__add_and_fetch) && \
        (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#      define __test_and_set(__l,__v)  test_and_set(__l,__v)
#    endif /* o32 */

#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
#      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
#    else
#      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
#    endif

#    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
#    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
typedef long __stl_atomic_t;

#  elif defined (_STLP_PTHREADS)

#    include <pthread.h>
#    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
#        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#      endif
// HP-UX variants have a non-standard "DCE" pthreads implementation (optional on some platforms).
#      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
#        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
#      else
#        define _STLP_PTHREAD_ATTR_DEFAULT 0
#      endif
#    else
#      if defined (__OpenBSD__)
#        include <spinlock.h>
#      endif
#    endif

#    if defined (__GNUC__) && defined (__i386__)
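// Note on the two helpers below: the x86 "lock; xadd" instruction atomically
// adds its source operand to *p and leaves the *previous* value of *p in the
// source register, so returning result + 1 (resp. result - 1) yields the
// freshly written value, as required by the macro contract above.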
#      if !defined (_STLP_ATOMIC_INCREMENT)
inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl  %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m" (*p),  "1"  (1)
    :"cc");
  return result + 1;
}
#        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
#      endif

#      if !defined (_STLP_ATOMIC_DECREMENT)
inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl  %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m" (*p),  "1"  (-1)
    :"cc");
  return result - 1;
}
#        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
#      endif
typedef long __stl_atomic_t;
#    else
typedef size_t __stl_atomic_t;
#    endif /* if defined(__GNUC__) && defined(__i386__) */

#  elif defined (_STLP_WIN32THREADS)

#    if !defined (_STLP_ATOMIC_INCREMENT)
#      if !defined (_STLP_NEW_PLATFORM_SDK)
#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
#      else
#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
#      endif
#      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
#    endif
typedef long __stl_atomic_t;

#  elif defined (__DECC) || defined (__DECCXX)

#    include <machine/builtins.h>
#    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
#    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
#    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
typedef long __stl_atomic_t;

#  elif defined (_STLP_SPARC_SOLARIS_THREADS)

typedef long __stl_atomic_t;
#    include <stl/_sparc_atomic.h>

#  elif defined (_STLP_UITHREADS)

// This inclusion is a potential hazard: it may drag in all sorts of
// old-style headers. Let's assume the vendor already knows how
// to deal with that.
#    ifndef _STLP_INTERNAL_CTIME
#      include <stl/_ctime.h>
#    endif
#    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
using _STLP_VENDOR_CSTD::time_t;
#    endif
#    include <synch.h>
#    ifndef _STLP_INTERNAL_CSTDIO
#      include <stl/_cstdio.h>
#    endif
#    ifndef _STLP_INTERNAL_CWCHAR
#      include <stl/_cwchar.h>
#    endif
typedef size_t __stl_atomic_t;

#  elif defined (_STLP_BETHREADS)

#    include <OS.h>
#    include <cassert>
#    include <stdio.h>
#    define _STLP_MUTEX_INITIALIZER = { 0 }
typedef size_t __stl_atomic_t;

#  elif defined (_STLP_NWTHREADS)

#    include <nwthread.h>
#    include <nwsemaph.h>
typedef size_t __stl_atomic_t;

#  elif defined(_STLP_OS2THREADS)

#    if defined (__GNUC__)
#      define INCL_DOSSEMAPHORES
#      include <os2.h>
#    else
// This section serves to replace os2.h for VisualAge C++
  typedef unsigned long ULONG;
#      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
#        define __HEV__
  typedef ULONG HEV;
  typedef HEV*  PHEV;
#      endif
  typedef ULONG APIRET;
  typedef ULONG HMTX;
  typedef HMTX*  PHMTX;
  typedef const char*  PCSZ;
  typedef ULONG BOOL32;
  APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
  APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
  APIRET _System DosReleaseMutexSem(HMTX hmtx);
  APIRET _System DosCloseMutexSem(HMTX hmtx);
#      define _STLP_MUTEX_INITIALIZER = { 0 }
#    endif /* GNUC */
typedef size_t __stl_atomic_t;

#  else

typedef size_t __stl_atomic_t;

#  endif

#else
/* no threads */
#  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
#  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
/* We do not provide the other atomic operations, as they are useless if STLport
 * does not have to be thread safe.
 */
typedef size_t __stl_atomic_t;
#endif

#if !defined (_STLP_MUTEX_INITIALIZER)
#  if defined(_STLP_ATOMIC_EXCHANGE)
#    define _STLP_MUTEX_INITIALIZER = { 0 }
#  elif defined(_STLP_UITHREADS)
#    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#  else
#    define _STLP_MUTEX_INITIALIZER
#  endif
#endif

_STLP_BEGIN_NAMESPACE

#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
// Helper struct.  This is a workaround for various compilers that don't
// handle static variables in inline functions properly.
template <int __inst>
struct _STLP_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.
  static unsigned __max;
  static unsigned __last;
  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
  static void _STLP_CALL _S_nsec_sleep(int __log_nsec, unsigned int& __iteration);
};
#endif // !_STLP_USE_PTHREAD_SPINLOCK

// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// For non-static cases, clients should use _STLP_mutex.
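//
// Illustrative sketch (hypothetical translation unit; __s_lock is an invented
// name, and _STLP_STATIC_MUTEX is the configuration alias STLport uses for
// this aggregate type):
//
//   static _STLP_STATIC_MUTEX __s_lock _STLP_MUTEX_INITIALIZER;
//   void __f() {
//     __s_lock._M_acquire_lock();
//     /* ... critical section ... */
//     __s_lock._M_release_lock();
//   }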

struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile __stl_atomic_t _M_lock;
#endif

#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  inline void _M_initialize() { _M_lock = 0; }
  inline void _M_destroy() {}

  void _M_acquire_lock() {
    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
  }

  inline void _M_release_lock() {
    volatile __stl_atomic_t* __lock = &_M_lock;
#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    asm("sync");
    *__lock = 0;
#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
         (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
    asm("membar #StoreStore ; membar #LoadStore");
#      else
    asm(" stbar ");
#      endif
    *__lock = 0;
#    else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#    endif
  }
#  elif defined (_STLP_PTHREADS)
#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if !defined (__OpenBSD__)
  pthread_spinlock_t _M_lock;
  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }

  // Sorry, but there is no static initializer for pthread_spinlock_t;
  // this will not work for compilers that have problems with calling
  // the constructor of a static object...

  // _STLP_mutex_base()
  //   { pthread_spin_init( &_M_lock, 0 ); }

  // ~_STLP_mutex_base()
  //   { pthread_spin_destroy( &_M_lock ); }

  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
#      else // __OpenBSD__
  spinlock_t _M_lock;
  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
  inline void _M_destroy() { }
  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
#      endif // __OpenBSD__
#    else // !_STLP_USE_PTHREAD_SPINLOCK
  pthread_mutex_t _M_lock;
  inline void _M_initialize()
  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
  inline void _M_destroy()
  { pthread_mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() {
#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
    if (!_M_lock.field1)  _M_initialize();
#      endif
    pthread_mutex_lock(&_M_lock);
  }
  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#    endif // !_STLP_USE_PTHREAD_SPINLOCK

#  elif defined (_STLP_UITHREADS)
  mutex_t _M_lock;
  inline void _M_initialize()
  { mutex_init(&_M_lock, 0, NULL); }
  inline void _M_destroy()
  { mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
  inline void _M_release_lock() { mutex_unlock(&_M_lock); }

#  elif defined (_STLP_OS2THREADS)
  HMTX _M_lock;
  inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
  inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
  inline void _M_acquire_lock() {
    if (!_M_lock) _M_initialize();
    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
  }
  inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
#  elif defined (_STLP_BETHREADS)
  sem_id sem;
  inline void _M_initialize() {
    sem = create_sem(1, "STLPort");
    assert(sem > 0);
  }
  inline void _M_destroy() {
    int t = delete_sem(sem);
    assert(t == B_NO_ERROR);
  }
  inline void _M_acquire_lock();
  inline void _M_release_lock() {
    status_t t = release_sem(sem);
    assert(t == B_NO_ERROR);
  }
#  elif defined (_STLP_NWTHREADS)
  LONG _M_lock;
  inline void _M_initialize()
  { _M_lock = OpenLocalSemaphore(1); }
  inline void _M_destroy()
  { CloseLocalSemaphore(_M_lock); }
  inline void _M_acquire_lock()
  { WaitOnLocalSemaphore(_M_lock); }
  inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
#  else      //*ty 11/24/2001 - added configuration check
#    error "Unknown thread facility configuration"
#  endif
#else /* No threads */
  inline void _M_initialize() {}
  inline void _M_destroy() {}
  inline void _M_acquire_lock() {}
  inline void _M_release_lock() {}
#endif // _STLP_THREADS
};

// Locking class.  The constructor initializes the lock, the destructor destroys it.
// A well-behaved class; it does not need a static initializer.

class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
  public:
    inline _STLP_mutex () { _M_initialize(); }
    inline ~_STLP_mutex () { _M_destroy(); }
  private:
    _STLP_mutex(const _STLP_mutex&);
    void operator=(const _STLP_mutex&);
};

// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
// releases the lock.
// It's not clear that this is exactly the right functionality.
// It will probably change in the future.

struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
  { _M_lock._M_acquire_lock(); }
  ~_STLP_auto_lock()
  { _M_lock._M_release_lock(); }

private:
  _STLP_STATIC_MUTEX& _M_lock;
  void operator=(const _STLP_auto_lock&);
  _STLP_auto_lock(const _STLP_auto_lock&);
};
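
// Illustrative sketch (hypothetical caller): the guard acquires the mutex for
// the duration of the scope and releases it on every exit path, including
// exceptions.
//
//   static _STLP_STATIC_MUTEX __s_table_lock _STLP_MUTEX_INITIALIZER;
//   void __update_table() {
//     _STLP_auto_lock __guard(__s_table_lock);
//     /* ... operations protected by __s_table_lock ... */
//   }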

/*
 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
 * atomic preincrement/predecrement.  The constructor initializes
 * _M_ref_count.
 */
class _STLP_CLASS_DECLSPEC _Refcount_Base {
  // The data member _M_ref_count
#if defined (__DMC__)
public:
#endif
  _STLP_VOLATILE __stl_atomic_t _M_ref_count;

#if defined (_STLP_THREADS) && \
   (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
    defined (_STLP_WIN95_LIKE))
#  define _STLP_USE_MUTEX
  _STLP_mutex _M_mutex;
#endif

  public:
  // Constructor
  _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
#if defined (__BORLANDC__)
  ~_Refcount_Base(){};
#endif

  // _M_incr and _M_decr
#if defined (_STLP_THREADS)
#  if !defined (_STLP_USE_MUTEX)
   __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
   __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
#  else
#    undef _STLP_USE_MUTEX
  __stl_atomic_t _M_incr() {
    _STLP_auto_lock l(_M_mutex);
    return ++_M_ref_count;
  }
  __stl_atomic_t _M_decr() {
    _STLP_auto_lock l(_M_mutex);
    return --_M_ref_count;
  }
#  endif
#else  /* No threads */
  __stl_atomic_t _M_incr() { return ++_M_ref_count; }
  __stl_atomic_t _M_decr() { return --_M_ref_count; }
#endif
};
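
// Illustrative sketch (hypothetical shared object): derive from _Refcount_Base,
// start the count at 1 for the creating owner, and destroy the object when the
// last owner's _M_decr() brings the count back to zero.
//
//   struct __shared_impl : public _Refcount_Base {
//     __shared_impl() : _Refcount_Base(1) {}
//   };
//
//   void __release(__shared_impl* __p) {
//     if (__p->_M_decr() == 0) delete __p;
//   }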

/* Atomic swap on __stl_atomic_t
 * This is guaranteed to behave as though it were atomic only if all
 * possibly concurrent updates use _Atomic_swap.
 * In some cases the operation is emulated with a lock.
 * The same applies to _Atomic_swap_ptr.
 */
/* Helper struct to handle the following cases:
 * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
 *   exchange can be done on pointers;
 * - on platforms without atomic operations, the swap is done in a critical
 *   section, portable but inefficient.
 */
template <int __use_ptr_atomic_swap>
class _Atomic_swap_struct {
public:
#if defined (_STLP_THREADS) && \
    !defined (_STLP_ATOMIC_EXCHANGE) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
  static _STLP_STATIC_MUTEX _S_swap_lock;
#endif

  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  __stl_atomic_t __result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
  /* no threads */
  __stl_atomic_t __result = *__p;
  *__p = __q;
  return __result;
#endif // _STLP_THREADS
  }

  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
                            );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  void *__result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
  /* no threads */
  void *__result = *__p;
  *__p = __q;
  return __result;
#endif
  }
};

_STLP_TEMPLATE_NULL
class _Atomic_swap_struct<0> {
public:
#if defined (_STLP_THREADS) && \
    (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
  static _STLP_STATIC_MUTEX _S_swap_lock;
#endif

  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  /* This should be portable, but performance is expected
   * to be quite awful.  This really needs platform specific
   * code.
   */
  _S_swap_lock._M_acquire_lock();
  __stl_atomic_t __result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
  /* no threads */
  __stl_atomic_t __result = *__p;
  *__p = __q;
  return __result;
#endif // _STLP_THREADS
  }

  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
                            );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  void *__result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
  /* no threads */
  void *__result = *__p;
  *__p = __q;
  return __result;
#endif
  }
};

#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
#  pragma warning (push)
#  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
#endif

inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
}

inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
}
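
// Illustrative sketch (hypothetical slot): both helpers atomically publish a new
// value and hand back the previous one, provided every concurrent update of the
// location goes through _Atomic_swap / _Atomic_swap_ptr, as noted above.
//
//   void* _STLP_VOLATILE __slot = 0;
//   void* __old = _Atomic_swap_ptr(&__slot, __new_value);
//   /* __old now holds whatever was stored in __slot before the exchange. */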

#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
#  pragma warning (pop)
#endif

#if defined (_STLP_BETHREADS)
template <int __inst>
struct _STLP_beos_static_lock_data {
  static bool is_init;
  struct mutex_t : public _STLP_mutex {
    mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = true; }
    ~mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = false; }
  };
  static mutex_t mut;
};

template <int __inst>
bool _STLP_beos_static_lock_data<__inst>::is_init = false;
template <int __inst>
typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;

inline void _STLP_mutex_base::_M_acquire_lock() {
  if (sem == 0) {
    // We need to initialize on demand here;
    // to prevent race conditions, use our global
    // mutex if it's available:
    if (_STLP_beos_static_lock_data<0>::is_init) {
      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
      if (sem == 0) _M_initialize();
    }
    else {
      // No lock available; we must still be
      // in startup code, so THERE MUST BE ONLY
      // ONE THREAD active at this point.
      _M_initialize();
    }
  }
  status_t t;
  t = acquire_sem(sem);
  assert(t == B_NO_ERROR);
}
#endif

_STLP_END_NAMESPACE

#if !defined (_STLP_LINK_TIME_INSTANTIATION)
#  include <stl/_threads.c>
#endif

#endif /* _STLP_INTERNAL_THREADS_H */

// Local Variables:
// mode:C++
// End: