1 /*
2 * Copyright (c) 1997-1999
3 * Silicon Graphics Computer Systems, Inc.
4 *
5 * Copyright (c) 1999
6 * Boris Fomitchev
7 *
8 * This material is provided "as is", with absolutely no warranty expressed
9 * or implied. Any use is at your own risk.
10 *
11 * Permission to use or copy this software for any purpose is hereby granted
12 * without fee, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
16 *
17 */
18
19 // WARNING: This is an internal header file, included by other C++
20 // standard library headers. You should not attempt to use this header
21 // file directly.
22
23
24 #ifndef _STLP_INTERNAL_THREADS_H
25 #define _STLP_INTERNAL_THREADS_H
26
// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the POSIX
// threads standard), and Win32 threads.  Uithread support by Jochen
// Schlick, 1999; the Solaris threads support was generalized from it.
31
32 #ifndef _STLP_INTERNAL_CSTDDEF
33 # include <stl/_cstddef.h>
34 #endif
35
36 #ifndef _STLP_INTERNAL_CSTDLIB
37 # include <stl/_cstdlib.h>
38 #endif
39
40 // On SUN and Mac OS X gcc, zero-initialization works just fine...
41 #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
42 # define _STLP_MUTEX_INITIALIZER
43 #endif
44
/* This header defines the following atomic operations that platforms should
 * try to support as much as possible.  Atomic operations are exposed as macros
 * in order to easily test for their existence.  They are:
 * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
 * increment *__ptr by 1 and return the new value
 * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
 * decrement *__ptr by 1 and return the new value
 * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * assign __val to *__target and return the former *__target value
 * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
 * assign __ptr to *__target and return the former *__target value
 * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * do *__target = *__target + __val and return the old *__target value
 */
59
60 #if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
61 typedef long __stl_atomic_t;
62 #else
63 /* Don't import whole namespace!!!! - ptr */
64 // # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
65 // // using _STLP_VENDOR_CSTD::size_t;
66 // using namespace _STLP_VENDOR_CSTD;
67 // # endif
68 typedef size_t __stl_atomic_t;
69 #endif
70
71 #if defined (_STLP_THREADS)
72
73 # if defined (_STLP_SGI_THREADS)
74
75 # include <mutex.h>
76 // Hack for SGI o32 compilers.
77 # if !defined(__add_and_fetch) && \
78 (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
79 # define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
80 # define __test_and_set(__l,__v) test_and_set(__l,__v)
81 # endif /* o32 */
82
83 # if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
84 # define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
85 # else
86 # define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
87 # endif
88
89 # define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
90 # define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
91
92 # elif defined (_STLP_PTHREADS)
93
94 # include <pthread.h>
95 # if !defined (_STLP_USE_PTHREAD_SPINLOCK)
96 # if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
97 # define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
98 # endif
99 //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
100 # if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
101 # define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
102 # else
103 # define _STLP_PTHREAD_ATTR_DEFAULT 0
104 # endif
105 # else // _STLP_USE_PTHREAD_SPINLOCK
106 # if defined (__OpenBSD__)
107 # include <spinlock.h>
108 # endif
109 # endif // _STLP_USE_PTHREAD_SPINLOCK
110
111 # if defined (__GNUC__) && defined (__i386__)
112
113 # if !defined (_STLP_ATOMIC_INCREMENT)
_STLP_atomic_increment_gcc_x86(long volatile * p)114 inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
115 long result;
116 __asm__ __volatile__
117 ("lock; xaddl %1, %0;"
118 :"=m" (*p), "=r" (result)
119 :"m" (*p), "1" (1)
120 :"cc");
121 return result + 1;
122 }
123 # define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
124 # endif
125
126 # if !defined (_STLP_ATOMIC_DECREMENT)
_STLP_atomic_decrement_gcc_x86(long volatile * p)127 inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
128 long result;
129 __asm__ __volatile__
130 ("lock; xaddl %1, %0;"
131 :"=m" (*p), "=r" (result)
132 :"m" (*p), "1" (-1)
133 :"cc");
134 return result - 1;
135 }
136 # define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
137 # endif
138
139 # if !defined (_STLP_ATOMIC_ADD)
_STLP_atomic_add_gcc_x86(long volatile * p,long addend)140 inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
141 long result;
142 __asm__ __volatile__
143 ("lock; xaddl %1, %0;"
144 :"=m" (*p), "=r" (result)
145 :"m" (*p), "1" (addend)
146 :"cc");
147 return result + addend;
148 }
149 # define _STLP_ATOMIC_ADD(__dst, __val) (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
150 # endif
151
152 # endif /* if defined(__GNUC__) && defined(__i386__) */
153
154 # elif defined (_STLP_WIN32THREADS)
155
156 # if !defined (_STLP_ATOMIC_INCREMENT)
157 # if !defined (_STLP_NEW_PLATFORM_SDK)
158 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__CONST_CAST(long*, __x))
159 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__CONST_CAST(long*, __x))
160 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__CONST_CAST(long*, __x), __y)
161 # else
162 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__x)
163 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__x)
164 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__x, __y)
165 # endif
166 # define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y) STLPInterlockedExchangePointer(__x, __y)
/*
 * The following functionality is only available since Windows 98; builds
 * targeting earlier OSes should define _WIN32_WINDOWS to a value lower than
 * that of Win 98.  See the Platform SDK documentation for more information:
 */
172 # if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
173 # define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
174 # endif
175 # endif
176
177 # elif defined (__DECC) || defined (__DECCXX)
178
179 # include <machine/builtins.h>
180 # define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
181 # define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
182 # define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
183
184 # elif defined(_STLP_SPARC_SOLARIS_THREADS)
185
186 # include <stl/_sparc_atomic.h>
187
188 # elif defined (_STLP_UITHREADS)
189
// This inclusion is a potential hazard: it may drag in all sorts
// of old-style headers.  Let's assume the vendor already knows how
// to deal with that.
193 # ifndef _STLP_INTERNAL_CTIME
194 # include <stl/_ctime.h>
195 # endif
196 # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
197 using _STLP_VENDOR_CSTD::time_t;
198 # endif
199 # include <synch.h>
200 # include <cstdio>
201 # include <cwchar>
202
203 # elif defined (_STLP_BETHREADS)
204
205 # include <OS.h>
206 # include <cassert>
207 # include <stdio.h>
208 # define _STLP_MUTEX_INITIALIZER = { 0 }
209
210 # elif defined (_STLP_NWTHREADS)
211
212 # include <nwthread.h>
213 # include <nwsemaph.h>
214
215 # elif defined(_STLP_OS2THREADS)
216
217 # if defined (__GNUC__)
218 # define INCL_DOSSEMAPHORES
219 # include <os2.h>
220 # else
221 // This section serves to replace os2.h for VisualAge C++
222 typedef unsigned long ULONG;
223 # if !defined (__HEV__) /* INCL_SEMAPHORE may also define HEV */
224 # define __HEV__
225 typedef ULONG HEV;
226 typedef HEV* PHEV;
227 # endif
228 typedef ULONG APIRET;
229 typedef ULONG HMTX;
230 typedef HMTX* PHMTX;
231 typedef const char* PCSZ;
232 typedef ULONG BOOL32;
233 APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
234 APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
235 APIRET _System DosReleaseMutexSem(HMTX hmtx);
236 APIRET _System DosCloseMutexSem(HMTX hmtx);
237 # define _STLP_MUTEX_INITIALIZER = { 0 }
238 # endif /* GNUC */
239
240 # endif
241
242 #else
243 /* no threads */
244 # define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
245 # define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
/* We do not provide the other atomic operations, as they are useless when
 * STLport does not have to be thread safe.
 */
249 #endif
250
251 #if !defined (_STLP_MUTEX_INITIALIZER)
252 # if defined(_STLP_ATOMIC_EXCHANGE)
253 # define _STLP_MUTEX_INITIALIZER = { 0 }
254 # elif defined(_STLP_UITHREADS)
255 # define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
256 # else
257 # define _STLP_MUTEX_INITIALIZER
258 # endif
259 #endif
260
261 _STLP_BEGIN_NAMESPACE
262
263 #if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
264 // Helper struct. This is a workaround for various compilers that don't
265 // handle static variables in inline functions properly.
266 template <int __inst>
267 struct _STLP_mutex_spin {
268 enum { __low_max = 30, __high_max = 1000 };
269 // Low if we suspect uniprocessor, high for multiprocessor.
270 static unsigned __max;
271 static unsigned __last;
272 static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
273 static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
274 };
275 #endif // !_STLP_USE_PTHREAD_SPINLOCK
276
277 // Locking class. Note that this class *does not have a constructor*.
278 // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
279 // or dynamically, by explicitly calling the _M_initialize member function.
280 // (This is similar to the ways that a pthreads mutex can be initialized.)
281 // There are explicit member functions for acquiring and releasing the lock.
282
283 // There is no constructor because static initialization is essential for
284 // some uses, and only a class aggregate (see section 8.5.1 of the C++
285 // standard) can be initialized that way. That means we must have no
286 // constructors, no base classes, no virtual functions, and no private or
287 // protected members.
288
289 // For non-static cases, clients should use _STLP_mutex.
290
291 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
292 #if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
293 // It should be relatively easy to get this to work on any modern Unix.
294 volatile __stl_atomic_t _M_lock;
295 #endif
296
297 #if defined (_STLP_THREADS)
298 # if defined (_STLP_ATOMIC_EXCHANGE)
_M_initialize_STLP_mutex_base299 inline void _M_initialize() { _M_lock = 0; }
_M_destroy_STLP_mutex_base300 inline void _M_destroy() {}
301
_M_acquire_lock_STLP_mutex_base302 void _M_acquire_lock() {
303 _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
304 }
305
_M_release_lock_STLP_mutex_base306 inline void _M_release_lock() {
307 volatile __stl_atomic_t* __lock = &_M_lock;
308 # if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
309 asm("sync");
310 *__lock = 0;
311 # elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
312 (defined (_ABIN32) || defined(_ABI64))
313 __lock_release(__lock);
314 # elif defined (_STLP_SPARC_SOLARIS_THREADS)
315 # if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
316 asm("membar #StoreStore ; membar #LoadStore");
317 # else
318 asm(" stbar ");
319 # endif
320 *__lock = 0;
321 # else
322 *__lock = 0;
323 // This is not sufficient on many multiprocessors, since
324 // writes to protected variables and the lock may be reordered.
325 # endif
326 }
327 # elif defined (_STLP_PTHREADS)
328 # if defined (_STLP_USE_PTHREAD_SPINLOCK)
329 # if !defined (__OpenBSD__)
330 pthread_spinlock_t _M_lock;
_M_initialize_STLP_mutex_base331 inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
_M_destroy_STLP_mutex_base332 inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
333
334 // sorry, but no static initializer for pthread_spinlock_t;
335 // this will not work for compilers that has problems with call
336 // constructor of static object...
337
338 // _STLP_mutex_base()
339 // { pthread_spin_init( &_M_lock, 0 ); }
340
341 // ~_STLP_mutex_base()
342 // { pthread_spin_destroy( &_M_lock ); }
343
_M_acquire_lock_STLP_mutex_base344 inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
_M_release_lock_STLP_mutex_base345 inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
346 # else // __OpenBSD__
347 spinlock_t _M_lock;
_M_initialize_STLP_mutex_base348 inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
_M_destroy_STLP_mutex_base349 inline void _M_destroy() { }
_M_acquire_lock_STLP_mutex_base350 inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
_M_release_lock_STLP_mutex_base351 inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
352 # endif // __OpenBSD__
353 # else // !_STLP_USE_PTHREAD_SPINLOCK
354 pthread_mutex_t _M_lock;
_M_initialize_STLP_mutex_base355 inline void _M_initialize()
356 { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
_M_destroy_STLP_mutex_base357 inline void _M_destroy()
358 { pthread_mutex_destroy(&_M_lock); }
_M_acquire_lock_STLP_mutex_base359 inline void _M_acquire_lock() {
360 # if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
361 if (!_M_lock.field1) _M_initialize();
362 # endif
363 pthread_mutex_lock(&_M_lock);
364 }
_M_release_lock_STLP_mutex_base365 inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
366 # endif // !_STLP_USE_PTHREAD_SPINLOCK
367
368 # elif defined (_STLP_UITHREADS)
369 mutex_t _M_lock;
_M_initialize_STLP_mutex_base370 inline void _M_initialize()
371 { mutex_init(&_M_lock, 0, NULL); }
_M_destroy_STLP_mutex_base372 inline void _M_destroy()
373 { mutex_destroy(&_M_lock); }
_M_acquire_lock_STLP_mutex_base374 inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
_M_release_lock_STLP_mutex_base375 inline void _M_release_lock() { mutex_unlock(&_M_lock); }
376
377 # elif defined (_STLP_OS2THREADS)
378 HMTX _M_lock;
_M_initialize_STLP_mutex_base379 inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
_M_destroy_STLP_mutex_base380 inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
_M_acquire_lock_STLP_mutex_base381 inline void _M_acquire_lock() {
382 if (!_M_lock) _M_initialize();
383 DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
384 }
_M_release_lock_STLP_mutex_base385 inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
386 # elif defined (_STLP_BETHREADS)
387 sem_id sem;
_M_initialize_STLP_mutex_base388 inline void _M_initialize() {
389 sem = create_sem(1, "STLPort");
390 assert(sem > 0);
391 }
_M_destroy_STLP_mutex_base392 inline void _M_destroy() {
393 int t = delete_sem(sem);
394 assert(t == B_NO_ERROR);
395 }
396 inline void _M_acquire_lock();
_M_release_lock_STLP_mutex_base397 inline void _M_release_lock() {
398 status_t t = release_sem(sem);
399 assert(t == B_NO_ERROR);
400 }
401 # elif defined (_STLP_NWTHREADS)
402 LONG _M_lock;
_M_initialize_STLP_mutex_base403 inline void _M_initialize()
404 { _M_lock = OpenLocalSemaphore(1); }
_M_destroy_STLP_mutex_base405 inline void _M_destroy()
406 { CloseLocalSemaphore(_M_lock); }
_M_acquire_lock_STLP_mutex_base407 inline void _M_acquire_lock()
408 { WaitOnLocalSemaphore(_M_lock); }
_M_release_lock_STLP_mutex_base409 inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
410 # else //*ty 11/24/2001 - added configuration check
411 # error "Unknown thread facility configuration"
412 # endif
413 #else /* No threads */
_M_initialize_STLP_mutex_base414 inline void _M_initialize() {}
_M_destroy_STLP_mutex_base415 inline void _M_destroy() {}
_M_acquire_lock_STLP_mutex_base416 inline void _M_acquire_lock() {}
_M_release_lock_STLP_mutex_base417 inline void _M_release_lock() {}
418 #endif // _STLP_PTHREADS
419 };
420
// Locking class.  The constructor initializes the lock, the destructor destroys it.
// A well-behaved class: it does not need a static initializer.
423
424 class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
425 public:
_STLP_mutex()426 inline _STLP_mutex () { _M_initialize(); }
~_STLP_mutex()427 inline ~_STLP_mutex () { _M_destroy(); }
428 private:
429 _STLP_mutex(const _STLP_mutex&);
430 void operator=(const _STLP_mutex&);
431 };
432
433 // A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
434 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
435 // releases the lock.
436 // It's not clear that this is exactly the right functionality.
437 // It will probably change in the future.
438
439 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
_STLP_auto_lock_STLP_auto_lock440 _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
441 { _M_lock._M_acquire_lock(); }
~_STLP_auto_lock_STLP_auto_lock442 ~_STLP_auto_lock()
443 { _M_lock._M_release_lock(); }
444
445 private:
446 _STLP_STATIC_MUTEX& _M_lock;
447 void operator=(const _STLP_auto_lock&);
448 _STLP_auto_lock(const _STLP_auto_lock&);
449 };
450
451 /*
452 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
453 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
454 * atomic preincrement/predecrement. The constructor initializes
455 * _M_ref_count.
456 */
457 class _STLP_CLASS_DECLSPEC _Refcount_Base {
458 // The data member _M_ref_count
459 #if defined (__DMC__)
460 public:
461 #endif
462 _STLP_VOLATILE __stl_atomic_t _M_ref_count;
463
464 #if defined (_STLP_THREADS) && \
465 (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
466 (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400)))
467 # define _STLP_USE_MUTEX
468 _STLP_mutex _M_mutex;
469 #endif
470
471 public:
472 // Constructor
_Refcount_Base(__stl_atomic_t __n)473 _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
474
475 // _M_incr and _M_decr
476 #if defined (_STLP_THREADS)
477 # if !defined (_STLP_USE_MUTEX)
_M_incr()478 __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
_M_decr()479 __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
480 # else
481 # undef _STLP_USE_MUTEX
_M_incr()482 __stl_atomic_t _M_incr() {
483 _STLP_auto_lock l(_M_mutex);
484 return ++_M_ref_count;
485 }
_M_decr()486 __stl_atomic_t _M_decr() {
487 _STLP_auto_lock l(_M_mutex);
488 return --_M_ref_count;
489 }
490 # endif
491 #else /* No threads */
_M_incr()492 __stl_atomic_t _M_incr() { return ++_M_ref_count; }
_M_decr()493 __stl_atomic_t _M_decr() { return --_M_ref_count; }
494 #endif
495 };
496
497 /* Atomic swap on __stl_atomic_t
498 * This is guaranteed to behave as though it were atomic only if all
499 * possibly concurrent updates use _Atomic_swap.
500 * In some cases the operation is emulated with a lock.
501 * Idem for _Atomic_swap_ptr
502 */
503 /* Helper struct to handle following cases:
504 * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*) atomic
505 * exchange can be done on pointers
506 * - on platform without atomic operation swap is done in a critical section,
507 * portable but inefficient.
508 */
509 template <int __use_ptr_atomic_swap>
510 class _Atomic_swap_struct {
511 public:
512 #if defined (_STLP_THREADS) && \
513 !defined (_STLP_ATOMIC_EXCHANGE) && \
514 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
515 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
516 # define _STLP_USE_ATOMIC_SWAP_MUTEX
517 static _STLP_STATIC_MUTEX _S_swap_lock;
518 #endif
519
_S_swap(_STLP_VOLATILE __stl_atomic_t * __p,__stl_atomic_t __q)520 static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
521 #if defined (_STLP_THREADS)
522 # if defined (_STLP_ATOMIC_EXCHANGE)
523 return _STLP_ATOMIC_EXCHANGE(__p, __q);
524 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
525 _S_swap_lock._M_acquire_lock();
526 __stl_atomic_t __result = *__p;
527 *__p = __q;
528 _S_swap_lock._M_release_lock();
529 return __result;
530 # else
531 # error Missing atomic swap implementation
532 # endif
533 #else
534 /* no threads */
535 __stl_atomic_t __result = *__p;
536 *__p = __q;
537 return __result;
538 #endif // _STLP_THREADS
539 }
540
_S_swap_ptr(void * _STLP_VOLATILE * __p,void * __q)541 static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
542 #if defined (_STLP_THREADS)
543 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
544 return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
545 # elif defined (_STLP_ATOMIC_EXCHANGE)
546 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
547 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
548 __REINTERPRET_CAST(__stl_atomic_t, __q))
549 );
550 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
551 _S_swap_lock._M_acquire_lock();
552 void *__result = *__p;
553 *__p = __q;
554 _S_swap_lock._M_release_lock();
555 return __result;
556 # else
557 # error Missing pointer atomic swap implementation
558 # endif
559 #else
560 /* no thread */
561 void *__result = *__p;
562 *__p = __q;
563 return __result;
564 #endif
565 }
566 };
567
568 _STLP_TEMPLATE_NULL
569 class _Atomic_swap_struct<0> {
570 public:
571 #if defined (_STLP_THREADS) && \
572 (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
573 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
574 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
575 # define _STLP_USE_ATOMIC_SWAP_MUTEX
576 static _STLP_STATIC_MUTEX _S_swap_lock;
577 #endif
578
_S_swap(_STLP_VOLATILE __stl_atomic_t * __p,__stl_atomic_t __q)579 static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
580 #if defined (_STLP_THREADS)
581 # if defined (_STLP_ATOMIC_EXCHANGE)
582 return _STLP_ATOMIC_EXCHANGE(__p, __q);
583 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
584 /* This should be portable, but performance is expected
585 * to be quite awful. This really needs platform specific
586 * code.
587 */
588 _S_swap_lock._M_acquire_lock();
589 __stl_atomic_t __result = *__p;
590 *__p = __q;
591 _S_swap_lock._M_release_lock();
592 return __result;
593 # else
594 # error Missing atomic swap implementation
595 # endif
596 #else
597 /* no threads */
598 __stl_atomic_t __result = *__p;
599 *__p = __q;
600 return __result;
601 #endif // _STLP_THREADS
602 }
603
_S_swap_ptr(void * _STLP_VOLATILE * __p,void * __q)604 static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
605 #if defined (_STLP_THREADS)
606 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
607 return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
608 # elif defined (_STLP_ATOMIC_EXCHANGE)
609 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
610 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
611 __REINTERPRET_CAST(__stl_atomic_t, __q))
612 );
613 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
614 _S_swap_lock._M_acquire_lock();
615 void *__result = *__p;
616 *__p = __q;
617 _S_swap_lock._M_release_lock();
618 return __result;
619 # else
620 # error Missing pointer atomic swap implementation
621 # endif
622 #else
623 /* no thread */
624 void *__result = *__p;
625 *__p = __q;
626 return __result;
627 #endif
628 }
629 };
630
631 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
632 # pragma warning (push)
633 # pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
634 #endif
635
_Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p,__stl_atomic_t __q)636 inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
637 const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
638 return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
639 }
640
_Atomic_swap_ptr(void * _STLP_VOLATILE * __p,void * __q)641 inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
642 const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
643 return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
644 }
645
646 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
647 # pragma warning (pop)
648 #endif
649
650 #if defined (_STLP_BETHREADS)
651 template <int __inst>
652 struct _STLP_beos_static_lock_data {
653 static bool is_init;
654 struct mutex_t : public _STLP_mutex {
mutex_t_STLP_beos_static_lock_data::mutex_t655 mutex_t()
656 { _STLP_beos_static_lock_data<0>::is_init = true; }
~mutex_t_STLP_beos_static_lock_data::mutex_t657 ~mutex_t()
658 { _STLP_beos_static_lock_data<0>::is_init = false; }
659 };
660 static mutex_t mut;
661 };
662
663 template <int __inst>
664 bool _STLP_beos_static_lock_data<__inst>::is_init = false;
665 template <int __inst>
666 typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
667
_M_acquire_lock()668 inline void _STLP_mutex_base::_M_acquire_lock() {
669 if (sem == 0) {
670 // we need to initialise on demand here
671 // to prevent race conditions use our global
672 // mutex if it's available:
673 if (_STLP_beos_static_lock_data<0>::is_init) {
674 _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
675 if (sem == 0) _M_initialize();
676 }
677 else {
678 // no lock available, we must still be
679 // in startup code, THERE MUST BE ONE THREAD
680 // ONLY active at this point.
681 _M_initialize();
682 }
683 }
684 status_t t;
685 t = acquire_sem(sem);
686 assert(t == B_NO_ERROR);
687 }
688 #endif
689
690 _STLP_END_NAMESPACE
691
692 #if !defined (_STLP_LINK_TIME_INSTANTIATION)
693 # include <stl/_threads.c>
694 #endif
695
696 #endif /* _STLP_INTERNAL_THREADS_H */
697
698 // Local Variables:
699 // mode:C++
700 // End:
701