1 /*
2 Copyright (c) 2003-2014 Erwin Coumans  http://bullet.googlecode.com
3 
4 This software is provided 'as-is', without any express or implied warranty.
5 In no event will the authors be held liable for any damages arising from the use of this software.
6 Permission is granted to anyone to use this software for any purpose,
7 including commercial applications, and to alter it and redistribute it freely,
8 subject to the following restrictions:
9 
10 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
11 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
12 3. This notice may not be removed or altered from any source distribution.
13 */
14 
15 
16 #include "btThreads.h"
17 
18 //
19 // Lightweight spin-mutex based on atomics
20 // Using ordinary system-provided mutexes like Windows critical sections was noticeably slower
21 // presumably because when it fails to lock at first it would sleep the thread and trigger costly
22 // context switching.
23 //
24 
25 #if BT_THREADSAFE
26 
27 #if __cplusplus >= 201103L
28 
29 // for anything claiming full C++11 compliance, use C++11 atomics
30 // on GCC or Clang you need to compile with -std=c++11
31 #define USE_CPP11_ATOMICS 1
32 
33 #elif defined( _MSC_VER )
34 
35 // on MSVC, use intrinsics instead
36 #define USE_MSVC_INTRINSICS 1
37 
38 #elif defined( __GNUC__ ) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
39 
40 // available since GCC 4.7 and some versions of clang
41 // todo: check for clang
42 #define USE_GCC_BUILTIN_ATOMICS 1
43 
44 #elif defined( __GNUC__ ) && (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)
45 
46 // available since GCC 4.1
47 #define USE_GCC_BUILTIN_ATOMICS_OLD 1
48 
49 #endif
50 
51 
52 #if USE_CPP11_ATOMICS
53 
54 #include <atomic>
55 #include <thread>
56 
57 #define THREAD_LOCAL_STATIC thread_local static
58 
tryLock()59 bool btSpinMutex::tryLock()
60 {
61     std::atomic<int>* aDest = reinterpret_cast<std::atomic<int>*>(&mLock);
62     int expected = 0;
63     return std::atomic_compare_exchange_weak_explicit( aDest, &expected, int(1), std::memory_order_acq_rel, std::memory_order_acquire );
64 }
65 
lock()66 void btSpinMutex::lock()
67 {
68     // note: this lock does not sleep the thread.
69     while (! tryLock())
70     {
71         // spin
72     }
73 }
74 
unlock()75 void btSpinMutex::unlock()
76 {
77     std::atomic<int>* aDest = reinterpret_cast<std::atomic<int>*>(&mLock);
78     std::atomic_store_explicit( aDest, int(0), std::memory_order_release );
79 }
80 
81 
82 #elif USE_MSVC_INTRINSICS
83 
84 #define WIN32_LEAN_AND_MEAN
85 
86 #include <windows.h>
87 #include <intrin.h>
88 
89 #define THREAD_LOCAL_STATIC __declspec( thread ) static
90 
91 
tryLock()92 bool btSpinMutex::tryLock()
93 {
94     volatile long* aDest = reinterpret_cast<long*>(&mLock);
95     return ( 0 == _InterlockedCompareExchange( aDest, 1, 0) );
96 }
97 
lock()98 void btSpinMutex::lock()
99 {
100     // note: this lock does not sleep the thread
101     while (! tryLock())
102     {
103         // spin
104     }
105 }
106 
unlock()107 void btSpinMutex::unlock()
108 {
109     volatile long* aDest = reinterpret_cast<long*>( &mLock );
110     _InterlockedExchange( aDest, 0 );
111 }
112 
113 #elif USE_GCC_BUILTIN_ATOMICS
114 
115 #define THREAD_LOCAL_STATIC static __thread
116 
117 
tryLock()118 bool btSpinMutex::tryLock()
119 {
120     int expected = 0;
121     bool weak = false;
122     const int memOrderSuccess = __ATOMIC_ACQ_REL;
123     const int memOrderFail = __ATOMIC_ACQUIRE;
124     return __atomic_compare_exchange_n(&mLock, &expected, int(1), weak, memOrderSuccess, memOrderFail);
125 }
126 
lock()127 void btSpinMutex::lock()
128 {
129     // note: this lock does not sleep the thread
130     while (! tryLock())
131     {
132         // spin
133     }
134 }
135 
unlock()136 void btSpinMutex::unlock()
137 {
138     __atomic_store_n(&mLock, int(0), __ATOMIC_RELEASE);
139 }
140 
141 #elif USE_GCC_BUILTIN_ATOMICS_OLD
142 
143 
144 #define THREAD_LOCAL_STATIC static __thread
145 
// Attempt to take the lock without blocking.
// __sync_bool_compare_and_swap is a full barrier; it returns true only when
// mLock was 0 and this thread atomically set it to 1.
bool btSpinMutex::tryLock()
{
    return __sync_bool_compare_and_swap(&mLock, int(0), int(1));
}
150 
lock()151 void btSpinMutex::lock()
152 {
153     // note: this lock does not sleep the thread
154     while (! tryLock())
155     {
156         // spin
157     }
158 }
159 
void btSpinMutex::unlock()
{
    // write 0
    // __sync_fetch_and_and with 0 atomically clears the lock word and acts
    // as a full memory barrier, publishing critical-section writes first.
    // (Pre-4.7 GCC has no plain atomic release-store builtin.)
    __sync_fetch_and_and(&mLock, int(0));
}
165 
166 #else //#elif USE_MSVC_INTRINSICS
167 
168 #error "no threading primitives defined -- unknown platform"
169 
170 #endif  //#else //#elif USE_MSVC_INTRINSICS
171 
172 
173 struct ThreadsafeCounter
174 {
175     unsigned int mCounter;
176     btSpinMutex mMutex;
177 
ThreadsafeCounterThreadsafeCounter178     ThreadsafeCounter() {mCounter=0;}
179 
getNextThreadsafeCounter180     unsigned int getNext()
181     {
182         // no need to optimize this with atomics, it is only called ONCE per thread!
183         mMutex.lock();
184         unsigned int val = mCounter++;
185         mMutex.unlock();
186         return val;
187     }
188 };
189 
// File-scope counter consulted by btGetCurrentThreadIndex() below.
static ThreadsafeCounter gThreadCounter;
191 
192 
193 // return a unique index per thread, starting with 0 and counting up
btGetCurrentThreadIndex()194 unsigned int btGetCurrentThreadIndex()
195 {
196     const unsigned int kNullIndex = ~0U;
197     THREAD_LOCAL_STATIC unsigned int sThreadIndex = kNullIndex;
198     if ( sThreadIndex == kNullIndex )
199     {
200         sThreadIndex = gThreadCounter.getNext();
201     }
202     return sThreadIndex;
203 }
204 
btIsMainThread()205 bool btIsMainThread()
206 {
207     return btGetCurrentThreadIndex() == 0;
208 }
209 
210 #else // #if BT_THREADSAFE
211 
212 // These should not be called ever
// Non-threadsafe build stub: locking must never be attempted.
void btSpinMutex::lock()
{
    btAssert(!"unimplemented btSpinMutex::lock() called");
}
217 
// Non-threadsafe build stub: unlocking must never be attempted.
void btSpinMutex::unlock()
{
    btAssert(!"unimplemented btSpinMutex::unlock() called");
}
222 
// Non-threadsafe build stub; returns true so a caller that (incorrectly)
// reaches this in a release build does not spin forever.
bool btSpinMutex::tryLock()
{
    btAssert(!"unimplemented btSpinMutex::tryLock() called");
    return true;
}
228 
229 
230 #endif // #if BT_THREADSAFE
231 
232