/*
    Copyright (c) 2005-2017 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_recursive_mutex_H
#define __TBB_recursive_mutex_H

#if _WIN32||_WIN64
#include "machine/windows_api.h"
#else
#include <pthread.h>
#endif /* _WIN32||_WIN64 */

#include <new>
#include "aligned_space.h"
#include "tbb_stddef.h"
#include "tbb_profiling.h"

namespace tbb {
//! Mutex that allows recursive acquisition.
/** A mutex that the thread holding it may acquire again without deadlocking;
    it must be released as many times as it was acquired.
    @ingroup synchronization */
class recursive_mutex : internal::mutex_copy_deprecated_and_disabled {
public:
    //! Construct unacquired recursive_mutex.
    recursive_mutex() {
#if TBB_USE_ASSERT || TBB_USE_THREADING_TOOLS
        internal_construct();
#else
  #if _WIN32||_WIN64
        InitializeCriticalSectionEx(&impl, 4000, 0);
  #else
        pthread_mutexattr_t mtx_attr;
        int error_code = pthread_mutexattr_init( &mtx_attr );
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutexattr_init failed");

        pthread_mutexattr_settype( &mtx_attr, PTHREAD_MUTEX_RECURSIVE );
        error_code = pthread_mutex_init( &impl, &mtx_attr );
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_init failed");

        pthread_mutexattr_destroy( &mtx_attr );
  #endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    ~recursive_mutex() {
#if TBB_USE_ASSERT
        internal_destroy();
#else
  #if _WIN32||_WIN64
        DeleteCriticalSection(&impl);
  #else
        pthread_mutex_destroy(&impl);
  #endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    class scoped_lock;
    friend class scoped_lock;

    //! The scoped locking pattern
    /** It helps avoid the common problem of forgetting to release the lock.
        It also nicely provides the "node" for queuing locks.
        See the illustrative usage sketch after this class. */
    class scoped_lock: internal::no_copy {
    public:
        //! Construct lock that has not acquired a recursive_mutex.
        scoped_lock() : my_mutex(NULL) {}

        //! Acquire lock on given mutex.
        scoped_lock( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            my_mutex = &mutex;
#endif /* TBB_USE_ASSERT */
            acquire( mutex );
        }

        //! Release lock (if lock is held).
        ~scoped_lock() {
            if( my_mutex )
                release();
        }

        //! Acquire lock on given mutex.
        void acquire( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            internal_acquire( mutex );
#else
            my_mutex = &mutex;
            mutex.lock();
#endif /* TBB_USE_ASSERT */
        }

        //! Try to acquire lock on given recursive_mutex.
        bool try_acquire( recursive_mutex& mutex ) {
#if TBB_USE_ASSERT
            return internal_try_acquire( mutex );
#else
            bool result = mutex.try_lock();
            if( result )
                my_mutex = &mutex;
            return result;
#endif /* TBB_USE_ASSERT */
        }

        //! Release lock
        void release() {
#if TBB_USE_ASSERT
            internal_release();
#else
            my_mutex->unlock();
            my_mutex = NULL;
#endif /* TBB_USE_ASSERT */
        }

    private:
        //! Pointer to the recursive_mutex currently held, or NULL if none.
        recursive_mutex* my_mutex;

        //! All checks from acquire using mutex.state were moved here
        void __TBB_EXPORTED_METHOD internal_acquire( recursive_mutex& m );

        //! All checks from try_acquire using mutex.state were moved here
        bool __TBB_EXPORTED_METHOD internal_try_acquire( recursive_mutex& m );

        //! All checks from release using mutex.state were moved here
        void __TBB_EXPORTED_METHOD internal_release();

        friend class recursive_mutex;
    };
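
    /* Illustrative usage sketch (not part of the original header): the scoped
       locking pattern above with a recursive_mutex.  The names my_mutex, update()
       and update_and_log() are hypothetical caller code, not TBB API.

           tbb::recursive_mutex my_mutex;

           void update() {
               tbb::recursive_mutex::scoped_lock lock( my_mutex );
               // ... modify shared state ...
           }                                    // lock released here

           void update_and_log() {
               tbb::recursive_mutex::scoped_lock lock( my_mutex );
               update();   // re-acquires my_mutex on the same thread; allowed because the mutex is recursive
           }

       A non-blocking attempt uses try_acquire():

           tbb::recursive_mutex::scoped_lock lock;
           if( lock.try_acquire( my_mutex ) ) {
               // ... got the lock; it is released when 'lock' goes out of scope ...
           }
    */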

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = true;
    static const bool is_fair_mutex = false;

    // C++11 compatibility interface

    //! Acquire lock
    void lock() {
#if TBB_USE_ASSERT
        aligned_space<scoped_lock> tmp;
        new(tmp.begin()) scoped_lock(*this);
#else
  #if _WIN32||_WIN64
        EnterCriticalSection(&impl);
  #else
        int error_code = pthread_mutex_lock(&impl);
        if( error_code )
            tbb::internal::handle_perror(error_code,"recursive_mutex: pthread_mutex_lock failed");
  #endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
#if TBB_USE_ASSERT
        aligned_space<scoped_lock> tmp;
        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
#else
  #if _WIN32||_WIN64
        return TryEnterCriticalSection(&impl)!=0;
  #else
        return pthread_mutex_trylock(&impl)==0;
  #endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }

    //! Release lock
    void unlock() {
#if TBB_USE_ASSERT
        aligned_space<scoped_lock> tmp;
        scoped_lock& s = *tmp.begin();
        s.my_mutex = this;
        s.internal_release();
#else
  #if _WIN32||_WIN64
        LeaveCriticalSection(&impl);
  #else
        pthread_mutex_unlock(&impl);
  #endif /* _WIN32||_WIN64 */
#endif /* TBB_USE_ASSERT */
    }
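
    /* Illustrative sketch (not part of the original header): lock(), try_lock() and
       unlock() above satisfy the C++11 BasicLockable/Lockable requirements, so with
       a C++11 standard library the mutex also works with the standard lock adaptors.
       The function f() and the mutex my_mutex are hypothetical caller code.

           #include <mutex>

           tbb::recursive_mutex my_mutex;

           void f() {
               std::lock_guard<tbb::recursive_mutex> guard( my_mutex );
               // ... critical section; unlock() runs when 'guard' is destroyed ...
           }
    */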

    //! Return native_handle
  #if _WIN32||_WIN64
    typedef LPCRITICAL_SECTION native_handle_type;
  #else
    typedef pthread_mutex_t* native_handle_type;
  #endif
    native_handle_type native_handle() { return (native_handle_type) &impl; }
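
    /* Illustrative sketch (not part of the original header): native_handle() exposes
       the underlying OS mutex for interoperation with platform-specific code.  The
       function native_lock_unlock() and the mutex my_mutex are hypothetical caller
       code; note that acquisitions made through the native handle are not tracked by
       the TBB_USE_ASSERT checks, so native lock/unlock calls should stay paired.

           tbb::recursive_mutex my_mutex;

           void native_lock_unlock() {
       #if _WIN32||_WIN64
               EnterCriticalSection( my_mutex.native_handle() );
               LeaveCriticalSection( my_mutex.native_handle() );
       #else
               pthread_mutex_lock( my_mutex.native_handle() );
               pthread_mutex_unlock( my_mutex.native_handle() );
       #endif
           }
    */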

private:
#if _WIN32||_WIN64
    CRITICAL_SECTION impl;
    enum state_t {
        INITIALIZED=0x1234,
        DESTROYED=0x789A,
    } state;
#else
    pthread_mutex_t impl;
#endif /* _WIN32||_WIN64 */

    //! All checks from mutex constructor using mutex.state were moved here
    void __TBB_EXPORTED_METHOD internal_construct();

    //! All checks from mutex destructor using mutex.state were moved here
    void __TBB_EXPORTED_METHOD internal_destroy();
};

__TBB_DEFINE_PROFILING_SET_NAME(recursive_mutex)

} // namespace tbb

#endif /* __TBB_recursive_mutex_H */
235