/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_spin_mutex_H
#define __TBB_spin_mutex_H

#define __TBB_spin_mutex_H_include_area
#include "internal/_warning_suppress_enable_notice.h"

#include <cstddef>
#include <new>
#include "aligned_space.h"
#include "tbb_stddef.h"
#include "tbb_machine.h"
#include "tbb_profiling.h"
#include "internal/_mutex_padding.h"

namespace tbb {

//! A lock that occupies a single byte.
/** A spin_mutex is a spin lock that fits in a single byte.
    It should be used only for locking short critical sections
    (typically less than 20 instructions) when fairness is not an issue.
    If zero-initialized, the mutex is considered unheld.
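
    A minimal usage sketch (the mutex, counter, and function names below are
    illustrative, not part of this header):
    \code
    tbb::spin_mutex my_mutex;
    long my_counter = 0;
    void safe_increment() {
        tbb::spin_mutex::scoped_lock lock(my_mutex); // spins until acquired
        ++my_counter;                                // short critical section
    }                                                // released by destructor
    \endcode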
    @ingroup synchronization */
class spin_mutex : internal::mutex_copy_deprecated_and_disabled {
    //! 0 if lock is released, 1 if lock is acquired.
    __TBB_atomic_flag flag;

public:
    //! Construct unacquired lock.
    /** Equivalent to zero-initialization of *this. */
    spin_mutex() : flag(0) {
#if TBB_USE_THREADING_TOOLS
        internal_construct();
#endif
    }

    //! Represents acquisition of a mutex.
    class scoped_lock : internal::no_copy {
    private:
        //! Points to currently held mutex, or NULL if no lock is held.
        spin_mutex* my_mutex;

        //! Value to store into spin_mutex::flag to unlock the mutex.
        /** This variable is no longer used. Instead, 0 and 1 are used to
            represent that the lock is free and acquired, respectively.
            We keep this member variable to preserve backward compatibility. */
        __TBB_Flag my_unlock_value;

        //! Like acquire, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_acquire( spin_mutex& m );

        //! Like try_acquire, but with ITT instrumentation.
        bool __TBB_EXPORTED_METHOD internal_try_acquire( spin_mutex& m );

        //! Like release, but with ITT instrumentation.
        void __TBB_EXPORTED_METHOD internal_release();

        friend class spin_mutex;

    public:
        //! Construct without acquiring a mutex.
        scoped_lock() : my_mutex(NULL), my_unlock_value(0) {}

        //! Construct and acquire lock on a mutex.
        scoped_lock( spin_mutex& m ) : my_unlock_value(0) {
            internal::suppress_unused_warning(my_unlock_value);
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            my_mutex=NULL;
            internal_acquire(m);
#else
            my_mutex=&m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Acquire lock.
        void acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_acquire(m);
#else
            my_mutex = &m;
            __TBB_LockByte(m.flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Try acquiring lock (non-blocking)
        /** Return true if lock acquired; false otherwise. */
        bool try_acquire( spin_mutex& m ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            return internal_try_acquire(m);
#else
            bool result = __TBB_TryLockByte(m.flag);
            if( result )
                my_mutex = &m;
            return result;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT*/
        }

        //! Release lock
        void release() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
            internal_release();
#else
            __TBB_UnlockByte(my_mutex->flag);
            my_mutex = NULL;
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
        }

        //! Destroy lock.  If holding a lock, releases the lock first.
        ~scoped_lock() {
            if( my_mutex ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
                internal_release();
#else
                __TBB_UnlockByte(my_mutex->flag);
#endif /* TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT */
            }
        }
    };

    //! Internal constructor with ITT instrumentation.
    void __TBB_EXPORTED_METHOD internal_construct();

    // Mutex traits
    static const bool is_rw_mutex = false;
    static const bool is_recursive_mutex = false;
    static const bool is_fair_mutex = false;

    // ISO C++0x compatibility methods

    //! Acquire lock
    void lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        new(tmp.begin()) scoped_lock(*this);
#else
        __TBB_LockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Try acquiring lock (non-blocking)
    /** Return true if lock acquired; false otherwise. */
    bool try_lock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        return (new(tmp.begin()) scoped_lock)->internal_try_acquire(*this);
#else
        return __TBB_TryLockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS*/
    }

    //! Release lock
    void unlock() {
#if TBB_USE_THREADING_TOOLS
        aligned_space<scoped_lock> tmp;
        scoped_lock& s = *tmp.begin();
        s.my_mutex = this;
        s.internal_release();
#else
        __TBB_UnlockByte(flag);
#endif /* TBB_USE_THREADING_TOOLS */
    }
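
    // The lock(), try_lock(), and unlock() methods above satisfy the C++
    // Lockable requirements, so the mutex also works with standard RAII
    // helpers. A minimal sketch, assuming <mutex> is included (illustrative
    // only, not part of this header):
    //
    //     tbb::spin_mutex m;
    //     {
    //         std::lock_guard<tbb::spin_mutex> guard(m); // calls m.lock()
    //         // ... short critical section ...
    //     }                                              // calls m.unlock()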

    friend class scoped_lock;
}; // end of spin_mutex

__TBB_DEFINE_PROFILING_SET_NAME(spin_mutex)

} // namespace tbb

#if ( __TBB_x86_32 || __TBB_x86_64 )
#include "internal/_x86_eliding_mutex_impl.h"
#endif

namespace tbb {
//! A cross-platform spin mutex with speculative lock acquisition.
/** On platforms with proper HW support, this lock may speculatively execute
    its critical sections, using HW mechanisms to detect real data races and
    ensure atomicity of the critical sections. In particular, it uses
    Intel(R) Transactional Synchronization Extensions (Intel(R) TSX).
    Without such HW support, it behaves like a spin_mutex.
    It should be used for locking short critical sections where the lock is
    contended but the data it protects are not.  If zero-initialized, the
    mutex is considered unheld.
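
    A minimal usage sketch, assuming the same scoped_lock interface as
    spin_mutex (the mutex, table, and function names are illustrative):
    \code
    tbb::speculative_spin_mutex table_mutex;
    void insert_item( my_table& table, const my_item& x ) {
        tbb::speculative_spin_mutex::scoped_lock lock(table_mutex);
        table.insert(x); // may run speculatively on hardware with Intel TSX
    }
    \endcode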
    @ingroup synchronization */

#if ( __TBB_x86_32 || __TBB_x86_64 )
typedef interface7::internal::padded_mutex<interface7::internal::x86_eliding_mutex,false> speculative_spin_mutex;
#else
typedef interface7::internal::padded_mutex<spin_mutex,false> speculative_spin_mutex;
#endif
__TBB_DEFINE_PROFILING_SET_NAME(speculative_spin_mutex)

} // namespace tbb

#include "internal/_warning_suppress_disable_notice.h"
#undef __TBB_spin_mutex_H_include_area

#endif /* __TBB_spin_mutex_H */