1 /*
2     Copyright (c) 2005-2021 Intel Corporation
3 
4     Licensed under the Apache License, Version 2.0 (the "License");
5     you may not use this file except in compliance with the License.
6     You may obtain a copy of the License at
7 
8         http://www.apache.org/licenses/LICENSE-2.0
9 
10     Unless required by applicable law or agreed to in writing, software
11     distributed under the License is distributed on an "AS IS" BASIS,
12     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13     See the License for the specific language governing permissions and
14     limitations under the License.
15 */
16 
17 #ifndef __TBB_spin_rw_mutex_H
18 #define __TBB_spin_rw_mutex_H
19 
20 #include "detail/_namespace_injection.h"
21 #include "detail/_mutex_common.h"
22 
23 #include "profiling.h"
24 
25 #include "detail/_assert.h"
26 #include "detail/_utils.h"
27 #include "detail/_scoped_lock.h"
28 
29 #include <atomic>
30 
31 namespace tbb {
32 namespace detail {
33 namespace d1 {
34 
35 #if __TBB_TSX_INTRINSICS_PRESENT
36 class rtm_rw_mutex;
37 #endif
38 
39 //! Fast, unfair, spinning reader-writer lock with backoff and writer-preference
40 /** @ingroup synchronization */
//! Fast, unfair, spinning reader-writer lock with backoff and writer-preference
/** The lock state is packed into a single atomic word:
    bit 0 (WRITER) marks an active writer, bit 1 (WRITER_PENDING) is a hint that
    a writer is waiting (new readers back off), and bits 2..N count active readers.
    Writers are preferred: once WRITER_PENDING is set, arriving readers spin
    until the writer has come and gone. The lock is unfair (no FIFO ordering)
    and non-recursive. All read-modify-write operations on m_state use the
    default std::memory_order_seq_cst, which provides the needed
    acquire/release semantics on lock/unlock.
    @ingroup synchronization */
class spin_rw_mutex {
public:
    //! Constructs an unlocked mutex and registers it with ITT profiling tools.
    spin_rw_mutex() noexcept : m_state(0) {
       create_itt_sync(this, "tbb::spin_rw_mutex", "");
    }

    //! Destructor; destroying a held mutex is a usage error.
    ~spin_rw_mutex() {
        __TBB_ASSERT(!m_state, "destruction of an acquired mutex");
    }

    //! No Copy
    spin_rw_mutex(const spin_rw_mutex&) = delete;
    spin_rw_mutex& operator=(const spin_rw_mutex&) = delete;

    // Generic RAII lock type; supports both exclusive and shared acquisition.
    using scoped_lock = rw_scoped_lock<spin_rw_mutex>;

    //! Mutex traits
    static constexpr bool is_rw_mutex = true;
    static constexpr bool is_recursive_mutex = false;
    static constexpr bool is_fair_mutex = false;

    //! Acquire exclusive (writer) ownership; spins with exponential backoff.
    void lock() {
        call_itt_notify(prepare, this);
        for (atomic_backoff backoff; ; backoff.pause()) {
            state_type s = m_state.load(std::memory_order_relaxed);
            if (!(s & BUSY)) { // no readers, no writers
                if (m_state.compare_exchange_strong(s, WRITER))
                    break; // successfully stored writer flag
                backoff.reset(); // we could be very close to complete op.
            } else if (!(s & WRITER_PENDING)) { // no pending writers
                // Advertise a waiting writer so new readers hold off;
                // this is what gives the lock its writer preference.
                m_state |= WRITER_PENDING;
            }
        }
        call_itt_notify(acquired, this);
    }

    //! Try acquiring exclusive (writer) ownership (non-blocking).
    /** Return true if lock acquired; false otherwise.
        Does not set WRITER_PENDING, so a failed attempt leaves no trace. */
    bool try_lock() {
        // for a writer: only possible to acquire if no active readers or writers
        state_type s = m_state.load(std::memory_order_relaxed);
        if (!(s & BUSY)) { // no readers, no writers; mask is 1..1101
            if (m_state.compare_exchange_strong(s, WRITER)) {
                call_itt_notify(acquired, this);
                return true; // successfully stored writer flag
            }
        }
        return false;
    }

    //! Release exclusive ownership.
    void unlock() {
        call_itt_notify(releasing, this);
        // Clears both WRITER and WRITER_PENDING in one atomic op,
        // leaving any reader count bits untouched.
        m_state &= READERS;
    }

    //! Acquire shared (reader) ownership; spins while a writer is active or pending.
    void lock_shared() {
        call_itt_notify(prepare, this);
        for (atomic_backoff b; ; b.pause()) {
            state_type s = m_state.load(std::memory_order_relaxed);
            if (!(s & (WRITER | WRITER_PENDING))) { // no writer or write requests
                // Optimistically register as a reader, then check whether a
                // writer slipped in between the load and the fetch_add.
                state_type prev_state = m_state.fetch_add(ONE_READER);
                if (!(prev_state & WRITER)) {
                    break; // successfully stored increased number of readers
                }
                // writer got there first, undo the increment
                m_state -= ONE_READER;
            }
        }
        call_itt_notify(acquired, this);
        __TBB_ASSERT(m_state & READERS, "invalid state of a read lock: no readers");
    }

    //! Try acquiring shared (reader) ownership (non-blocking).
    /** Return true if lock acquired; false otherwise. Uses the same
        optimistic increment-then-undo protocol as lock_shared(). */
    bool try_lock_shared() {
        // for a reader: acquire if no active or waiting writers
        state_type s = m_state.load(std::memory_order_relaxed);
        if (!(s & (WRITER | WRITER_PENDING))) { // no writers
            state_type prev_state = m_state.fetch_add(ONE_READER);
            if (!(prev_state & WRITER)) {  // got the lock
                call_itt_notify(acquired, this);
                return true; // successfully stored increased number of readers
            }
            // writer got there first, undo the increment
            m_state -= ONE_READER;
        }
        return false;
    }

    //! Release shared ownership.
    void unlock_shared() {
        __TBB_ASSERT(m_state & READERS, "invalid state of a read lock: no readers");
        call_itt_notify(releasing, this);
        m_state -= ONE_READER;
    }

protected:
    /** Internal non ISO C++ standard API **/
    //! This API is used through the scoped_lock class

    //! Upgrade reader to become a writer.
    /** The caller must currently hold a read lock.
        Returns whether the upgrade happened without releasing and re-acquiring
        the lock. On the false path the caller temporarily loses the lock, so
        any state observed under the read lock may have changed. */
    bool upgrade() {
        state_type s = m_state.load(std::memory_order_relaxed);
        __TBB_ASSERT(s & READERS, "invalid state before upgrade: no readers ");
        // Check and set writer-pending flag.
        // Required conditions: either no pending writers, or we are the only reader
        // (with multiple readers and pending writer, another upgrade could have been requested)
        while ((s & READERS) == ONE_READER || !(s & WRITER_PENDING)) {
            if (m_state.compare_exchange_strong(s, s | WRITER | WRITER_PENDING)) {
                // We own WRITER|WRITER_PENDING plus our own reader count.
                // Wait for the other readers to drain.
                atomic_backoff backoff;
                while ((m_state.load(std::memory_order_relaxed) & READERS) != ONE_READER) backoff.pause();
                __TBB_ASSERT((m_state & (WRITER_PENDING|WRITER)) == (WRITER_PENDING | WRITER), "invalid state when upgrading to writer");
                // Both new readers and writers are blocked at this time
                // Drop our reader slot and the pending hint; WRITER remains set.
                m_state -= (ONE_READER + WRITER_PENDING);
                return true; // successfully upgraded
            }
            // CAS failed: s now holds the fresh state; the loop condition re-checks it.
        }
        // Slow reacquire
        unlock_shared();
        lock();
        return false;
    }

    //! Downgrade writer to a reader
    /** The caller must hold the write lock; after return it holds a read lock
        without ever releasing the mutex in between. */
    void downgrade() {
        call_itt_notify(releasing, this);
        // Atomically add one reader and clear the WRITER bit in a single RMW.
        m_state += (ONE_READER - WRITER);
        __TBB_ASSERT(m_state & READERS, "invalid state after downgrade: no readers");
    }

    using state_type = std::intptr_t;
    static constexpr state_type WRITER = 1;          // bit 0: active writer
    static constexpr state_type WRITER_PENDING = 2;  // bit 1: writer is waiting
    static constexpr state_type READERS = ~(WRITER | WRITER_PENDING); // reader-count bits
    static constexpr state_type ONE_READER = 4;      // increment for one reader
    // BUSY excludes WRITER_PENDING: a pending (not yet active) writer does
    // not by itself make the mutex busy for try_lock.
    static constexpr state_type BUSY = WRITER | READERS;
    friend scoped_lock;
    //! State of lock
    /** Bit 0 = writer is holding lock
        Bit 1 = request by a writer to acquire lock (hint to readers to wait)
        Bit 2..N = number of readers holding lock */
    std::atomic<state_type> m_state;
}; // class spin_rw_mutex
189 
#if TBB_USE_PROFILING_TOOLS
//! Associate a human-readable name with the mutex for ITT-based profiling tools.
inline void set_name(spin_rw_mutex& obj, const char* name) {
    itt_set_sync_name(&obj, name);
}
#if (_WIN32||_WIN64)
//! Wide-character overload (Windows only).
inline void set_name(spin_rw_mutex& obj, const wchar_t* name) {
    itt_set_sync_name(&obj, name);
}
#endif // WIN
#else
//! No-op stubs so callers compile unchanged when profiling support is disabled.
inline void set_name(spin_rw_mutex&, const char*) {}
#if (_WIN32||_WIN64)
inline void set_name(spin_rw_mutex&, const wchar_t*) {}
#endif // WIN
#endif
205 } // namespace d1
206 } // namespace detail
207 
208 inline namespace v1 {
209 using detail::d1::spin_rw_mutex;
210 } // namespace v1
211 namespace profiling {
212     using detail::d1::set_name;
213 }
214 } // namespace tbb
215 
216 #include "detail/_rtm_rw_mutex.h"
217 
218 namespace tbb {
219 inline namespace v1 {
220 #if __TBB_TSX_INTRINSICS_PRESENT
221     using speculative_spin_rw_mutex = detail::d1::rtm_rw_mutex;
222 #else
223     using speculative_spin_rw_mutex = detail::d1::spin_rw_mutex;
224 #endif
225 }
226 }
227 
228 #endif /* __TBB_spin_rw_mutex_H */
229 
230