/*
    Copyright 2005-2014 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation. Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
    implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See the GNU General Public License for more details. You should have received a copy of
    the GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software library without
    restriction. Specifically, if other files instantiate templates or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable, this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License. This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#include "tbb/tbb_config.h"
#if __TBB_TSX_AVAILABLE
#include "tbb/spin_rw_mutex.h"
#include "tbb/tbb_machine.h"
#include "itt_notify.h"
#include "governor.h"
#include "tbb/atomic.h"

// __TBB_RW_MUTEX_DELAY_TEST shifts the point at which the flags that abort speculation are
// added to the read-set of the transaction.  If 1, the test is issued just before
// the transaction is ended (in internal_release) rather than right after it is started.
#ifndef __TBB_RW_MUTEX_DELAY_TEST
#define __TBB_RW_MUTEX_DELAY_TEST 1
#endif
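
// Illustrative sketch (not part of the implementation below) of where that test lands for
// the two settings, using the machine-layer calls that appear later in this file:
//
//     __TBB_machine_begin_transaction();
//     // __TBB_RW_MUTEX_DELAY_TEST == 0: test here, at acquire time, so the flag written by
//     // real lockers enters the transaction's read-set for its whole duration:
//     //     if( flag_written_by_real_lockers ) __TBB_machine_transaction_conflict_abort();
//     /* ... speculative critical section ... */
//     // __TBB_RW_MUTEX_DELAY_TEST == 1: test here instead, just before committing:
//     //     if( flag_written_by_real_lockers ) __TBB_machine_transaction_conflict_abort();
//     __TBB_machine_end_transaction();
//
// "flag_written_by_real_lockers" is a placeholder for this->state (speculating writers)
// or w_flag (speculating readers).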

#if defined(_MSC_VER) && defined(_Wp64)
    // Workaround for overzealous compiler warnings in /Wp64 mode
    #pragma warning (disable: 4244)
#endif

namespace tbb {

namespace interface8 {
namespace internal {

// Abort codes (hexadecimal bit flags) for mutexes that detect a conflict with another thread.
// These mirror the RTM abort-status bits reported in EAX.
enum {
    speculation_transaction_aborted = 0x01,
    speculation_can_retry           = 0x02,
    speculation_memadd_conflict     = 0x04,
    speculation_buffer_overflow     = 0x08,
    speculation_breakpoint_hit      = 0x10,
    speculation_nested_abort        = 0x20,
    speculation_xabort_mask         = 0xFF000000,
    speculation_xabort_shift        = 24,
    speculation_retry = speculation_transaction_aborted
                        | speculation_can_retry
                        | speculation_memadd_conflict
};
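
// Illustrative sketch (not part of the implementation): decoding an abort status word
// returned by a failed __TBB_machine_begin_transaction() with the constants above.
// "abort_code" is a placeholder name for the captured return value.
//
//     bool worth_retrying = (abort_code & speculation_retry) != 0;
//     unsigned int xabort_argument =
//         (abort_code & speculation_xabort_mask) >> speculation_xabort_shift;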

// maximum number of times to retry
static const int retry_threshold_read = 10;
static const int retry_threshold_write = 10;

//! Release speculative mutex
void x86_rtm_rw_mutex::internal_release(x86_rtm_rw_mutex::scoped_lock& s) {
    switch(s.transaction_state) {
    case RTM_transacting_writer:
    case RTM_transacting_reader:
        {
            __TBB_ASSERT(__TBB_machine_is_in_transaction(), "transaction_state && not speculating");
#if __TBB_RW_MUTEX_DELAY_TEST
            if(s.transaction_state == RTM_transacting_reader) {
                if(this->w_flag) __TBB_machine_transaction_conflict_abort();
            } else {
                if(this->state) __TBB_machine_transaction_conflict_abort();
            }
#endif
            __TBB_machine_end_transaction();
            s.my_scoped_lock.internal_set_mutex(NULL);
        }
        break;
    case RTM_real_reader:
        __TBB_ASSERT(!this->w_flag, "w_flag set but read lock acquired");
        s.my_scoped_lock.release();
        break;
    case RTM_real_writer:
        __TBB_ASSERT(this->w_flag, "w_flag unset but write lock acquired");
        this->w_flag = false;
        s.my_scoped_lock.release();
        break;
    case RTM_not_in_mutex:
        __TBB_ASSERT(false, "RTM_not_in_mutex, but in release");
    default:
        __TBB_ASSERT(false, "invalid transaction_state");
    }
    s.transaction_state = RTM_not_in_mutex;
}
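
// Summary of the scoped_lock.transaction_state transitions implemented in this file
// (added for readability; derived from the functions in this file, not from separate docs):
//   RTM_not_in_mutex        -- acquire, speculation succeeds -->   RTM_transacting_reader/writer
//   RTM_not_in_mutex        -- acquire, fall back to real lock --> RTM_real_reader/writer
//   RTM_transacting_reader  <-- upgrade / downgrade -->            RTM_transacting_writer
//   RTM_real_reader         <-- upgrade / downgrade -->            RTM_real_writer
//   any state above         -- release -->                         RTM_not_in_mutex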

//! Acquire write lock on the given mutex.
void x86_rtm_rw_mutex::internal_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate)
{
    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "scoped_lock already in transaction");
    if(tbb::internal::governor::speculation_enabled()) {
        int num_retries = 0;
        unsigned int abort_code;
        do {
            tbb::internal::atomic_backoff backoff;
            if(this->state) {
                if(only_speculate) return;
                do {
                    backoff.pause(); // test the spin_rw_mutex (real readers or writers)
                } while(this->state);
            }
            // _xbegin returns -1 on success or the abort code, so capture it
            if(( abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) )
            {
                // started speculation
#if !__TBB_RW_MUTEX_DELAY_TEST
                if(this->state) { // add spin_rw_mutex to read-set.
                    // reader or writer grabbed the lock, so abort.
                    __TBB_machine_transaction_conflict_abort();
                }
#endif
                s.transaction_state = RTM_transacting_writer;
                s.my_scoped_lock.internal_set_mutex(this); // need mutex for release()
                return; // successfully started speculation
            }
            ++num_retries;
        } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_write) );
    }

    if(only_speculate) return;                  // should apply a real try_lock...
    s.my_scoped_lock.acquire(*this, true);      // kill transactional writers
    __TBB_ASSERT(!w_flag, "After acquire for write, w_flag already true");
    w_flag = true;                              // kill transactional readers
    s.transaction_state = RTM_real_writer;
    return;
}

//! Acquire read lock on given mutex.
//  only_speculate : true if we are doing a try_acquire.  If true and we fail to speculate, don't
//     really acquire the lock, return and do a try_acquire on the contained spin_rw_mutex.  If
//     the lock is already held by a writer, just return.
void x86_rtm_rw_mutex::internal_acquire_reader(x86_rtm_rw_mutex::scoped_lock& s, bool only_speculate) {
    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "scoped_lock already in transaction");
    if(tbb::internal::governor::speculation_enabled()) {
        int num_retries = 0;
        unsigned int abort_code;
        do {
            tbb::internal::atomic_backoff backoff;
            // if in try_acquire, and lock is held as writer, don't attempt to speculate.
            if(w_flag) {
                if(only_speculate) return;
                do {
                    backoff.pause(); // test the spin_rw_mutex (real readers or writers)
                } while(w_flag);
            }
            // _xbegin returns -1 on success or the abort code, so capture it
            if((abort_code = __TBB_machine_begin_transaction()) == ~(unsigned int)(0) )
            {
                // started speculation
#if !__TBB_RW_MUTEX_DELAY_TEST
                if(w_flag) { // add w_flag to read-set.
                    __TBB_machine_transaction_conflict_abort(); // writer grabbed the lock, so abort.
                }
#endif
                s.transaction_state = RTM_transacting_reader;
                s.my_scoped_lock.internal_set_mutex(this); // need mutex for release()
                return; // successfully started speculation
            }
            // fallback path
            // Retry only if there is any hope of getting into a transaction soon.
            // Retry in the following cases (from Section 8.3.5 of the Intel(R)
            // Architecture Instruction Set Extensions Programming Reference):
            // 1. the abort was caused by an XABORT instruction (bit 0 of EAX is set)
            // 2. the transaction may succeed on a retry (bit 1 of EAX is set)
            // 3. another logical processor conflicted with a memory address that was
            //    part of the transaction that aborted (bit 2 of EAX is set)
            // That is, retry if (abort_code & 0x7) is non-zero.
            ++num_retries;
        } while( (abort_code & speculation_retry) != 0 && (num_retries < retry_threshold_read) );
    }

    if(only_speculate) return;
    s.my_scoped_lock.acquire( *this, false );
    s.transaction_state = RTM_real_reader;
}
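
// Illustrative sketch (not part of the implementation): the retry decision used by both
// acquire paths above, restated as a standalone predicate.  The parameter names are
// placeholders for the locals used in the loops.
//
//     inline bool worth_retrying( unsigned int abort_code, int num_retries, int threshold ) {
//         // bits 0-2 of the abort status: XABORT, may-succeed-on-retry, memory conflict
//         return (abort_code & speculation_retry) != 0 && num_retries < threshold;
//     }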

//! Upgrade reader to become a writer.
/** Returns whether the upgrade happened without releasing and re-acquiring the lock */
bool x86_rtm_rw_mutex::internal_upgrade(x86_rtm_rw_mutex::scoped_lock& s)
{
    switch(s.transaction_state) {
    case RTM_real_reader: {
        s.transaction_state = RTM_real_writer;
        bool no_release = s.my_scoped_lock.upgrade_to_writer();
        __TBB_ASSERT(!w_flag, "After upgrade_to_writer, w_flag already true");
        w_flag = true;
        return no_release;
    }
    case RTM_transacting_reader:
        s.transaction_state = RTM_transacting_writer;
        // No need to add w_flag to the read-set even if __TBB_RW_MUTEX_DELAY_TEST is set,
        // because testing the whole spin_rw_mutex (via the this pointer) is sufficient on release.
        return true;
    default:
        __TBB_ASSERT(false, "Invalid state for upgrade");
        return false;
    }
}

//! Downgrade writer to a reader.
bool x86_rtm_rw_mutex::internal_downgrade(x86_rtm_rw_mutex::scoped_lock& s) {
    switch(s.transaction_state) {
    case RTM_real_writer:
        s.transaction_state = RTM_real_reader;
        __TBB_ASSERT(w_flag, "Before downgrade_to_reader w_flag not true");
        w_flag = false;
        return s.my_scoped_lock.downgrade_to_reader();
    case RTM_transacting_writer:
#if __TBB_RW_MUTEX_DELAY_TEST
        if(this->state) { // a reader or writer has acquired mutex for real.
            __TBB_machine_transaction_conflict_abort();
        }
#endif
        s.transaction_state = RTM_transacting_reader;
        return true;
    default:
        __TBB_ASSERT(false, "Invalid state for downgrade");
        return false;
    }
}

//! Try to acquire write lock on the given mutex.
//  There may be reader(s) which acquired the spin_rw_mutex, as well as possibly
//  transactional reader(s).  If this is the case, the acquire will fail, and assigning
//  w_flag will kill the transactors.  So we only assign w_flag if we have successfully
//  acquired the lock.
bool x86_rtm_rw_mutex::internal_try_acquire_writer(x86_rtm_rw_mutex::scoped_lock& s)
{
    internal_acquire_writer(s, /*only_speculate=*/true);
    if(s.transaction_state == RTM_transacting_writer) {
        return true;
    }
    __TBB_ASSERT(s.transaction_state == RTM_not_in_mutex, "Trying to acquire writer which is already allocated");
    // transacting write acquire failed. try_acquire the real mutex
    bool result = s.my_scoped_lock.try_acquire(*this, true);
    if(result) {
        // only shoot down readers if we're not transacting ourselves
        __TBB_ASSERT(!w_flag, "After try_acquire_writer, w_flag already true");
        w_flag = true;
        s.transaction_state = RTM_real_writer;
    }
    return result;
}

void x86_rtm_rw_mutex::internal_construct() {
    ITT_SYNC_CREATE(this, _T("tbb::x86_rtm_rw_mutex"), _T(""));
}
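
// Minimal usage sketch (not part of this translation unit), assuming the public
// speculative_spin_rw_mutex wrapper declared in tbb/spin_rw_mutex.h is backed by
// x86_rtm_rw_mutex when __TBB_TSX_AVAILABLE.  Names outside this file are illustrative,
// not a definitive statement of the public API.
//
//     #include "tbb/spin_rw_mutex.h"
//
//     tbb::speculative_spin_rw_mutex m;
//     {
//         // write==false requests a read lock; speculation is attempted first and the
//         // code falls back to the contained spin_rw_mutex if it cannot speculate.
//         tbb::speculative_spin_rw_mutex::scoped_lock lock( m, /*write=*/false );
//         /* ... read shared data ... */
//         // upgrade_to_writer() returns false if the lock had to be released and
//         // re-acquired during the upgrade (see internal_upgrade above).
//         lock.upgrade_to_writer();
//         /* ... modify shared data ... */
//     }   // ~scoped_lock releases: XEND for a speculative acquire, a real unlock otherwise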

} // namespace internal
} // namespace interface8
} // namespace tbb

#endif /* __TBB_TSX_AVAILABLE */