1 /* Copyright (c) 2020, 2021, Oracle and/or its affiliates.
2
3 This program is free software; you can redistribute it and/or modify
4 it under the terms of the GNU General Public License, version 2.0,
5 as published by the Free Software Foundation.
6
7 This program is also distributed with certain software (including
8 but not limited to OpenSSL) that is licensed under separate terms,
9 as designated in a particular file or component or in included license
10 documentation. The authors of MySQL hereby grant you an additional
11 permission to link the program and your derivative works with the
12 separately licensed software that they have included with MySQL.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License, version 2.0, for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
22
23 #include <iostream>
24 #include <map>
25 #include <memory>
26
27 #include "locks/shared_spin_lock.h"
28
Guard(lock::Shared_spin_lock & target,lock::Shared_spin_lock::enum_lock_acquisition acquisition,bool try_and_fail)29 lock::Shared_spin_lock::Guard::Guard(
30 lock::Shared_spin_lock &target,
31 lock::Shared_spin_lock::enum_lock_acquisition acquisition,
32 bool try_and_fail)
33 : m_target(target), m_acquisition(Shared_spin_lock::SL_NO_ACQUISITION)
34 {
35 if (acquisition != Shared_spin_lock::SL_NO_ACQUISITION)
36 {
37 this->acquire(acquisition, try_and_fail);
38 }
39 }
40
~Guard()41 lock::Shared_spin_lock::Guard::~Guard() { this->release(); }
42
operator ->()43 lock::Shared_spin_lock *lock::Shared_spin_lock::Guard::operator->()
44 {
45 return &this->m_target;
46 }
47
operator *()48 lock::Shared_spin_lock &lock::Shared_spin_lock::Guard::operator*()
49 {
50 return this->m_target;
51 }
52
acquire(enum_lock_acquisition acquisition,bool try_and_fail)53 lock::Shared_spin_lock::Guard &lock::Shared_spin_lock::Guard::acquire(
54 enum_lock_acquisition acquisition, bool try_and_fail)
55 {
56 assert(this->m_acquisition == Shared_spin_lock::SL_NO_ACQUISITION);
57 assert(acquisition == Shared_spin_lock::SL_SHARED ||
58 acquisition == Shared_spin_lock::SL_EXCLUSIVE);
59
60 this->m_acquisition= acquisition;
61
62 switch (this->m_acquisition)
63 {
64 case Shared_spin_lock::SL_SHARED:
65 {
66 if (try_and_fail)
67 {
68 this->m_target.try_shared();
69 if (!this->m_target.is_shared_acquisition())
70 {
71 this->m_acquisition= Shared_spin_lock::SL_NO_ACQUISITION;
72 }
73 }
74 else
75 {
76 this->m_target.acquire_shared();
77 }
78 break;
79 }
80 case Shared_spin_lock::SL_EXCLUSIVE:
81 {
82 if (try_and_fail)
83 {
84 this->m_target.try_exclusive();
85 if (!this->m_target.is_exclusive_acquisition())
86 {
87 this->m_acquisition= Shared_spin_lock::SL_NO_ACQUISITION;
88 }
89 }
90 else
91 {
92 this->m_target.acquire_exclusive();
93 }
94 break;
95 }
96 default:
97 break;
98 }
99 return (*this);
100 }
101
release()102 lock::Shared_spin_lock::Guard &lock::Shared_spin_lock::Guard::release()
103 {
104 if (this->m_acquisition == Shared_spin_lock::SL_NO_ACQUISITION)
105 {
106 return (*this);
107 }
108 switch (this->m_acquisition)
109 {
110 case Shared_spin_lock::SL_SHARED:
111 {
112 this->m_target.release_shared();
113 this->m_acquisition= Shared_spin_lock::SL_NO_ACQUISITION;
114 break;
115 }
116 case Shared_spin_lock::SL_EXCLUSIVE:
117 {
118 this->m_target.release_exclusive();
119 this->m_acquisition= Shared_spin_lock::SL_NO_ACQUISITION;
120 break;
121 }
122 default:
123 break;
124 }
125 return (*this);
126 }
127
Guard(const Shared_spin_lock::Guard & rhs)128 lock::Shared_spin_lock::Guard::Guard(const Shared_spin_lock::Guard &rhs)
129 : m_target(rhs.m_target)
130 {
131 }
132
// Assignment operator (private, assignment is not meaningful): a
// deliberate no-op — lock ownership must never be transferred or
// duplicated between guards, so the left-hand guard keeps its state.
lock::Shared_spin_lock::Guard &lock::Shared_spin_lock::Guard::operator=(
    Shared_spin_lock::Guard const &)
{
  return (*this);
}
138
// Start with no readers (m_shared_access), no writer (m_exclusive_access)
// and no registered exclusive owner thread (m_exclusive_owner).
lock::Shared_spin_lock::Shared_spin_lock()
    : m_shared_access(0), m_exclusive_access(0), m_exclusive_owner(0)
{
}
143
~Shared_spin_lock()144 lock::Shared_spin_lock::~Shared_spin_lock() {}
145
acquire_shared()146 lock::Shared_spin_lock &lock::Shared_spin_lock::acquire_shared()
147 {
148 return this->try_or_spin_shared_lock(false);
149 }
150
acquire_exclusive()151 lock::Shared_spin_lock &lock::Shared_spin_lock::acquire_exclusive()
152 {
153 return this->try_or_spin_exclusive_lock(false);
154 }
155
try_shared()156 lock::Shared_spin_lock &lock::Shared_spin_lock::try_shared()
157 {
158 return this->try_or_spin_shared_lock(true);
159 }
160
try_exclusive()161 lock::Shared_spin_lock &lock::Shared_spin_lock::try_exclusive()
162 {
163 return this->try_or_spin_exclusive_lock(true);
164 }
165
release_shared()166 lock::Shared_spin_lock &lock::Shared_spin_lock::release_shared()
167 {
168 assert(my_atomic_load32(&this->m_shared_access) > 0);
169 my_atomic_add32(&this->m_shared_access, -1);
170 return (*this);
171 }
172
// Drop one level of (possibly re-entrant) exclusive ownership.  Only the
// owning thread may release; on the last level the owner slot is cleared
// BEFORE the access counter is decremented, so the lock is never
// observable as free while still attributed to this thread.
lock::Shared_spin_lock &lock::Shared_spin_lock::release_exclusive()
{
  my_thread_t self= my_thread_self();
  my_thread_t owner= (my_thread_t)(my_atomic_load64(&this->m_exclusive_owner));
  assert(self != 0);
  assert(my_thread_equal(owner, self));
  // Release builds: silently ignore a release attempted by a non-owner.
  if (!my_thread_equal(owner, self)) return (*this);

  // Last re-entrant level about to be released: clear the owner first.
  if (my_atomic_load32(&this->m_exclusive_access) == 1)
    my_atomic_store64(&this->m_exclusive_owner, 0);

  assert(my_atomic_load32(&this->m_exclusive_access) > 0);
  my_atomic_add32(&this->m_exclusive_access, -1);
  return (*this);
}
188
is_shared_acquisition()189 bool lock::Shared_spin_lock::is_shared_acquisition()
190 {
191 return my_atomic_load32(&this->m_shared_access) != 0;
192 }
193
is_exclusive_acquisition()194 bool lock::Shared_spin_lock::is_exclusive_acquisition()
195 {
196 if (my_atomic_load32(&this->m_exclusive_access) != 0)
197 {
198 my_thread_t self= my_thread_self();
199 my_thread_t owner=
200 (my_thread_t)(my_atomic_load64(&this->m_exclusive_owner));
201 return my_thread_equal(owner, self);
202 }
203 return false;
204 }
205
try_or_spin_shared_lock(bool try_and_fail)206 lock::Shared_spin_lock &lock::Shared_spin_lock::try_or_spin_shared_lock(
207 bool try_and_fail)
208 {
209 if (try_and_fail)
210 {
211 this->try_shared_lock();
212 }
213 else
214 {
215 this->spin_shared_lock();
216 }
217 return (*this);
218 }
219
// Exclusive-mode request with re-entrancy support: if the calling thread
// already owns the lock exclusively, just bump the recursion counter.
// Otherwise attempt (or spin for) a fresh acquisition and, on success,
// record the calling thread as the owner.
lock::Shared_spin_lock &lock::Shared_spin_lock::try_or_spin_exclusive_lock(
    bool try_and_fail)
{
  my_thread_t self= my_thread_self();
  my_thread_t owner= (my_thread_t)(my_atomic_load64(&this->m_exclusive_owner));
  if (owner != 0 && my_thread_equal(owner, self))
  {
    // Re-entrant acquisition by the current owner: count one more level.
    my_atomic_add32(&this->m_exclusive_access, 1);
    return (*this);
  }

  if (try_and_fail)
  {
    if (!this->try_exclusive_lock())
    {
      // Single attempt failed: leave the lock untouched, owner unset.
      return (*this);
    }
  }
  else
  {
    this->spin_exclusive_lock();
  }
  // Lock acquired: register ourselves as the exclusive owner.
#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__DragonFly__)
  // NOTE(review): the explicit cast suggests my_thread_t is not directly
  // convertible to the 64-bit integer slot on these platforms — confirm
  // against the my_thread_t definition.
  my_atomic_store64(&this->m_exclusive_owner, reinterpret_cast<int64>(self));
#else
  my_atomic_store64(&this->m_exclusive_owner, self);
#endif
  return (*this);
}
249
// Single, non-blocking attempt at shared access.  Protocol: optimistically
// register as a reader, then re-check that no writer raced in between the
// initial check and the registration; if one did, roll back and fail.
bool lock::Shared_spin_lock::try_shared_lock()
{
  // Fast bail-out: a writer already holds (or is taking) the lock.
  if (my_atomic_load32(&this->m_exclusive_access) != 0)
  {
    return false;
  }

  // Optimistically announce this reader.
  my_atomic_add32(&this->m_shared_access, 1);

  // A writer may have claimed the lock between the check above and the
  // increment: undo the registration and report failure.
  if (my_atomic_load32(&this->m_exclusive_access) != 0)
  {
    my_atomic_add32(&this->m_shared_access, -1);
    return false;
  }
  return true;
}
266
// Single, non-blocking attempt at exclusive access.  Protocol: claim the
// writer slot with a CAS, then verify no readers are registered; if any
// are, give the slot back and report failure.
bool lock::Shared_spin_lock::try_exclusive_lock()
{
  int32 expected= 0;
  if (!my_atomic_cas32(&this->m_exclusive_access, &expected, 1))
  {
    // Another writer already holds the slot.
    return false;
  }
  if (my_atomic_load32(&this->m_shared_access) != 0)
  {
    // Active readers present: release the writer slot and fail.
    my_atomic_store32(&this->m_exclusive_access, 0);
    return false;
  }
  return true;
}
281
// Spin until shared access is obtained, yielding the CPU between failed
// attempts.  Same optimistic register-and-recheck protocol as
// try_shared_lock(), repeated until it succeeds.
void lock::Shared_spin_lock::spin_shared_lock()
{
  do
  {
    // Do not even attempt registration while a writer is present.
    if (my_atomic_load32(&this->m_exclusive_access) != 0)
    {
      this->yield();
      continue;
    }

    // Optimistically register as a reader ...
    my_atomic_add32(&this->m_shared_access, 1);

    // ... and back off if a writer raced in meanwhile.
    if (my_atomic_load32(&this->m_exclusive_access) != 0)
    {
      my_atomic_add32(&this->m_shared_access, -1);
      this->yield();
      continue;
    }

    break;  // Shared access secured.
  } while (true);
}
304
// Spin until exclusive access is obtained, in two phases: first win the
// writer slot via CAS (yielding between attempts), then wait for all
// registered readers to drain.
void lock::Shared_spin_lock::spin_exclusive_lock()
{
  bool success= false;
  do
  {
    int32 expected= 0;
    success= my_atomic_cas32(&this->m_exclusive_access, &expected, 1);
    if (!success) this->yield();
  } while (!success);

  // Writer slot is ours; existing readers may still be inside — wait
  // until the reader count reaches zero.
  while (my_atomic_load32(&this->m_shared_access) != 0)
  {
    this->yield();
  }
}
320
// Relinquish the current time slice so other threads (e.g. the lock
// holder we are spinning on) can make progress.
lock::Shared_spin_lock &lock::Shared_spin_lock::yield()
{
  my_thread_yield();
  return (*this);
}
326