//
// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// Most users requiring mutual exclusion should use Mutex.
// SpinLock is provided for use in three situations:
//   - for use in code that Mutex itself depends on
//   - to get a faster fast-path release under low contention (without an
//     atomic read-modify-write). In return, SpinLock has worse behavior under
//     contention, which is why Mutex is preferred in most situations.
//   - for async signal safety (see below)

// SpinLock is async signal safe. If a spinlock is used within a signal
// handler, all code that acquires the lock must ensure that the signal cannot
// arrive while the lock is held. Typically, this is done by blocking the
// signal.
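//
// For example, one way to satisfy this requirement is to block the signal
// around the critical section (an illustrative sketch; `lock` and SIGUSR1
// stand in for whatever lock and signal a caller actually uses):
//
//   sigset_t new_mask, old_mask;
//   sigemptyset(&new_mask);
//   sigaddset(&new_mask, SIGUSR1);  // The signal whose handler uses `lock`.
//   pthread_sigmask(SIG_BLOCK, &new_mask, &old_mask);
//   lock.Lock();
//   // ... critical section ...
//   lock.Unlock();
//   pthread_sigmask(SIG_SETMASK, &old_mask, nullptr);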

#ifndef ABSL_BASE_INTERNAL_SPINLOCK_H_
#define ABSL_BASE_INTERNAL_SPINLOCK_H_

#include <stdint.h>
#include <sys/types.h>
#include <atomic>

#include "absl/base/attributes.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/low_level_scheduling.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/internal/tsan_mutex_interface.h"
#include "absl/base/macros.h"
#include "absl/base/port.h"
#include "absl/base/thread_annotations.h"

namespace absl {
namespace base_internal {

class LOCKABLE SpinLock {
 public:
  SpinLock() : lockword_(kSpinLockCooperative) {
    ABSL_TSAN_MUTEX_CREATE(this, __tsan_mutex_not_static);
  }

  // Special constructor for use with static SpinLock objects. E.g.,
  //
  //    static SpinLock lock(base_internal::kLinkerInitialized);
  //
  // When initialized using this constructor, we depend on the fact
  // that the linker has already initialized the memory appropriately.
  // A SpinLock constructed like this can be freely used from global
  // initializers without worrying about the order in which global
  // initializers run.
  explicit SpinLock(base_internal::LinkerInitialized) {
    // Does nothing; lockword_ is already initialized
    ABSL_TSAN_MUTEX_CREATE(this, 0);
  }

  // Constructors that allow non-cooperative spinlocks to be created for use
  // inside thread schedulers. Normal clients should not use these.
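  //
  // For example, a low-level scheduler might create a non-cooperative lock
  // like this (illustrative only; whether this is appropriate depends
  // entirely on the caller):
  //
  //   SpinLock lock(base_internal::SCHEDULE_KERNEL_ONLY);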
  explicit SpinLock(base_internal::SchedulingMode mode);
  SpinLock(base_internal::LinkerInitialized,
           base_internal::SchedulingMode mode);

  ~SpinLock() { ABSL_TSAN_MUTEX_DESTROY(this, __tsan_mutex_not_static); }

  // Acquire this SpinLock.
  inline void Lock() EXCLUSIVE_LOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, 0);
    if (!TryLockImpl()) {
      SlowLock();
    }
    ABSL_TSAN_MUTEX_POST_LOCK(this, 0, 0);
  }

  // Try to acquire this SpinLock without blocking and return true if the
  // acquisition was successful. If the lock was not acquired, false is
  // returned. If this SpinLock is free at the time of the call, TryLock
  // will return true with high probability.
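  //
  // For example (an illustrative sketch; MutateSharedState() and
  // DoAlternateWork() are hypothetical helpers, not part of this API):
  //
  //   if (lock.TryLock()) {
  //     MutateSharedState();
  //     lock.Unlock();
  //   } else {
  //     DoAlternateWork();  // Avoid blocking when the lock is contended.
  //   }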
  inline bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
    ABSL_TSAN_MUTEX_PRE_LOCK(this, __tsan_mutex_try_lock);
    bool res = TryLockImpl();
    ABSL_TSAN_MUTEX_POST_LOCK(
        this, __tsan_mutex_try_lock | (res ? 0 : __tsan_mutex_try_lock_failed),
        0);
    return res;
  }

  // Release this SpinLock, which must be held by the calling thread.
  inline void Unlock() UNLOCK_FUNCTION() {
    ABSL_TSAN_MUTEX_PRE_UNLOCK(this, 0);
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    lockword_.store(lock_value & kSpinLockCooperative,
                    std::memory_order_release);

    if ((lock_value & kSpinLockDisabledScheduling) != 0) {
      base_internal::SchedulingGuard::EnableRescheduling(true);
    }
    if ((lock_value & kWaitTimeMask) != 0) {
      // Collect contentionz profile info, and speed the wakeup of any waiter.
      // The wait_cycles value indicates how long this thread spent waiting
      // for the lock.
      SlowUnlock(lock_value);
    }
    ABSL_TSAN_MUTEX_POST_UNLOCK(this, 0);
  }

  // Determine if the lock is held. When the lock is held by the invoking
  // thread, true will always be returned. Intended to be used as
  // CHECK(lock.IsHeld()).
  inline bool IsHeld() const {
    return (lockword_.load(std::memory_order_relaxed) & kSpinLockHeld) != 0;
  }

 protected:
  // These should not be exported except for testing.

  // Store number of cycles between wait_start_time and wait_end_time in a
  // lock value.
  static uint32_t EncodeWaitCycles(int64_t wait_start_time,
                                   int64_t wait_end_time);

  // Extract number of wait cycles in a lock value.
  static uint64_t DecodeWaitCycles(uint32_t lock_value);

  // Provide access to protected method above. Use for testing only.
  friend struct SpinLockTest;

 private:
  // lockword_ is used to store the following:
  //
  // bit[0] encodes whether a lock is being held.
  // bit[1] encodes whether a lock uses cooperative scheduling.
  // bit[2] encodes whether a lock disables scheduling.
  // bit[3:31] encodes time a lock spent on waiting as a 29-bit unsigned int.
  enum { kSpinLockHeld = 1 };
  enum { kSpinLockCooperative = 2 };
  enum { kSpinLockDisabledScheduling = 4 };
  enum { kSpinLockSleeper = 8 };
  enum { kWaitTimeMask =  // Includes kSpinLockSleeper.
    ~(kSpinLockHeld | kSpinLockCooperative | kSpinLockDisabledScheduling) };
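  // For instance, a lockword_ value of 0x0000000b is
  // kSpinLockHeld | kSpinLockCooperative | kSpinLockSleeper: a held
  // cooperative lock whose wait-time field is non-zero, so Unlock() will
  // take the SlowUnlock() path when the lock is released.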

  // Returns true if the provided scheduling mode is cooperative.
  static constexpr bool IsCooperative(
      base_internal::SchedulingMode scheduling_mode) {
    return scheduling_mode == base_internal::SCHEDULE_COOPERATIVE_AND_KERNEL;
  }

  uint32_t TryLockInternal(uint32_t lock_value, uint32_t wait_cycles);
  void InitLinkerInitializedAndCooperative();
  void SlowLock() ABSL_ATTRIBUTE_COLD;
  void SlowUnlock(uint32_t lock_value) ABSL_ATTRIBUTE_COLD;
  uint32_t SpinLoop(int64_t initial_wait_timestamp, uint32_t* wait_cycles);

  inline bool TryLockImpl() {
    uint32_t lock_value = lockword_.load(std::memory_order_relaxed);
    return (TryLockInternal(lock_value, 0) & kSpinLockHeld) == 0;
  }

  std::atomic<uint32_t> lockword_;

  SpinLock(const SpinLock&) = delete;
  SpinLock& operator=(const SpinLock&) = delete;
};

// Corresponding locker object that arranges to acquire a spinlock for
// the duration of a C++ scope.
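//
// For example (an illustrative sketch; `mu` and `counter` are hypothetical):
//
//   static SpinLock mu(base_internal::kLinkerInitialized);
//   static int counter GUARDED_BY(mu);
//
//   void Increment() {
//     SpinLockHolder l(&mu);  // Releases mu when l goes out of scope.
//     ++counter;
//   }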
class SCOPED_LOCKABLE SpinLockHolder {
 public:
  inline explicit SpinLockHolder(SpinLock* l) EXCLUSIVE_LOCK_FUNCTION(l)
      : lock_(l) {
    l->Lock();
  }
  inline ~SpinLockHolder() UNLOCK_FUNCTION() { lock_->Unlock(); }

  SpinLockHolder(const SpinLockHolder&) = delete;
  SpinLockHolder& operator=(const SpinLockHolder&) = delete;

 private:
  SpinLock* lock_;
};

// Register a hook for profiling support.
//
// The function pointer registered here will be called whenever a spinlock is
// contended. The callback is given an opaque handle to the contended spinlock
// and the number of wait cycles. This is thread-safe, but only a single
// profiler can be registered. It is an error to call this function multiple
// times with different arguments.
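//
// For example (an illustrative sketch; MySpinLockProfiler is a hypothetical
// callback defined by the caller):
//
//   void MySpinLockProfiler(const void* lock, int64_t wait_cycles) {
//     // Record the contention event, e.g. in a histogram keyed by `lock`.
//   }
//
//   // Call once, early in the program:
//   base_internal::RegisterSpinLockProfiler(&MySpinLockProfiler);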
void RegisterSpinLockProfiler(void (*fn)(const void* lock,
                                         int64_t wait_cycles));

//------------------------------------------------------------------------------
// Public interface ends here.
//------------------------------------------------------------------------------

// If (result & kSpinLockHeld) == 0, then *this was successfully locked.
// Otherwise, returns last observed value for lockword_.
inline uint32_t SpinLock::TryLockInternal(uint32_t lock_value,
                                          uint32_t wait_cycles) {
  if ((lock_value & kSpinLockHeld) != 0) {
    return lock_value;
  }

  uint32_t sched_disabled_bit = 0;
  if ((lock_value & kSpinLockCooperative) == 0) {
    // For non-cooperative locks we must make sure we mark ourselves as
    // non-reschedulable before we attempt to CompareAndSwap.
    if (base_internal::SchedulingGuard::DisableRescheduling()) {
      sched_disabled_bit = kSpinLockDisabledScheduling;
    }
  }

  if (!lockword_.compare_exchange_strong(
          lock_value,
          kSpinLockHeld | lock_value | wait_cycles | sched_disabled_bit,
          std::memory_order_acquire, std::memory_order_relaxed)) {
    base_internal::SchedulingGuard::EnableRescheduling(sched_disabled_bit != 0);
  }

  return lock_value;
}

}  // namespace base_internal
}  // namespace absl

#endif  // ABSL_BASE_INTERNAL_SPINLOCK_H_