/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * N.B. You most likely do _not_ want to use MicroSpinLock or any
 * other kind of spinlock.  Consider MicroLock instead.
 *
 * In short, spinlocks in preemptive multi-tasking operating systems
 * have serious problems and fast mutexes like std::mutex are almost
 * certainly the better choice, because letting the OS scheduler put a
 * thread to sleep is better for system responsiveness and throughput
 * than wasting a timeslice repeatedly querying a lock held by a
 * thread that's blocked, and you can't prevent userspace
 * programs from blocking.
 *
 * Spinlocks in an operating system kernel make much more sense than
 * they do in userspace.
 */

#pragma once

/*
 * @author Keith Adams <kma@fb.com>
 * @author Jordan DeLong <delong.j@fb.com>
 */

#include <array>
#include <atomic>
#include <cassert>
#include <cstdint>
#include <mutex>
#include <type_traits>

#include <folly/Portability.h>
#include <folly/lang/Align.h>
#include <folly/synchronization/SanitizeThread.h>
#include <folly/synchronization/detail/Sleeper.h>

namespace folly {

/*
 * A really, *really* small spinlock for fine-grained locking of lots
 * of teeny-tiny data.
 *
 * Zero-initializing these is guaranteed to be as good as calling
 * init(), since the free state is guaranteed to be all-bits zero.
 *
 * This class should be kept a POD, so we can use it in other packed
 * structs (gcc does not allow __attribute__((__packed__)) on structs that
 * contain non-POD data).  This means avoiding constructors, private
 * members, and the like.
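 *
 * Example usage (an illustrative sketch; the Counter type below is
 * hypothetical, not part of this header):
 *
 *   struct Counter {
 *     MicroSpinLock lock_; // zero-initialized when Counter is, or call init()
 *     int64_t value_;
 *   };
 *
 *   void bump(Counter& c) {
 *     std::lock_guard<MicroSpinLock> g(c.lock_);
 *     ++c.value_;
 *   }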
 */
struct MicroSpinLock {
  enum { FREE = 0, LOCKED = 1 };
  // lock_ can't be std::atomic<> to preserve POD-ness.
  uint8_t lock_;

  // Initialize this MSL.  It is unnecessary to call this if you
  // zero-initialize the MicroSpinLock.
  void init() noexcept { payload()->store(FREE); }

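  // Try to acquire the lock with a single CAS attempt; returns true on
  // success.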
  bool try_lock() noexcept {
    bool ret = cas(FREE, LOCKED);
    annotate_rwlock_try_acquired(
        this, annotate_rwlock_level::wrlock, ret, __FILE__, __LINE__);
    return ret;
  }

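  // Acquire the lock, spinning until it becomes free.  This is a
  // test-and-test-and-set loop: after a failed CAS, spin on a cheap
  // relaxed load (with detail::Sleeper backing off) until the lock
  // reads FREE, then retry the CAS.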
  void lock() noexcept {
    detail::Sleeper sleeper;
    while (!cas(FREE, LOCKED)) {
      do {
        sleeper.wait();
      } while (payload()->load(std::memory_order_relaxed) == LOCKED);
    }
    assert(payload()->load() == LOCKED);
    annotate_rwlock_acquired(
        this, annotate_rwlock_level::wrlock, __FILE__, __LINE__);
  }

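  // Release the lock.  The release store pairs with the acquire CAS in
  // lock()/try_lock() to publish the critical section's writes.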
  void unlock() noexcept {
    assert(payload()->load() == LOCKED);
    annotate_rwlock_released(
        this, annotate_rwlock_level::wrlock, __FILE__, __LINE__);
    payload()->store(FREE, std::memory_order_release);
  }

 private:
  std::atomic<uint8_t>* payload() noexcept {
    return reinterpret_cast<std::atomic<uint8_t>*>(&this->lock_);
  }

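  // Compare-and-swap on the lock byte: acquire ordering on success so
  // the critical section can't be reordered before the acquisition,
  // relaxed on failure since nothing was acquired.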
  bool cas(uint8_t compare, uint8_t newVal) noexcept {
    return std::atomic_compare_exchange_strong_explicit(
        payload(),
        &compare,
        newVal,
        std::memory_order_acquire,
        std::memory_order_relaxed);
  }
};
static_assert(
    std::is_pod<MicroSpinLock>::value,
    "MicroSpinLock must be kept a POD type.");

//////////////////////////////////////////////////////////////////////

/**
 * Array of spinlocks where each one is padded to prevent false sharing.
 * Useful for shard-based locking implementations in environments where
 * contention is unlikely.
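 *
 * Example of shard-based locking (an illustrative sketch; the modulo
 * sharding below is a hypothetical policy, not prescribed by this
 * header):
 *
 *   SpinLockArray<MicroSpinLock, 64> shards;
 *
 *   void touch(size_t key) {
 *     std::lock_guard<MicroSpinLock> g(shards[key % shards.size()]);
 *     // ... mutate the data owned by this shard ...
 *   }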
 */

template <class T, size_t N>
struct alignas(max_align_v) SpinLockArray {
  T& operator[](size_t i) noexcept { return data_[i].lock; }

  const T& operator[](size_t i) const noexcept { return data_[i].lock; }

  constexpr size_t size() const noexcept { return N; }

 private:
  struct PaddedSpinLock {
    PaddedSpinLock() : lock() {}
    T lock;
    char padding[hardware_destructive_interference_size - sizeof(T)];
  };
  static_assert(
      sizeof(PaddedSpinLock) == hardware_destructive_interference_size,
      "Invalid size of PaddedSpinLock");

  // Check that T cannot cross a cache-line boundary.
  static_assert(
      max_align_v > 0 &&
          hardware_destructive_interference_size % max_align_v == 0 &&
          sizeof(T) <= max_align_v,
      "T can cross cache line boundaries");

  char padding_[hardware_destructive_interference_size];
  std::array<PaddedSpinLock, N> data_;
};


//////////////////////////////////////////////////////////////////////

typedef std::lock_guard<MicroSpinLock> MSLGuard;
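
// Example (an illustrative sketch; gLock and touchShared are
// hypothetical names, not part of this header):
//
//   MicroSpinLock gLock = {}; // aggregate zero-init == FREE
//
//   void touchShared() {
//     MSLGuard g(gLock);
//     // ... critical section ...
//   }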

//////////////////////////////////////////////////////////////////////

} // namespace folly