/*
 * SObjectizer-5
 */

/*!
 * \since
 * v.5.4.0
 *
 * \file
 * \brief Definition of various types of spinlocks.
 */

#pragma once

#include <atomic>
#include <thread>
#include <cstdint>

// NOTE: MSVC does not define __SSE2__. It signals SSE2 availability via
// _M_X64 (SSE2 is always present on x64) or _M_IX86_FP >= 2 on 32-bit x86.
#if defined(_MSC_VER) && \
	(defined(_M_X64) || (defined(_M_IX86_FP) && _M_IX86_FP >= 2))
	#define SO_5_ARCH_MSC_WITH_SSE2

	#include <intrin.h>
#endif

namespace so_5
{

//
// yield_backoff_t
//
/*!
 * \since
 * v.5.4.0
 *
 * \brief An implementation of a backoff object that uses
 * std::this_thread::yield.
 */
class yield_backoff_t
	{
	public :
		inline void
		operator()()
			{
				std::this_thread::yield();
			}
	};

//
// pause_backoff_t
//
/*!
 * \since
 * v.5.5.22.2
 *
 * \brief An implementation of a backoff object that uses the x86
 * pause instruction.
 *
 * \note
 * This implementation is provided by Pavel Begunkov.
 */
class pause_backoff_t
	{
	public :
		inline void
		operator()()
			{
#if (defined(__GNUC__) || defined(__clang__)) && \
	(defined(__x86_64__) || defined(_M_X64) || defined(_M_AMD64) || \
	defined(__amd64__) || defined(__amd64) || \
	defined(_M_IX86) || defined(__i386__) || defined(__i386))
				asm( "pause;" );
#elif defined(SO_5_ARCH_MSC_WITH_SSE2)
				_mm_pause();
#else
				// No cheap pause instruction available; plain busy-spin.
				;
#endif
			}
	};
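
/*
 * The Backoff template parameter used by the spinlocks below is a plain
 * policy: any default-constructible type with a nullary operator() will
 * do. A sketch of a user-defined policy (illustrative only, not part of
 * the library) that pauses a few times before yielding the thread:
 *
 *   class progressive_backoff_t
 *   	{
 *   		unsigned m_attempts = 0;
 *
 *   	public :
 *   		inline void
 *   		operator()()
 *   			{
 *   				if( ++m_attempts < 16 )
 *   					pause_backoff_t{}();
 *   				else
 *   					std::this_thread::yield();
 *   			}
 *   	};
 */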

//
// spinlock_t
//
/*!
 * \since
 * v.5.4.0
 *
 * \brief A simple spinlock (an analog of std::mutex).
 *
 * \note
 * Since v.5.5.22.2 a TATAS (test-and-test-and-set) spinlock
 * implementation is used. The implementation is provided by
 * Pavel Begunkov.
 */
template< class Backoff >
class spinlock_t
	{
	public :
		spinlock_t()
			{
				m_flag.store( false, std::memory_order_release );
			}
		spinlock_t( const spinlock_t & ) = delete;
		spinlock_t( spinlock_t && ) = delete;

		spinlock_t & operator=( const spinlock_t & ) = delete;
		spinlock_t & operator=( spinlock_t && ) = delete;

		//! Lock object.
		void
		lock()
			{
				Backoff backoff;

				// Test-and-test-and-set: spin on a cheap relaxed load and
				// attempt the exchange only when the lock looks free.
				do
					{
						while( m_flag.load( std::memory_order_relaxed ) )
							backoff();
					}
				while( m_flag.exchange( true, std::memory_order_acquire ) );
			}

		//! Unlock object.
		void
		unlock()
			{
				m_flag.store( false, std::memory_order_release );
			}

	private :
		//! Atomic flag which is used as the actual lock.
		std::atomic_bool m_flag;
	};

//
// default_spinlock_t
//
using default_spinlock_t = spinlock_t< pause_backoff_t >;
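
/*
 * A usage sketch (illustrative, not part of the library). spinlock_t
 * provides lock()/unlock() and therefore satisfies the BasicLockable
 * requirements, so it can be used with std::lock_guard from <mutex>:
 *
 *   so_5::default_spinlock_t counter_lock;
 *   int counter = 0;
 *
 *   void increment()
 *   	{
 *   		std::lock_guard< so_5::default_spinlock_t > guard{ counter_lock };
 *   		++counter; // short critical section
 *   	}
 */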

//
// rw_spinlock_t
//
/*!
 * \since
 * v.5.4.0
 *
 * \brief A simple multi-readers/single-writer spinlock
 * (an analog of std::shared_mutex).
 *
 * This implementation is based on Dmitry Vyukov's implementation
 * from the LLVM code base:
 * http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mutex.cc?revision=210345&view=markup
 */
template< class Backoff >
class rw_spinlock_t
	{
	private :
		//! Combined lock word: bit 0 is the writer flag, the remaining
		//! bits count the active readers (in read_lock units).
		std::atomic_uint_fast32_t m_counters;

		static constexpr std::uint_fast32_t unlocked = 0;
		static constexpr std::uint_fast32_t write_lock = 1;
		static constexpr std::uint_fast32_t read_lock = 2;

	public :
		rw_spinlock_t()
			{
				m_counters.store( unlocked, std::memory_order_release );
			}
		rw_spinlock_t( const rw_spinlock_t & ) = delete;

		rw_spinlock_t & operator=( const rw_spinlock_t & ) = delete;

		//! Lock object in shared mode.
		inline void
		lock_shared()
			{
				Backoff backoff;

				// Optimistically register as a reader, then wait until
				// a possible writer releases the lock.
				std::uint_fast32_t previous = m_counters.fetch_add(
						read_lock,
						std::memory_order_acquire );
				while( previous & write_lock )
					{
						backoff();

						previous = m_counters.load( std::memory_order_acquire );
					}
			}

		//! Unlock object locked in shared mode.
		inline void
		unlock_shared()
			{
				m_counters.fetch_sub( read_lock, std::memory_order_release );
			}

		//! Lock object in exclusive mode.
		inline void
		lock()
			{
				std::uint_fast32_t expected = unlocked;
				const std::uint_fast32_t desired = write_lock;

				// Fast path: grab the lock if there are no readers and
				// no writer.
				if( m_counters.compare_exchange_strong(
						expected, desired,
						std::memory_order_acquire,
						std::memory_order_relaxed ) )
					return;

				Backoff backoff;

				while( true )
					{
						if( unlocked == m_counters.load( std::memory_order_relaxed ) )
							{
								expected = unlocked;

								if( m_counters.compare_exchange_weak(
										expected, desired,
										std::memory_order_acquire,
										std::memory_order_relaxed ) )
									break;
							}

						backoff();
					}
			}

		//! Unlock object locked in exclusive mode.
		inline void
		unlock()
			{
				m_counters.fetch_sub( write_lock, std::memory_order_release );
			}
	};

using default_rw_spinlock_t = rw_spinlock_t< pause_backoff_t >;
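
/*
 * A usage sketch (illustrative, not part of the library). Readers use
 * lock_shared()/unlock_shared(); the single writer uses lock()/unlock().
 * The writer side satisfies BasicLockable, so std::lock_guard from
 * <mutex> applies:
 *
 *   so_5::default_rw_spinlock_t rw_lock;
 *   int shared_value = 0;
 *
 *   int read_value()
 *   	{
 *   		rw_lock.lock_shared();
 *   		const int v = shared_value;
 *   		rw_lock.unlock_shared();
 *   		return v;
 *   	}
 *
 *   void write_value( int v )
 *   	{
 *   		std::lock_guard< so_5::default_rw_spinlock_t > guard{ rw_lock };
 *   		shared_value = v;
 *   	}
 */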

//
// read_lock_guard_t
//
/*!
 * \since
 * v.5.4.0
 *
 * \brief Scoped guard for shared locks.
 */
template< class Lock >
class read_lock_guard_t
	{
	private :
		Lock & m_lock;

	public :
		read_lock_guard_t( Lock & l ) : m_lock( l )
			{
				m_lock.lock_shared();
			}
		~read_lock_guard_t()
			{
				m_lock.unlock_shared();
			}

		read_lock_guard_t( const read_lock_guard_t & ) = delete;
		read_lock_guard_t &
		operator=( const read_lock_guard_t & ) = delete;
	};
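
/*
 * A usage sketch (illustrative, not part of the library): the guard
 * makes the reader side of the example above exception-safe:
 *
 *   int read_value( so_5::default_rw_spinlock_t & rw_lock, const int & v )
 *   	{
 *   		so_5::read_lock_guard_t< so_5::default_rw_spinlock_t > guard{ rw_lock };
 *   		return v;
 *   	}
 */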

} /* namespace so_5 */
