1 /*****************************************************************************
2 Copyright (c) 1995, 2018, Oracle and/or its affiliates. All Rights Reserved.
3 Copyright (c) 2008, Google Inc.
4 
5 Portions of this file contain modifications contributed and copyrighted by
6 Google, Inc. Those modifications are gratefully acknowledged and are described
7 briefly in the InnoDB documentation. The contributions by Google are
8 incorporated with their permission, and subject to the conditions contained in
9 the file COPYING.Google.
10 
11 This program is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License, version 2.0, as published by the
13 Free Software Foundation.
14 
15 This program is also distributed with certain software (including but not
16 limited to OpenSSL) that is licensed under separate terms, as designated in a
17 particular file or component or in included license documentation. The authors
18 of MySQL hereby grant you an additional permission to link the program and
19 your derivative works with the separately licensed software that they have
20 included with MySQL.
21 
22 This program is distributed in the hope that it will be useful, but WITHOUT
23 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
24 FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
25 for more details.
26 
27 You should have received a copy of the GNU General Public License along with
28 this program; if not, write to the Free Software Foundation, Inc.,
29 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
30 
31 *****************************************************************************/
32 
33 /** @file include/os0atomic.h
34  Macros for using atomics
35 
36  Created 2012-09-23 Sunny Bains (Split from os0sync.h)
37  *******************************************************/
38 
39 #ifndef os0atomic_h
40 #define os0atomic_h
41 
42 #include "univ.i"
43 
#ifdef _WIN32

/** On Windows, InterlockedExchange operates on LONG variable */
typedef LONG lock_word_t;

#elif defined(MUTEX_FUTEX)

/* Futex-based mutexes: the futex interface operates on an int, so the
lock word must be an int as well. */
typedef int lock_word_t;

#else

/* Default: a native machine word, used with the atomic operations
declared below. */
typedef ulint lock_word_t;

#endif /* _WIN32 */
58 
#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || \
    defined _M_X64 || defined __WIN__

/* x86/x64 CPUs provide a comparatively strong memory ordering; code
elsewhere may use this define to skip some explicit barriers on these
targets. */
#define IB_STRONG_MEMORY_MODEL

#endif /* __i386__ || __x86_64__ || _M_IX86 || _M_X64 || __WIN__ */
65 
/** Atomic compare-and-swap and increment for InnoDB. */

/** Do an atomic test and set.
NOTE: the implementation is supplied by os0atomic.ic, included at the
bottom of this header.
@param[in,out]	ptr	Memory location to set
@param[in]	new_val	new value
@return	old value of memory location. */
UNIV_INLINE
lock_word_t os_atomic_test_and_set(volatile lock_word_t *ptr,
                                   lock_word_t new_val);

/** Do an atomic compare and set
The swap is performed only if *ptr equals old_val.
@param[in,out]	ptr	Memory location to set
@param[in]	old_val	old value to compare
@param[in]	new_val	new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t os_atomic_val_compare_and_swap(volatile lock_word_t *ptr,
                                           lock_word_t old_val,
                                           lock_word_t new_val);
85 
#ifdef _WIN32

/* NOTE: these Windows helpers take new_val BEFORE old_val, which is the
opposite of the os_compare_and_swap_*() macro argument order below. */

/** Atomic compare and exchange of signed integers (both 32 and 64 bit).
 @return value found before the exchange.
 If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
lint win_cmp_and_xchg_lint(
    volatile lint *ptr, /*!< in/out: source/destination */
    lint new_val,       /*!< in: exchange value */
    lint old_val);      /*!< in: value to compare to */

/** Atomic addition of signed integers.
 @return Initial value of the variable pointed to by ptr */
UNIV_INLINE
lint win_xchg_and_add(volatile lint *ptr, /*!< in/out: address of destination */
                      lint val);          /*!< in: number to be added */

/** Atomic compare and exchange of unsigned integers.
 @return value found before the exchange.
 If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
ulint win_cmp_and_xchg_ulint(
    volatile ulint *ptr, /*!< in/out: source/destination */
    ulint new_val,       /*!< in: exchange value */
    ulint old_val);      /*!< in: value to compare to */

/** Atomic compare and exchange of 32 bit unsigned integers.
 @return value found before the exchange.
 If it is not equal to old_value the exchange did not happen. */
UNIV_INLINE
DWORD
win_cmp_and_xchg_dword(volatile DWORD *ptr, /*!< in/out: source/destination */
                       DWORD new_val,       /*!< in: exchange value */
                       DWORD old_val);      /*!< in: value to compare to */
120 
/** Returns true if swapped, ptr is pointer to target, old_val is value to
 compare to, new_val is the value to swap in. */

#define os_compare_and_swap_lint(ptr, old_val, new_val) \
  (win_cmp_and_xchg_lint(ptr, new_val, old_val) == old_val)

#define os_compare_and_swap_ulint(ptr, old_val, new_val) \
  (win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val)

/* InterlockedCompareExchange() operates on LONG, so 32-bit unsigned
operands are cast through LONG for the call and the comparison. */
#define os_compare_and_swap_uint32(ptr, old_val, new_val)                  \
  (InterlockedCompareExchange(                                             \
       reinterpret_cast<volatile LONG *>(ptr), static_cast<LONG>(new_val), \
       static_cast<LONG>(old_val)) == static_cast<LONG>(old_val))

/* The 64-bit variant goes through LONGLONG the same way. */
#define os_compare_and_swap_uint64(ptr, old_val, new_val)                   \
  (InterlockedCompareExchange64(reinterpret_cast<volatile LONGLONG *>(ptr), \
                                static_cast<LONGLONG>(new_val),             \
                                static_cast<LONGLONG>(old_val)) ==          \
   static_cast<LONGLONG>(old_val))

/* Windows thread HANDLEs are of type PVOID */
#define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
  (InterlockedCompareExchangePointer(ptr, new_val, old_val) == old_val)

#define INNODB_RW_LOCKS_USE_ATOMICS
#define IB_ATOMICS_STARTUP_MSG \
  "Mutexes and rw_locks use Windows interlocked functions"
148 
/** Returns the resulting value, ptr is pointer to target, amount is the
 amount of increment.  The win_xchg_and_add()/InterlockedExchangeAdd()
 primitives return the PREVIOUS value, so "+ amount" converts that into
 the post-increment result. */

#define os_atomic_increment_lint(ptr, amount) \
  (win_xchg_and_add(ptr, amount) + amount)

#define os_atomic_increment_ulint(ptr, amount)                                 \
  (static_cast<ulint>(win_xchg_and_add(reinterpret_cast<volatile lint *>(ptr), \
                                       static_cast<lint>(amount))) +           \
   static_cast<ulint>(amount))

#define os_atomic_increment_uint32(ptr, amount)                             \
  (static_cast<ulint>(InterlockedExchangeAdd(reinterpret_cast<long *>(ptr), \
                                             static_cast<long>(amount))) +  \
   static_cast<ulint>(amount))

#define os_atomic_increment_uint64(ptr, amount)                             \
  (static_cast<ib_uint64_t>(InterlockedExchangeAdd64(                       \
       reinterpret_cast<LONGLONG *>(ptr), static_cast<LONGLONG>(amount))) + \
   static_cast<ib_uint64_t>(amount))

/** Returns the resulting value, ptr is pointer to target, amount is the
 amount to decrement. There is no atomic subtract function on Windows,
 so decrement is implemented as the atomic addition of a negated amount. */

#define os_atomic_decrement_lint(ptr, amount) \
  (win_xchg_and_add(ptr, -(static_cast<lint>(amount))) - amount)

#define os_atomic_decrement_ulint(ptr, amount)                                 \
  (static_cast<ulint>(win_xchg_and_add(reinterpret_cast<volatile lint *>(ptr), \
                                       -(static_cast<lint>(amount)))) -        \
   static_cast<ulint>(amount))

#define os_atomic_decrement_uint32(ptr, amount)                        \
  (static_cast<ib_uint32_t>(InterlockedExchangeAdd(                    \
       reinterpret_cast<long *>(ptr), -(static_cast<long>(amount)))) - \
   static_cast<ib_uint32_t>(amount))

#define os_atomic_decrement_uint64(ptr, amount)                                \
  (static_cast<ib_uint64_t>(InterlockedExchangeAdd64(                          \
       reinterpret_cast<LONGLONG *>(ptr), -(static_cast<LONGLONG>(amount)))) - \
   static_cast<ib_uint64_t>(amount))
190 
#else
/* Fall back to GCC-style atomic builtins. */

/** Returns true if swapped, ptr is pointer to target, old_val is value to
 compare to, new_val is the value to swap in. */

#if defined(HAVE_GCC_SYNC_BUILTINS)

/* Legacy __sync builtins: one generic macro covers every operand type,
the typed variants below just forward to it. */
#define os_compare_and_swap(ptr, old_val, new_val) \
  __sync_bool_compare_and_swap(ptr, old_val, new_val)

#define os_compare_and_swap_ulint(ptr, old_val, new_val) \
  os_compare_and_swap(ptr, old_val, new_val)

#define os_compare_and_swap_lint(ptr, old_val, new_val) \
  os_compare_and_swap(ptr, old_val, new_val)

#define os_compare_and_swap_uint32(ptr, old_val, new_val) \
  os_compare_and_swap(ptr, old_val, new_val)

#define os_compare_and_swap_uint64(ptr, old_val, new_val) \
  os_compare_and_swap(ptr, old_val, new_val)

#else

216 UNIV_INLINE
os_compare_and_swap_ulint(volatile ulint * ptr,ulint old_val,ulint new_val)217 bool os_compare_and_swap_ulint(volatile ulint *ptr, ulint old_val,
218                                ulint new_val) {
219   return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
220                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
221 }
222 
223 UNIV_INLINE
os_compare_and_swap_lint(volatile lint * ptr,lint old_val,lint new_val)224 bool os_compare_and_swap_lint(volatile lint *ptr, lint old_val, lint new_val) {
225   return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
226                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
227 }
228 
229 UNIV_INLINE
os_compare_and_swap_uint32(volatile ib_uint32_t * ptr,ib_uint32_t old_val,ib_uint32_t new_val)230 bool os_compare_and_swap_uint32(volatile ib_uint32_t *ptr, ib_uint32_t old_val,
231                                 ib_uint32_t new_val) {
232   return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
233                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
234 }
235 
236 UNIV_INLINE
os_compare_and_swap_uint64(volatile ib_uint64_t * ptr,ib_uint64_t old_val,ib_uint64_t new_val)237 bool os_compare_and_swap_uint64(volatile ib_uint64_t *ptr, ib_uint64_t old_val,
238                                 ib_uint64_t new_val) {
239   return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
240                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
241 }
242 
#endif /* HAVE_GCC_SYNC_BUILTINS */

/* CAS on thread ids is only defined when configure determined that the
compiler can operate atomically on pthread_t. */
#ifdef HAVE_IB_ATOMIC_PTHREAD_T_GCC
#if defined(HAVE_GCC_SYNC_BUILTINS)
#define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
  os_compare_and_swap(ptr, old_val, new_val)
#else
250 UNIV_INLINE
os_compare_and_swap_thread_id(volatile os_thread_id_t * ptr,os_thread_id_t old_val,os_thread_id_t new_val)251 bool os_compare_and_swap_thread_id(volatile os_thread_id_t *ptr,
252                                    os_thread_id_t old_val,
253                                    os_thread_id_t new_val) {
254   return __atomic_compare_exchange_n(ptr, &old_val, new_val, 0,
255                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
256 }
#endif /* HAVE_GCC_SYNC_BUILTINS */
#define INNODB_RW_LOCKS_USE_ATOMICS
#define IB_ATOMICS_STARTUP_MSG "Mutexes and rw_locks use GCC atomic builtins"
#else /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
/* Without atomic pthread_t support, rw_locks cannot use atomics. */
#define IB_ATOMICS_STARTUP_MSG \
  "Mutexes use GCC atomic builtins, rw_locks do not"
#endif /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
264 
/** Returns the resulting value, ptr is pointer to target, amount is the
 amount of increment.  Unlike the Windows branch above, both GCC builtins
 used here already return the post-increment value. */

#if defined(HAVE_GCC_SYNC_BUILTINS)
#define os_atomic_increment(ptr, amount) __sync_add_and_fetch(ptr, amount)
#else
/* __ATOMIC_SEQ_CST gives the same full-barrier semantics as the
__sync builtin above. */
#define os_atomic_increment(ptr, amount) \
  __atomic_add_fetch(ptr, amount, __ATOMIC_SEQ_CST)
#endif /* HAVE_GCC_SYNC_BUILTINS */

#define os_atomic_increment_lint(ptr, amount) os_atomic_increment(ptr, amount)

#define os_atomic_increment_ulint(ptr, amount) os_atomic_increment(ptr, amount)

#define os_atomic_increment_uint32(ptr, amount) os_atomic_increment(ptr, amount)

#define os_atomic_increment_uint64(ptr, amount) os_atomic_increment(ptr, amount)
282 
/* Returns the resulting value, ptr is pointer to target, amount is the
amount to decrement.  Both builtins return the post-decrement value. */

#if defined(HAVE_GCC_SYNC_BUILTINS)
#define os_atomic_decrement(ptr, amount) __sync_sub_and_fetch(ptr, amount)
#else
/* __ATOMIC_SEQ_CST gives the same full-barrier semantics as the
__sync builtin above. */
#define os_atomic_decrement(ptr, amount) \
  __atomic_sub_fetch(ptr, amount, __ATOMIC_SEQ_CST)
#endif /* HAVE_GCC_SYNC_BUILTINS */

#define os_atomic_decrement_lint(ptr, amount) os_atomic_decrement(ptr, amount)

#define os_atomic_decrement_ulint(ptr, amount) os_atomic_decrement(ptr, amount)

#define os_atomic_decrement_uint32(ptr, amount) os_atomic_decrement(ptr, amount)

#define os_atomic_decrement_uint64(ptr, amount) os_atomic_decrement(ptr, amount)
300 
#endif

/* Convenience wrappers: the first argument "m" (a mutex) is ignored by
the expansion; the atomic operation alone suffices on these platforms. */
#define os_atomic_inc_ulint(m, v, d) os_atomic_increment_ulint(v, d)
#define os_atomic_dec_ulint(m, v, d) os_atomic_decrement_ulint(v, d)
/* Classic test-and-set / compare-and-swap shorthands. */
#define TAS(l, n) os_atomic_test_and_set((l), (n))
#define CAS(l, o, n) os_atomic_val_compare_and_swap((l), (o), (n))
307 
/** barrier definitions for memory ordering.
The first available mechanism is chosen, in order of preference:
GCC __atomic fences, GCC __sync_synchronize(), Solaris machine
barriers, x64 SSE fences, and finally no-op barriers. */
#ifdef HAVE_IB_GCC_ATOMIC_THREAD_FENCE
#define HAVE_MEMORY_BARRIER
#define os_rmb __atomic_thread_fence(__ATOMIC_ACQUIRE)
#define os_wmb __atomic_thread_fence(__ATOMIC_RELEASE)
#define IB_MEMORY_BARRIER_STARTUP_MSG \
  "GCC builtin __atomic_thread_fence() is used for memory barrier"

#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
/* __sync_synchronize() is a full barrier, used for both directions. */
#define HAVE_MEMORY_BARRIER
#define os_rmb __sync_synchronize()
#define os_wmb __sync_synchronize()
#define IB_MEMORY_BARRIER_STARTUP_MSG \
  "GCC builtin __sync_synchronize() is used for memory barrier"

#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
#define HAVE_MEMORY_BARRIER
#include <mbarrier.h>
#define os_rmb __machine_r_barrier()
#define os_wmb __machine_w_barrier()
#define IB_MEMORY_BARRIER_STARTUP_MSG \
  "Solaris memory ordering functions are used for memory barrier"

#elif defined(HAVE_WINDOWS_MM_FENCE) && defined(_WIN64)
#define HAVE_MEMORY_BARRIER
#include <mmintrin.h>
#define os_rmb _mm_lfence()
#define os_wmb _mm_sfence()
#define IB_MEMORY_BARRIER_STARTUP_MSG \
  "_mm_lfence() and _mm_sfence() are used for memory barrier"

#else
/* No barrier mechanism detected: the macros expand to nothing and
HAVE_MEMORY_BARRIER remains undefined. */
#define os_rmb
#define os_wmb
#define IB_MEMORY_BARRIER_STARTUP_MSG "Memory barrier is not used"
#endif
344 
345 #include "os0atomic.ic"
346 
347 #endif /* !os0atomic_h */
348