1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2008, Google Inc.
5 
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11 
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License, version 2.0,
14 as published by the Free Software Foundation.
15 
16 This program is also distributed with certain software (including
17 but not limited to OpenSSL) that is licensed under separate terms,
18 as designated in a particular file or component or in included license
19 documentation.  The authors of MySQL hereby grant you an additional
20 permission to link the program and your derivative works with the
21 separately licensed software that they have included with MySQL.
22 
23 This program is distributed in the hope that it will be useful,
24 but WITHOUT ANY WARRANTY; without even the implied warranty of
25 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26 GNU General Public License, version 2.0, for more details.
27 
28 You should have received a copy of the GNU General Public License along with
29 this program; if not, write to the Free Software Foundation, Inc.,
30 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
31 
32 *****************************************************************************/
33 
34 /**************************************************//**
35 @file include/os0sync.h
36 The interface to the operating system
37 synchronization primitives.
38 
39 Created 9/6/1995 Heikki Tuuri
40 *******************************************************/
41 
42 #ifndef os0sync_h
43 #define os0sync_h
44 
45 #include "univ.i"
46 #include "ut0lst.h"
47 #include "sync0types.h"
48 
49 #if defined __i386__ || defined __x86_64__ || defined _M_IX86 \
50     || defined _M_X64 || defined __WIN__
51 
52 #define IB_STRONG_MEMORY_MODEL
53 
54 #endif /* __i386__ || __x86_64__ || _M_IX86 || _M_X64 || __WIN__ */
55 
56 #ifdef HAVE_WINDOWS_ATOMICS
57 typedef LONG lock_word_t;	/*!< On Windows, InterlockedExchange operates
58 				on LONG variable */
59 #elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE)
60 typedef ulint lock_word_t;
61 #else
62 
63 #define IB_LOCK_WORD_IS_BYTE
64 
65 typedef byte lock_word_t;
66 
67 #endif /* HAVE_WINDOWS_ATOMICS */
68 
69 #ifdef __WIN__
70 /** Native event (slow)*/
71 typedef HANDLE			os_native_event_t;
72 /** Native mutex */
73 typedef CRITICAL_SECTION	fast_mutex_t;
74 /** Native condition variable. */
75 typedef CONDITION_VARIABLE	os_cond_t;
76 #else
77 /** Native mutex */
78 typedef pthread_mutex_t		fast_mutex_t;
79 /** Native condition variable */
80 typedef pthread_cond_t		os_cond_t;
81 #endif
82 
83 /** Structure that includes Performance Schema Probe pfs_psi
84 in the os_fast_mutex structure if UNIV_PFS_MUTEX is defined */
struct os_fast_mutex_t {
	fast_mutex_t		mutex;	/*!< the wrapped native OS mutex
					(CRITICAL_SECTION on Windows,
					pthread_mutex_t elsewhere) */
#ifdef UNIV_PFS_MUTEX
	struct PSI_mutex*	pfs_psi;/*!< The performance schema
					instrumentation hook, present only
					when UNIV_PFS_MUTEX is defined */
#endif
};
92 
93 /** Operating system event handle */
94 typedef struct os_event*	os_event_t;
95 
96 /** An asynchronous signal sent between threads */
struct os_event {
#ifdef __WIN__
	HANDLE		handle;		/*!< kernel event object, slow,
					used on older Windows */
#endif
	os_fast_mutex_t	os_mutex;	/*!< this mutex protects the next
					fields: is_set, signal_count and
					cond_var waits */
	ibool		is_set;		/*!< this is TRUE when the event is
					in the signaled state, i.e., a thread
					does not stop if it tries to wait for
					this event */
	ib_int64_t	signal_count;	/*!< this is incremented each time
					the event becomes signaled; presumably
					the basis of the value returned by
					os_event_reset() — see
					os_event_wait_low() */
	os_cond_t	cond_var;	/*!< condition variable is used in
					waiting for the event */
	UT_LIST_NODE_T(os_event_t) os_event_list;
					/*!< list of all created events */
};
115 
116 /** Denotes an infinite delay for os_event_wait_time() */
117 #define OS_SYNC_INFINITE_TIME   ULINT_UNDEFINED
118 
119 /** Return value of os_event_wait_time() when the time is exceeded */
120 #define OS_SYNC_TIME_EXCEEDED   1
121 
122 /** Operating system mutex handle */
123 typedef struct os_mutex_t*	os_ib_mutex_t;
124 
125 /** Mutex protecting counts and the event and OS 'slow' mutex lists */
126 extern os_ib_mutex_t	os_sync_mutex;
127 
128 /** This is incremented by 1 in os_thread_create and decremented by 1 in
129 os_thread_exit */
130 extern ulint		os_thread_count;
131 
132 extern ulint		os_event_count;
133 extern ulint		os_mutex_count;
134 extern ulint		os_fast_mutex_count;
135 
136 /*********************************************************//**
137 Initializes global event and OS 'slow' mutex lists. */
138 UNIV_INTERN
139 void
140 os_sync_init(void);
141 /*==============*/
142 /*********************************************************//**
143 Frees created events and OS 'slow' mutexes. */
144 UNIV_INTERN
145 void
146 os_sync_free(void);
147 /*==============*/
148 /*********************************************************//**
149 Creates an event semaphore, i.e., a semaphore which may just have two states:
150 signaled and nonsignaled. The created event is manual reset: it must be reset
explicitly by calling os_event_reset().
152 @return	the event handle */
153 UNIV_INTERN
154 os_event_t
155 os_event_create(void);
156 /*==================*/
157 /**********************************************************//**
158 Sets an event semaphore to the signaled state: lets waiting threads
159 proceed. */
160 UNIV_INTERN
161 void
162 os_event_set(
163 /*=========*/
164 	os_event_t	event);	/*!< in: event to set */
165 /**********************************************************//**
166 Resets an event semaphore to the nonsignaled state. Waiting threads will
167 stop to wait for the event.
The return value should be passed to os_event_wait_low() if it is desired
169 that this thread should not wait in case of an intervening call to
170 os_event_set() between this os_event_reset() and the
171 os_event_wait_low() call. See comments for os_event_wait_low(). */
172 UNIV_INTERN
173 ib_int64_t
174 os_event_reset(
175 /*===========*/
176 	os_event_t	event);	/*!< in: event to reset */
177 /**********************************************************//**
178 Frees an event object. */
179 UNIV_INTERN
180 void
181 os_event_free(
182 /*==========*/
183 	os_event_t	event);	/*!< in: event to free */
184 
185 /**********************************************************//**
186 Waits for an event object until it is in the signaled state.
187 
188 Typically, if the event has been signalled after the os_event_reset()
189 we'll return immediately because event->is_set == TRUE.
190 There are, however, situations (e.g.: sync_array code) where we may
191 lose this information. For example:
192 
193 thread A calls os_event_reset()
194 thread B calls os_event_set()   [event->is_set == TRUE]
195 thread C calls os_event_reset() [event->is_set == FALSE]
196 thread A calls os_event_wait()  [infinite wait!]
197 thread C calls os_event_wait()  [infinite wait!]
198 
199 Where such a scenario is possible, to avoid infinite wait, the
200 value returned by os_event_reset() should be passed in as
201 reset_sig_count. */
202 UNIV_INTERN
203 void
204 os_event_wait_low(
205 /*==============*/
206 	os_event_t	event,		/*!< in: event to wait */
207 	ib_int64_t	reset_sig_count);/*!< in: zero or the value
208 					returned by previous call of
209 					os_event_reset(). */
210 
211 #define os_event_wait(event) os_event_wait_low(event, 0)
212 #define os_event_wait_time(event, t) os_event_wait_time_low(event, t, 0)
213 
214 /**********************************************************//**
215 Waits for an event object until it is in the signaled state or
216 a timeout is exceeded. In Unix the timeout is always infinite.
217 @return 0 if success, OS_SYNC_TIME_EXCEEDED if timeout was exceeded */
218 UNIV_INTERN
219 ulint
220 os_event_wait_time_low(
221 /*===================*/
222 	os_event_t	event,			/*!< in: event to wait */
223 	ulint		time_in_usec,		/*!< in: timeout in
224 						microseconds, or
225 						OS_SYNC_INFINITE_TIME */
226 	ib_int64_t	reset_sig_count);	/*!< in: zero or the value
227 						returned by previous call of
228 						os_event_reset(). */
229 /*********************************************************//**
230 Creates an operating system mutex semaphore. Because these are slow, the
231 mutex semaphore of InnoDB itself (ib_mutex_t) should be used where possible.
232 @return	the mutex handle */
233 UNIV_INTERN
234 os_ib_mutex_t
235 os_mutex_create(void);
236 /*=================*/
237 /**********************************************************//**
238 Acquires ownership of a mutex semaphore. */
239 UNIV_INTERN
240 void
241 os_mutex_enter(
242 /*===========*/
243 	os_ib_mutex_t	mutex);	/*!< in: mutex to acquire */
244 /**********************************************************//**
245 Releases ownership of a mutex. */
246 UNIV_INTERN
247 void
248 os_mutex_exit(
249 /*==========*/
250 	os_ib_mutex_t	mutex);	/*!< in: mutex to release */
251 /**********************************************************//**
Frees a mutex object. */
253 UNIV_INTERN
254 void
255 os_mutex_free(
256 /*==========*/
257 	os_ib_mutex_t	mutex);	/*!< in: mutex to free */
258 /**********************************************************//**
259 Acquires ownership of a fast mutex. Currently in Windows this is the same
260 as os_fast_mutex_lock!
261 @return	0 if success, != 0 if was reserved by another thread */
262 UNIV_INLINE
263 ulint
264 os_fast_mutex_trylock(
265 /*==================*/
266 	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
267 
268 /**********************************************************************
269 Following os_fast_ mutex APIs would be performance schema instrumented:
270 
271 os_fast_mutex_init
272 os_fast_mutex_lock
273 os_fast_mutex_unlock
274 os_fast_mutex_free
275 
276 These mutex APIs will point to corresponding wrapper functions that contain
277 the performance schema instrumentation.
278 
279 NOTE! The following macro should be used in mutex operation, not the
280 corresponding function. */
281 
282 #ifdef UNIV_PFS_MUTEX
283 # define os_fast_mutex_init(K, M)			\
284 	pfs_os_fast_mutex_init(K, M)
285 
286 # define os_fast_mutex_lock(M)				\
287 	pfs_os_fast_mutex_lock(M, __FILE__, __LINE__)
288 
289 # define os_fast_mutex_unlock(M)	pfs_os_fast_mutex_unlock(M)
290 
291 # define os_fast_mutex_free(M)		pfs_os_fast_mutex_free(M)
292 
293 /*********************************************************//**
294 NOTE! Please use the corresponding macro os_fast_mutex_init(), not directly
295 this function!
296 A wrapper function for os_fast_mutex_init_func(). Initializes an operating
297 system fast mutex semaphore. */
298 UNIV_INLINE
299 void
300 pfs_os_fast_mutex_init(
301 /*===================*/
302 	PSI_mutex_key		key,		/*!< in: Performance Schema
303 						key */
304 	os_fast_mutex_t*	fast_mutex);	/*!< out: fast mutex */
305 /**********************************************************//**
306 NOTE! Please use the corresponding macro os_fast_mutex_free(), not directly
307 this function!
Wrapper function for os_fast_mutex_free_func(). Also destroys the performance
309 schema probes when freeing the mutex */
310 UNIV_INLINE
311 void
312 pfs_os_fast_mutex_free(
313 /*===================*/
314 	os_fast_mutex_t*	fast_mutex);	/*!< in/out: mutex to free */
315 /**********************************************************//**
316 NOTE! Please use the corresponding macro os_fast_mutex_lock, not directly
317 this function!
318 Wrapper function of os_fast_mutex_lock. Acquires ownership of a fast mutex. */
319 UNIV_INLINE
320 void
321 pfs_os_fast_mutex_lock(
322 /*===================*/
323 	os_fast_mutex_t*	fast_mutex,	/*!< in/out: mutex to acquire */
324 	const char*		file_name,	/*!< in: file name where
325 						 locked */
326 	ulint			line);		/*!< in: line where locked */
327 /**********************************************************//**
328 NOTE! Please use the corresponding macro os_fast_mutex_unlock, not directly
329 this function!
330 Wrapper function of os_fast_mutex_unlock. Releases ownership of a fast mutex. */
331 UNIV_INLINE
332 void
333 pfs_os_fast_mutex_unlock(
334 /*=====================*/
335 	os_fast_mutex_t*	fast_mutex);	/*!< in/out: mutex to release */
336 
337 #else /* UNIV_PFS_MUTEX */
338 
339 # define os_fast_mutex_init(K, M)			\
340 	os_fast_mutex_init_func(&((os_fast_mutex_t*)(M))->mutex)
341 
342 # define os_fast_mutex_lock(M)				\
343 	os_fast_mutex_lock_func(&((os_fast_mutex_t*)(M))->mutex)
344 
345 # define os_fast_mutex_unlock(M)			\
346 	os_fast_mutex_unlock_func(&((os_fast_mutex_t*)(M))->mutex)
347 
348 # define os_fast_mutex_free(M)				\
349 	os_fast_mutex_free_func(&((os_fast_mutex_t*)(M))->mutex)
350 #endif /* UNIV_PFS_MUTEX */
351 
352 /**********************************************************//**
353 Releases ownership of a fast mutex. */
354 UNIV_INTERN
355 void
356 os_fast_mutex_unlock_func(
357 /*======================*/
358 	fast_mutex_t*		fast_mutex);	/*!< in: mutex to release */
359 /*********************************************************//**
360 Initializes an operating system fast mutex semaphore. */
361 UNIV_INTERN
362 void
363 os_fast_mutex_init_func(
364 /*====================*/
365 	fast_mutex_t*		fast_mutex);	/*!< in: fast mutex */
366 /**********************************************************//**
367 Acquires ownership of a fast mutex. */
368 UNIV_INTERN
369 void
370 os_fast_mutex_lock_func(
371 /*====================*/
372 	fast_mutex_t*		fast_mutex);	/*!< in: mutex to acquire */
373 /**********************************************************//**
Frees a mutex object. */
375 UNIV_INTERN
376 void
377 os_fast_mutex_free_func(
378 /*====================*/
379 	fast_mutex_t*		fast_mutex);	/*!< in: mutex to free */
380 
381 /**********************************************************//**
382 Atomic compare-and-swap and increment for InnoDB. */
383 
384 #if defined(HAVE_IB_GCC_ATOMIC_BUILTINS)
385 
386 # define HAVE_ATOMIC_BUILTINS
387 
388 # ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
389 #  define HAVE_ATOMIC_BUILTINS_BYTE
390 # endif
391 
392 # ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64
393 #  define HAVE_ATOMIC_BUILTINS_64
394 # endif
395 
396 /**********************************************************//**
397 Returns true if swapped, ptr is pointer to target, old_val is value to
398 compare to, new_val is the value to swap in. */
399 
400 # define os_compare_and_swap(ptr, old_val, new_val) \
401 	__sync_bool_compare_and_swap(ptr, old_val, new_val)
402 
403 # define os_compare_and_swap_ulint(ptr, old_val, new_val) \
404 	os_compare_and_swap(ptr, old_val, new_val)
405 
406 # define os_compare_and_swap_lint(ptr, old_val, new_val) \
407 	os_compare_and_swap(ptr, old_val, new_val)
408 
409 #  define os_compare_and_swap_uint32(ptr, old_val, new_val) \
410 	os_compare_and_swap(ptr, old_val, new_val)
411 
412 # ifdef HAVE_IB_ATOMIC_PTHREAD_T_GCC
413 #  define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
414 	os_compare_and_swap(ptr, old_val, new_val)
415 #  define INNODB_RW_LOCKS_USE_ATOMICS
416 #  define IB_ATOMICS_STARTUP_MSG \
417 	"Mutexes and rw_locks use GCC atomic builtins"
418 # else /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
419 #  define IB_ATOMICS_STARTUP_MSG \
420 	"Mutexes use GCC atomic builtins, rw_locks do not"
421 # endif /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
422 
423 /**********************************************************//**
424 Returns the resulting value, ptr is pointer to target, amount is the
425 amount of increment. */
426 
427 # define os_atomic_increment(ptr, amount) \
428 	__sync_add_and_fetch(ptr, amount)
429 
430 # define os_atomic_increment_lint(ptr, amount) \
431 	os_atomic_increment(ptr, amount)
432 
433 # define os_atomic_increment_uint32(ptr, amount ) \
434 	os_atomic_increment(ptr, amount)
435 
436 # define os_atomic_increment_ulint(ptr, amount) \
437 	os_atomic_increment(ptr, amount)
438 
439 # define os_atomic_increment_uint64(ptr, amount) \
440 	os_atomic_increment(ptr, amount)
441 
442 /* Returns the resulting value, ptr is pointer to target, amount is the
443 amount to decrement. */
444 
445 # define os_atomic_decrement(ptr, amount) \
446 	__sync_sub_and_fetch(ptr, amount)
447 
448 # define os_atomic_decrement_uint32(ptr, amount) \
449 	os_atomic_decrement(ptr, amount)
450 
451 # define os_atomic_decrement_lint(ptr, amount) \
452 	os_atomic_decrement(ptr, amount)
453 
454 # define os_atomic_decrement_ulint(ptr, amount) \
455 	os_atomic_decrement(ptr, amount)
456 
457 # define os_atomic_decrement_uint64(ptr, amount) \
458 	os_atomic_decrement(ptr, amount)
459 
460 # if defined(IB_STRONG_MEMORY_MODEL)
461 
462 /** Do an atomic test and set.
463 @param[in,out]	ptr		Memory location to set to non-zero
464 @return the previous value */
465 inline
466 lock_word_t
os_atomic_test_and_set(volatile lock_word_t * ptr)467 os_atomic_test_and_set(volatile lock_word_t* ptr)
468 {
469 	return(__sync_lock_test_and_set(ptr, 1));
470 }
471 
472 /** Do an atomic release.
473 
474 In theory __sync_lock_release should be used to release the lock.
475 Unfortunately, it does not work properly alone. The workaround is
476 that more conservative __sync_lock_test_and_set is used instead.
477 
478 Performance regression was observed at some conditions for Intel
479 architecture. Disable release barrier on Intel architecture for now.
480 @param[in,out]	ptr		Memory location to write to
481 @return the previous value */
482 inline
483 lock_word_t
os_atomic_clear(volatile lock_word_t * ptr)484 os_atomic_clear(volatile lock_word_t* ptr)
485 {
486 	return(__sync_lock_test_and_set(ptr, 0));
487 }
488 
489 # elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
490 
491 /** Do an atomic test-and-set.
492 @param[in,out]	ptr		Memory location to set to non-zero
493 @return the previous value */
494 inline
495 lock_word_t
os_atomic_test_and_set(volatile lock_word_t * ptr)496 os_atomic_test_and_set(volatile lock_word_t* ptr)
497 {
498        return(__atomic_test_and_set(ptr, __ATOMIC_ACQUIRE));
499 }
500 
/** Do an atomic clear.
Note: unlike the other os_atomic_clear() variants in this file, this
one does not return the previous value.
@param[in,out]	ptr		Memory location to set to zero */
inline
void
os_atomic_clear(volatile lock_word_t* ptr)
{
	/* Release store; pairs with the __ATOMIC_ACQUIRE in
	os_atomic_test_and_set() above. */
	__atomic_clear(ptr, __ATOMIC_RELEASE);
}
509 
510 # else
511 
512 #  error "Unsupported platform"
513 
514 # endif /* HAVE_IB_GCC_ATOMIC_TEST_AND_SET */
515 
516 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
517 
518 # define HAVE_ATOMIC_BUILTINS
519 # define HAVE_ATOMIC_BUILTINS_BYTE
520 # define HAVE_ATOMIC_BUILTINS_64
521 
522 /* If not compiling with GCC or GCC doesn't support the atomic
523 intrinsics and running on Solaris >= 10 use Solaris atomics */
524 
525 # include <atomic.h>
526 
527 /**********************************************************//**
528 Returns true if swapped, ptr is pointer to target, old_val is value to
529 compare to, new_val is the value to swap in. */
530 
531 # define os_compare_and_swap_uint32(ptr, old_val, new_val) \
532 	(atomic_cas_32(ptr, old_val, new_val) == old_val)
533 
534 # define os_compare_and_swap_ulint(ptr, old_val, new_val) \
535 	(atomic_cas_ulong(ptr, old_val, new_val) == old_val)
536 
537 # define os_compare_and_swap_lint(ptr, old_val, new_val) \
538 	((lint) atomic_cas_ulong((ulong_t*) ptr, old_val, new_val) == old_val)
539 
540 # ifdef HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS
541 #  if SIZEOF_PTHREAD_T == 4
542 #   define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
543 	((pthread_t) atomic_cas_32(ptr, old_val, new_val) == old_val)
544 #  elif SIZEOF_PTHREAD_T == 8
545 #   define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
546 	((pthread_t) atomic_cas_64(ptr, old_val, new_val) == old_val)
547 #  else
548 #   error "SIZEOF_PTHREAD_T != 4 or 8"
549 #  endif /* SIZEOF_PTHREAD_T CHECK */
550 #  define INNODB_RW_LOCKS_USE_ATOMICS
551 #  define IB_ATOMICS_STARTUP_MSG \
552 	"Mutexes and rw_locks use Solaris atomic functions"
553 # else /* HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS */
554 #  define IB_ATOMICS_STARTUP_MSG \
555 	"Mutexes use Solaris atomic functions, rw_locks do not"
556 # endif /* HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS */
557 
558 /**********************************************************//**
559 Returns the resulting value, ptr is pointer to target, amount is the
560 amount of increment. */
561 
562 # define os_atomic_increment_uint32(ptr, amount) \
563 	atomic_add_32_nv(ptr, amount)
564 
565 # define os_atomic_increment_ulint(ptr, amount) \
566 	atomic_add_long_nv(ptr, amount)
567 
568 # define os_atomic_increment_lint(ptr, amount) \
569 	os_atomic_increment_ulint((ulong_t*) ptr, amount)
570 
571 # define os_atomic_increment_uint64(ptr, amount) \
572 	atomic_add_64_nv(ptr, amount)
573 
574 /* Returns the resulting value, ptr is pointer to target, amount is the
575 amount to decrement. */
576 
577 # define os_atomic_decrement_uint32(ptr, amount) \
578 	os_atomic_increment_uint32(ptr, -(amount))
579 
580 # define os_atomic_decrement_lint(ptr, amount) \
581 	os_atomic_increment_ulint((ulong_t*) ptr, -(amount))
582 
583 # define os_atomic_decrement_ulint(ptr, amount) \
584 	os_atomic_increment_ulint(ptr, -(amount))
585 
586 # define os_atomic_decrement_uint64(ptr, amount) \
587 	os_atomic_increment_uint64(ptr, -(amount))
588 
589 # ifdef IB_LOCK_WORD_IS_BYTE
590 
591 /** Do an atomic xchg and set to non-zero.
592 @param[in,out]	ptr		Memory location to set to non-zero
593 @return the previous value */
594 inline
595 lock_word_t
os_atomic_test_and_set(volatile lock_word_t * ptr)596 os_atomic_test_and_set(volatile lock_word_t* ptr)
597 {
598 	return(atomic_swap_uchar(ptr, 1));
599 }
600 
601 /** Do an atomic xchg and set to zero.
602 @param[in,out]	ptr		Memory location to set to zero
603 @return the previous value */
604 inline
605 lock_word_t
os_atomic_clear(volatile lock_word_t * ptr)606 os_atomic_clear(volatile lock_word_t* ptr)
607 {
608 	return(atomic_swap_uchar(ptr, 0));
609 }
610 
611 # else
612 
613 /** Do an atomic xchg and set to non-zero.
614 @param[in,out]	ptr		Memory location to set to non-zero
615 @return the previous value */
616 inline
617 lock_word_t
os_atomic_test_and_set(volatile lock_word_t * ptr)618 os_atomic_test_and_set(volatile lock_word_t* ptr)
619 {
620 	return(atomic_swap_ulong(ptr, 1));
621 }
622 
623 /** Do an atomic xchg and set to zero.
624 @param[in,out]	ptr		Memory location to set to zero
625 @return the previous value */
626 inline
627 lock_word_t
os_atomic_clear(volatile lock_word_t * ptr)628 os_atomic_clear(volatile lock_word_t* ptr)
629 {
630 	return(atomic_swap_ulong(ptr, 0));
631 }
632 
633 # endif /* IB_LOCK_WORD_IS_BYTE */
634 
635 #elif defined(HAVE_WINDOWS_ATOMICS)
636 
637 # define HAVE_ATOMIC_BUILTINS
638 # define HAVE_ATOMIC_BUILTINS_BYTE
639 
640 # ifndef _WIN32
641 #  define HAVE_ATOMIC_BUILTINS_64
642 # endif
643 
644 /**********************************************************//**
645 Atomic compare and exchange of signed integers (both 32 and 64 bit).
646 @return value found before the exchange.
647 If it is not equal to old_value the exchange did not happen. */
648 UNIV_INLINE
649 lint
650 win_cmp_and_xchg_lint(
651 /*==================*/
652 	volatile lint*	ptr,		/*!< in/out: source/destination */
653 	lint		new_val,	/*!< in: exchange value */
654 	lint		old_val);	/*!< in: value to compare to */
655 
656 /**********************************************************//**
657 Atomic addition of signed integers.
658 @return Initial value of the variable pointed to by ptr */
659 UNIV_INLINE
660 lint
661 win_xchg_and_add(
662 /*=============*/
663 	volatile lint*	ptr,	/*!< in/out: address of destination */
664 	lint		val);	/*!< in: number to be added */
665 
666 /**********************************************************//**
667 Atomic compare and exchange of unsigned integers.
668 @return value found before the exchange.
669 If it is not equal to old_value the exchange did not happen. */
670 UNIV_INLINE
671 ulint
672 win_cmp_and_xchg_ulint(
673 /*===================*/
674 	volatile ulint*	ptr,		/*!< in/out: source/destination */
675 	ulint		new_val,	/*!< in: exchange value */
676 	ulint		old_val);	/*!< in: value to compare to */
677 
678 /**********************************************************//**
679 Atomic compare and exchange of 32 bit unsigned integers.
680 @return value found before the exchange.
681 If it is not equal to old_value the exchange did not happen. */
682 UNIV_INLINE
683 DWORD
684 win_cmp_and_xchg_dword(
685 /*===================*/
686 	volatile DWORD*	ptr,		/*!< in/out: source/destination */
687 	DWORD		new_val,	/*!< in: exchange value */
688 	DWORD		old_val);	/*!< in: value to compare to */
689 
690 /**********************************************************//**
691 Returns true if swapped, ptr is pointer to target, old_val is value to
692 compare to, new_val is the value to swap in. */
693 
694 # define os_compare_and_swap_uint32(ptr, old_val, new_val) \
695 	(InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \
696 				    new_val, old_val) == old_val)
697 
698 # define os_compare_and_swap_ulint(ptr, old_val, new_val) \
699 	(win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val)
700 
701 # define os_compare_and_swap_lint(ptr, old_val, new_val) \
702 	(win_cmp_and_xchg_lint(ptr, new_val, old_val) == old_val)
703 
704 /* windows thread objects can always be passed to windows atomic functions */
705 # define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
706 	(win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val)
707 
708 # define INNODB_RW_LOCKS_USE_ATOMICS
709 # define IB_ATOMICS_STARTUP_MSG \
710 	"Mutexes and rw_locks use Windows interlocked functions"
711 
712 /**********************************************************//**
713 Returns the resulting value, ptr is pointer to target, amount is the
714 amount of increment. */
715 
716 # define os_atomic_increment_lint(ptr, amount) \
717 	(win_xchg_and_add(ptr, amount) + amount)
718 
719 # define os_atomic_increment_uint32(ptr, amount) \
720 	((ulint) InterlockedExchangeAdd((long*) ptr, amount))
721 
722 # define os_atomic_increment_ulint(ptr, amount) \
723 	((ulint) (win_xchg_and_add((lint*) ptr, (lint) amount) + amount))
724 
725 # define os_atomic_increment_uint64(ptr, amount)		\
726 	((ib_uint64_t) (InterlockedExchangeAdd64(		\
727 				(ib_int64_t*) ptr,		\
728 				(ib_int64_t) amount) + amount))
729 
730 /**********************************************************//**
731 Returns the resulting value, ptr is pointer to target, amount is the
amount to decrement. There is no atomic subtract function on Windows */
733 
734 # define os_atomic_decrement_uint32(ptr, amount) \
735 	((ulint) InterlockedExchangeAdd((long*) ptr, (-amount)))
736 
737 # define os_atomic_decrement_lint(ptr, amount) \
738 	(win_xchg_and_add(ptr, -(lint) amount) - amount)
739 
740 # define os_atomic_decrement_ulint(ptr, amount) \
741 	((ulint) (win_xchg_and_add((lint*) ptr, -(lint) amount) - amount))
742 
743 # define os_atomic_decrement_uint64(ptr, amount)		\
744 	((ib_uint64_t) (InterlockedExchangeAdd64(		\
745 				(ib_int64_t*) ptr,		\
746 				-(ib_int64_t) amount) - amount))
747 
748 /** Do an atomic test and set.
749 InterlockedExchange() operates on LONG, and the LONG will be clobbered
750 @param[in,out]	ptr		Memory location to set to non-zero
751 @return the previous value */
752 inline
753 lock_word_t
os_atomic_test_and_set(volatile lock_word_t * ptr)754 os_atomic_test_and_set(volatile lock_word_t* ptr)
755 {
756 	return(InterlockedExchange(ptr, 1));
757 }
758 
759 /** Do an atomic release.
760 InterlockedExchange() operates on LONG, and the LONG will be clobbered
761 @param[in,out]	ptr		Memory location to set to zero
762 @return the previous value */
763 inline
764 lock_word_t
os_atomic_clear(volatile lock_word_t * ptr)765 os_atomic_clear(volatile lock_word_t* ptr)
766 {
767 	return(InterlockedExchange(ptr, 0));
768 }
769 
770 #else
771 # define IB_ATOMICS_STARTUP_MSG \
772 	"Mutexes and rw_locks use InnoDB's own implementation"
773 #endif
774 #ifdef HAVE_ATOMIC_BUILTINS
775 #define os_atomic_inc_ulint(m,v,d)	os_atomic_increment_ulint(v, d)
776 #define os_atomic_dec_ulint(m,v,d)	os_atomic_decrement_ulint(v, d)
777 #else
778 #define os_atomic_inc_ulint(m,v,d)	os_atomic_inc_ulint_func(m, v, d)
779 #define os_atomic_dec_ulint(m,v,d)	os_atomic_dec_ulint_func(m, v, d)
780 #endif /* HAVE_ATOMIC_BUILTINS */
781 
782 /**********************************************************//**
783 Following macros are used to update specified counter atomically
784 if HAVE_ATOMIC_BUILTINS defined. Otherwise, use mutex passed in
785 for synchronization */
786 #ifdef HAVE_ATOMIC_BUILTINS
787 #define os_increment_counter_by_amount(mutex, counter, amount)	\
788 	(void) os_atomic_increment_ulint(&counter, amount)
789 
790 #define os_decrement_counter_by_amount(mutex, counter, amount)	\
791 	(void) os_atomic_increment_ulint(&counter, (-((lint) amount)))
792 #else
793 #define os_increment_counter_by_amount(mutex, counter, amount)	\
794 	do {							\
795 		mutex_enter(&(mutex));				\
796 		(counter) += (amount);				\
797 		mutex_exit(&(mutex));				\
798 	} while (0)
799 
800 #define os_decrement_counter_by_amount(mutex, counter, amount)	\
801 	do {							\
802 		ut_a(counter >= amount);			\
803 		mutex_enter(&(mutex));				\
804 		(counter) -= (amount);				\
805 		mutex_exit(&(mutex));				\
806 	} while (0)
807 #endif  /* HAVE_ATOMIC_BUILTINS */
808 
809 #define os_inc_counter(mutex, counter)				\
810 	os_increment_counter_by_amount(mutex, counter, 1)
811 
812 #define os_dec_counter(mutex, counter)				\
813 	do {							\
814 		os_decrement_counter_by_amount(mutex, counter, 1);\
815 	} while (0);
816 
817 /** barrier definitions for memory ordering */
818 #ifdef IB_STRONG_MEMORY_MODEL
819 /* Performance regression was observed at some conditions for Intel
820 architecture. Disable memory barrier for Intel architecture for now. */
821 # define os_rmb
822 # define os_wmb
823 # define IB_MEMORY_BARRIER_STARTUP_MSG \
824 	"Memory barrier is not used"
825 #elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
826 # define HAVE_MEMORY_BARRIER
827 # define os_rmb	__atomic_thread_fence(__ATOMIC_ACQUIRE)
828 # define os_wmb	__atomic_thread_fence(__ATOMIC_RELEASE)
829 # define IB_MEMORY_BARRIER_STARTUP_MSG \
830 	"GCC builtin __atomic_thread_fence() is used for memory barrier"
831 
832 #elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
833 # define HAVE_MEMORY_BARRIER
834 # define os_rmb	__sync_synchronize()
835 # define os_wmb	__sync_synchronize()
836 # define IB_MEMORY_BARRIER_STARTUP_MSG \
837 	"GCC builtin __sync_synchronize() is used for memory barrier"
838 
839 #elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
840 # define HAVE_MEMORY_BARRIER
841 # include <mbarrier.h>
842 # define os_rmb	__machine_r_barrier()
843 # define os_wmb	__machine_w_barrier()
844 # define IB_MEMORY_BARRIER_STARTUP_MSG \
845 	"Solaris memory ordering functions are used for memory barrier"
846 
847 #elif defined(HAVE_WINDOWS_MM_FENCE) && defined(_WIN64)
848 # define HAVE_MEMORY_BARRIER
849 # include <mmintrin.h>
850 # define os_rmb	_mm_lfence()
851 # define os_wmb	_mm_sfence()
852 # define IB_MEMORY_BARRIER_STARTUP_MSG \
853 	"_mm_lfence() and _mm_sfence() are used for memory barrier"
854 
855 #else
856 # define os_rmb
857 # define os_wmb
858 # define IB_MEMORY_BARRIER_STARTUP_MSG \
859 	"Memory barrier is not used"
860 #endif
861 
862 #ifndef UNIV_NONINL
863 #include "os0sync.ic"
864 #endif
865 
866 #endif
867