/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.

This program is also distributed with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation.  The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have included with MySQL.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License, version 2.0, for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/os0sync.h
The interface to the operating system
synchronization primitives.

Created 9/6/1995 Heikki Tuuri
*******************************************************/

#ifndef os0sync_h
#define os0sync_h

#include "univ.i"
#include "ut0lst.h"
#include "sync0types.h"

#if defined __i386__ || defined __x86_64__ || defined _M_IX86 \
    || defined _M_X64 || defined __WIN__

#define IB_STRONG_MEMORY_MODEL

#endif /* __i386__ || __x86_64__ || _M_IX86 || _M_X64 || __WIN__ */

#ifdef HAVE_WINDOWS_ATOMICS
typedef LONG lock_word_t;	/*!< On Windows, InterlockedExchange operates
				on a LONG variable */
#elif defined(HAVE_ATOMIC_BUILTINS) && !defined(HAVE_ATOMIC_BUILTINS_BYTE)
typedef ulint lock_word_t;
#else

#define IB_LOCK_WORD_IS_BYTE

typedef byte lock_word_t;

#endif /* HAVE_WINDOWS_ATOMICS */

#ifdef __WIN__
/** Native event (slow) */
typedef HANDLE			os_native_event_t;
/** Native mutex */
typedef CRITICAL_SECTION	fast_mutex_t;
/** Native condition variable */
typedef CONDITION_VARIABLE	os_cond_t;
#else
/** Native mutex */
typedef pthread_mutex_t		fast_mutex_t;
/** Native condition variable */
typedef pthread_cond_t		os_cond_t;
#endif

/** Structure that includes Performance Schema Probe pfs_psi
in the os_fast_mutex structure if UNIV_PFS_MUTEX is defined */
struct os_fast_mutex_t {
	fast_mutex_t		mutex;	/*!< os_fast_mutex */
#ifdef UNIV_PFS_MUTEX
	struct PSI_mutex*	pfs_psi;/*!< The performance schema
					instrumentation hook */
#endif
};

/** Operating system event handle */
typedef struct os_event*	os_event_t;

/** An asynchronous signal sent between threads */
struct os_event {
#ifdef __WIN__
	HANDLE		handle;		/*!< kernel event object, slow,
					used on older Windows */
#endif
	os_fast_mutex_t	os_mutex;	/*!< this mutex protects the next
					fields */
private:
	/** Masks for the event signal count and set flag in the
	count_and_set field */
	enum { count_mask = 0x7fffffffffffffffULL,
	       set_mask   = 0x8000000000000000ULL};

	/** The MSB is set whenever the event is in the signaled state,
	i.e. a thread does not stop if it tries to wait for this event.
	The lower bits are incremented each time the event becomes
	signaled. */
	ib_uint64_t	count_and_set;
public:
	os_cond_t	cond_var;	/*!< condition variable used in
					waiting for the event */

	/** Initialise the count_and_set field */
	void init_count_and_set(void)
	{
		/* We return this value in os_event_reset(), which can
		then be passed to os_event_wait_low(). The value of zero
		is reserved in os_event_wait_low() for the case when the
		caller does not want to pass any signal_count value. To
		distinguish between the two cases we initialize
		signal_count to 1 here. */
		count_and_set = 1;
	}

	/** Mark this event as set */
	void set(void)
	{
		count_and_set |= set_mask;
	}

	/** Unmark this event as set */
	void reset(void)
	{
		count_and_set &= count_mask;
	}

	/** Return true if this event is set */
	bool is_set(void) const
	{
		return count_and_set & set_mask;
	}

	/** Bump the signal count for this event */
	void inc_signal_count(void)
	{
		ut_ad(static_cast<ib_uint64_t>(signal_count()) < count_mask);
		count_and_set++;
	}

	/** Return how many times this event has been signalled */
	ib_int64_t signal_count(void) const
	{
		return (count_and_set & count_mask);
	}
};
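
/* Illustrative sketch (not part of the API): how the count_and_set
encoding behaves. Starting from the initial value 1, a set() followed by
inc_signal_count() yields 0x8000000000000002: the MSB records the
signaled state while the low bits keep the running signal count, so both
live in one 64-bit word protected by os_mutex. Assuming an initialised
event:

	os_event	ev;		// assume fields are initialised
	ev.init_count_and_set();	// count_and_set == 1
	ev.set();			// MSB on: is_set() == true
	ev.inc_signal_count();		// signal_count() == 2
	ev.reset();			// MSB off, count preserved
*/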

/** Denotes an infinite delay for os_event_wait_time() */
#define OS_SYNC_INFINITE_TIME   ULINT_UNDEFINED

/** Return value of os_event_wait_time() when the time is exceeded */
#define OS_SYNC_TIME_EXCEEDED   1

/** Operating system mutex handle */
typedef struct os_mutex_t*	os_ib_mutex_t;

/* All the os_*_count variables are accessed atomically */

/** This is incremented by 1 in os_thread_create and decremented by 1 in
os_thread_exit */
extern ulint		os_thread_count;

extern ulint		os_event_count;
extern ulint		os_mutex_count;
extern ulint		os_fast_mutex_count;

/*********************************************************//**
Initializes global event and OS 'slow' mutex lists. */
UNIV_INTERN
void
os_sync_init(void);
/*==============*/

/** Creates an event semaphore, i.e., a semaphore which may have just two
states: signaled and nonsignaled. The created event is manual reset: it must
be reset explicitly by calling os_event_reset().
@param[in,out]	event	memory block where to create the event */
UNIV_INTERN
void
os_event_create(os_event_t event);

/*********************************************************//**
Creates an event semaphore, i.e., a semaphore which may have just two states:
signaled and nonsignaled. The created event is manual reset: it must be reset
explicitly by calling os_event_reset().
@return	the event handle */
UNIV_INTERN
os_event_t
os_event_create(void);
/*==================*/
/**********************************************************//**
Sets an event semaphore to the signaled state: lets waiting threads
proceed. */
UNIV_INTERN
void
os_event_set(
/*=========*/
	os_event_t	event);	/*!< in: event to set */
/**********************************************************//**
Resets an event semaphore to the nonsignaled state. Threads that
subsequently wait for the event will block.
The return value should be passed to os_event_wait_low() if it is desired
that this thread should not wait in case of an intervening call to
os_event_set() between this os_event_reset() and the
os_event_wait_low() call. See comments for os_event_wait_low(). */
UNIV_INTERN
ib_int64_t
os_event_reset(
/*===========*/
	os_event_t	event);	/*!< in: event to reset */
/**********************************************************//**
Frees an event object. */
UNIV_INTERN
void
os_event_free(
/*==========*/
	os_event_t	event,	/*!< in: event to free */
	bool		free_memory = true);
				/*!< in: if true, deallocate the event memory
				block too */

/**********************************************************//**
Waits for an event object until it is in the signaled state.

Typically, if the event has been signalled after the os_event_reset()
we'll return immediately because event->is_set == TRUE.
There are, however, situations (e.g.: sync_array code) where we may
lose this information. For example:

thread A calls os_event_reset()
thread B calls os_event_set()   [event->is_set == TRUE]
thread C calls os_event_reset() [event->is_set == FALSE]
thread A calls os_event_wait()  [infinite wait!]
thread C calls os_event_wait()  [infinite wait!]

Where such a scenario is possible, to avoid infinite wait, the
value returned by os_event_reset() should be passed in as
reset_sig_count. */
UNIV_INTERN
void
os_event_wait_low(
/*==============*/
	os_event_t	event,		/*!< in: event to wait for */
	ib_int64_t	reset_sig_count);/*!< in: zero or the value
					returned by a previous call of
					os_event_reset() */

#define os_event_wait(event) os_event_wait_low(event, 0)
#define os_event_wait_time(event, t) os_event_wait_time_low(event, t, 0)
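
/* Usage sketch (illustrative only): the reset/wait protocol that avoids
the lost-wakeup scenario described above. The waiter captures the signal
count at reset time and passes it to os_event_wait_low(); if any
os_event_set() intervenes, the wait returns immediately.

	// waiter thread
	ib_int64_t	sig_count = os_event_reset(event);
	// ... re-check the predicate guarded by the event here ...
	os_event_wait_low(event, sig_count);

	// signalling thread
	os_event_set(event);
*/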

/**********************************************************//**
Waits for an event object until it is in the signaled state or
a timeout is exceeded.
@return 0 if success, OS_SYNC_TIME_EXCEEDED if the timeout was exceeded */
UNIV_INTERN
ulint
os_event_wait_time_low(
/*===================*/
	os_event_t	event,			/*!< in: event to wait for */
	ulint		time_in_usec,		/*!< in: timeout in
						microseconds, or
						OS_SYNC_INFINITE_TIME */
	ib_int64_t	reset_sig_count);	/*!< in: zero or the value
						returned by a previous call
						of os_event_reset() */
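
/* Usage sketch (illustrative only): a timed wait, here assuming a
100 ms budget. A zero reset_sig_count means the lost-wakeup protection
described at os_event_wait_low() is not used.

	ulint	ret = os_event_wait_time_low(event, 100000, 0);

	if (ret == OS_SYNC_TIME_EXCEEDED) {
		// timed out without the event being set
	}
*/
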
/*********************************************************//**
Creates an operating system mutex semaphore. Because these are slow, the
mutex semaphore of InnoDB itself (ib_mutex_t) should be used where possible.
@return	the mutex handle */
UNIV_INTERN
os_ib_mutex_t
os_mutex_create(void);
/*=================*/
/**********************************************************//**
Acquires ownership of a mutex semaphore. */
UNIV_INTERN
void
os_mutex_enter(
/*===========*/
	os_ib_mutex_t	mutex);	/*!< in: mutex to acquire */
/**********************************************************//**
Releases ownership of a mutex. */
UNIV_INTERN
void
os_mutex_exit(
/*==========*/
	os_ib_mutex_t	mutex);	/*!< in: mutex to release */
/**********************************************************//**
Frees a mutex object. */
UNIV_INTERN
void
os_mutex_free(
/*==========*/
	os_ib_mutex_t	mutex);	/*!< in: mutex to free */
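
/* Usage sketch (illustrative only): the full lifecycle of an OS 'slow'
mutex. As noted above, prefer ib_mutex_t in real InnoDB code; these
calls are for places where an OS-level mutex is required.

	os_ib_mutex_t	mutex = os_mutex_create();

	os_mutex_enter(mutex);
	// ... critical section ...
	os_mutex_exit(mutex);

	os_mutex_free(mutex);
*/
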
/**********************************************************//**
Acquires ownership of a fast mutex. Currently on Windows this is the same
as os_fast_mutex_lock!
@return	0 if success, != 0 if it was reserved by another thread */
UNIV_INLINE
ulint
os_fast_mutex_trylock(
/*==================*/
	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */

/**********************************************************************
The following os_fast_mutex APIs are instrumented by the performance
schema when UNIV_PFS_MUTEX is defined:

os_fast_mutex_init
os_fast_mutex_lock
os_fast_mutex_unlock
os_fast_mutex_free

These mutex APIs then point to the corresponding wrapper functions that
contain the performance schema instrumentation.

NOTE! The following macros should be used for mutex operations, not the
corresponding functions. */

#ifdef UNIV_PFS_MUTEX
# define os_fast_mutex_init(K, M)			\
	pfs_os_fast_mutex_init(K, M)

# define os_fast_mutex_lock(M)				\
	pfs_os_fast_mutex_lock(M, __FILE__, __LINE__)

# define os_fast_mutex_unlock(M)	pfs_os_fast_mutex_unlock(M)

# define os_fast_mutex_free(M)		pfs_os_fast_mutex_free(M)

/*********************************************************//**
NOTE! Please use the corresponding macro os_fast_mutex_init(), not directly
this function!
A wrapper function for os_fast_mutex_init_func(). Initializes an operating
system fast mutex semaphore. */
UNIV_INLINE
void
pfs_os_fast_mutex_init(
/*===================*/
	PSI_mutex_key		key,		/*!< in: Performance Schema
						key */
	os_fast_mutex_t*	fast_mutex);	/*!< out: fast mutex */
/**********************************************************//**
NOTE! Please use the corresponding macro os_fast_mutex_free(), not directly
this function!
A wrapper function for os_fast_mutex_free_func(). Also destroys the
performance schema probes when freeing the mutex. */
UNIV_INLINE
void
pfs_os_fast_mutex_free(
/*===================*/
	os_fast_mutex_t*	fast_mutex);	/*!< in/out: mutex to free */
/**********************************************************//**
NOTE! Please use the corresponding macro os_fast_mutex_lock(), not directly
this function!
A wrapper function for os_fast_mutex_lock_func(). Acquires ownership of a
fast mutex. */
UNIV_INLINE
void
pfs_os_fast_mutex_lock(
/*===================*/
	os_fast_mutex_t*	fast_mutex,	/*!< in/out: mutex to acquire */
	const char*		file_name,	/*!< in: file name where
						locked */
	ulint			line);		/*!< in: line where locked */
/**********************************************************//**
NOTE! Please use the corresponding macro os_fast_mutex_unlock(), not directly
this function!
A wrapper function for os_fast_mutex_unlock_func(). Releases ownership of a
fast mutex. */
UNIV_INLINE
void
pfs_os_fast_mutex_unlock(
/*=====================*/
	os_fast_mutex_t*	fast_mutex);	/*!< in/out: mutex to release */

#else /* UNIV_PFS_MUTEX */

# define os_fast_mutex_init(K, M)			\
	os_fast_mutex_init_func(&((os_fast_mutex_t*)(M))->mutex)

# define os_fast_mutex_lock(M)				\
	os_fast_mutex_lock_func(&((os_fast_mutex_t*)(M))->mutex)

# define os_fast_mutex_unlock(M)			\
	os_fast_mutex_unlock_func(&((os_fast_mutex_t*)(M))->mutex)

# define os_fast_mutex_free(M)				\
	os_fast_mutex_free_func(&((os_fast_mutex_t*)(M))->mutex)
#endif /* UNIV_PFS_MUTEX */
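
/* Usage sketch (illustrative only): going through the macros keeps the
call site identical whether or not UNIV_PFS_MUTEX is defined. The PSI
key and mutex names below are hypothetical.

	static os_fast_mutex_t	my_mutex;

	os_fast_mutex_init(my_mutex_key, &my_mutex);
				// key ignored when PFS is disabled
	os_fast_mutex_lock(&my_mutex);
	// ... critical section ...
	os_fast_mutex_unlock(&my_mutex);
	os_fast_mutex_free(&my_mutex);
*/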

/**********************************************************//**
Releases ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_unlock_func(
/*======================*/
	fast_mutex_t*		fast_mutex);	/*!< in: mutex to release */
/*********************************************************//**
Initializes an operating system fast mutex semaphore. */
UNIV_INTERN
void
os_fast_mutex_init_func(
/*====================*/
	fast_mutex_t*		fast_mutex);	/*!< in: fast mutex */
/**********************************************************//**
Acquires ownership of a fast mutex. */
UNIV_INTERN
void
os_fast_mutex_lock_func(
/*====================*/
	fast_mutex_t*		fast_mutex);	/*!< in: mutex to acquire */
/**********************************************************//**
Frees a mutex object. */
UNIV_INTERN
void
os_fast_mutex_free_func(
/*====================*/
	fast_mutex_t*		fast_mutex);	/*!< in: mutex to free */

/**********************************************************//**
Atomic compare-and-swap and increment for InnoDB. */

#if defined(HAVE_IB_GCC_ATOMIC_BUILTINS)

# define HAVE_ATOMIC_BUILTINS

# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
#  define HAVE_ATOMIC_BUILTINS_BYTE
# endif

# ifdef HAVE_IB_GCC_ATOMIC_BUILTINS_64
#  define HAVE_ATOMIC_BUILTINS_64
# endif

/**********************************************************//**
Returns true if swapped; ptr is a pointer to the target, old_val is the
value to compare to, new_val is the value to swap in. */

# define os_compare_and_swap(ptr, old_val, new_val) \
	__sync_bool_compare_and_swap(ptr, old_val, new_val)

# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
	os_compare_and_swap(ptr, old_val, new_val)

# define os_compare_and_swap_lint(ptr, old_val, new_val) \
	os_compare_and_swap(ptr, old_val, new_val)

# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
	os_compare_and_swap(ptr, old_val, new_val)

# ifdef HAVE_IB_ATOMIC_PTHREAD_T_GCC
#  define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
	os_compare_and_swap(ptr, old_val, new_val)
#  define INNODB_RW_LOCKS_USE_ATOMICS
#  define IB_ATOMICS_STARTUP_MSG \
	"Mutexes and rw_locks use GCC atomic builtins"
# else /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
#  define IB_ATOMICS_STARTUP_MSG \
	"Mutexes use GCC atomic builtins, rw_locks do not"
# endif /* HAVE_IB_ATOMIC_PTHREAD_T_GCC */
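
/* Usage sketch (illustrative only): a typical compare-and-swap retry
loop, here implementing a saturating increment on a hypothetical ulint
counter. The swap returns false if another thread changed the value
between the read and the CAS, in which case we retry.

	ulint	old_val;

	do {
		old_val = my_counter;
		if (old_val == ULINT_MAX) {
			break;		// saturated, give up
		}
	} while (!os_compare_and_swap_ulint(&my_counter,
					    old_val, old_val + 1));
*/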

/**********************************************************//**
Returns the resulting value; ptr is a pointer to the target, amount is the
amount of increment. */

# define os_atomic_increment(ptr, amount) \
	__sync_add_and_fetch(ptr, amount)

# define os_atomic_increment_lint(ptr, amount) \
	os_atomic_increment(ptr, amount)

# define os_atomic_increment_uint32(ptr, amount) \
	os_atomic_increment(ptr, amount)

# define os_atomic_increment_ulint(ptr, amount) \
	os_atomic_increment(ptr, amount)

# define os_atomic_increment_uint64(ptr, amount) \
	os_atomic_increment(ptr, amount)

/* Returns the resulting value; ptr is a pointer to the target, amount is
the amount to decrement. */

# define os_atomic_decrement(ptr, amount) \
	__sync_sub_and_fetch(ptr, amount)

# define os_atomic_decrement_uint32(ptr, amount) \
	os_atomic_decrement(ptr, amount)

# define os_atomic_decrement_lint(ptr, amount) \
	os_atomic_decrement(ptr, amount)

# define os_atomic_decrement_ulint(ptr, amount) \
	os_atomic_decrement(ptr, amount)

# define os_atomic_decrement_uint64(ptr, amount) \
	os_atomic_decrement(ptr, amount)

# if defined(IB_STRONG_MEMORY_MODEL)

/** Do an atomic test-and-set.
@param[in,out]	ptr		Memory location to set to non-zero
@return the previous value */
inline
lock_word_t
os_atomic_test_and_set(volatile lock_word_t* ptr)
{
	return(__sync_lock_test_and_set(ptr, 1));
}

/** Do an atomic release.

In theory __sync_lock_release should be used to release the lock.
Unfortunately, it does not work properly alone. The workaround is
that the more conservative __sync_lock_test_and_set is used instead.

A performance regression was observed under some conditions on Intel
architecture, so the release barrier is disabled on Intel for now.
@param[in,out]	ptr		Memory location to write to
@return the previous value */
inline
lock_word_t
os_atomic_clear(volatile lock_word_t* ptr)
{
	return(__sync_lock_test_and_set(ptr, 0));
}
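
/* Usage sketch (illustrative only): the test-and-set/clear pair is all
a simple spinlock needs. A non-zero return from os_atomic_test_and_set()
means the word was already taken, so we spin. The lock word below is
hypothetical.

	static volatile lock_word_t	lock_word = 0;

	while (os_atomic_test_and_set(&lock_word)) {
		// spin; a real caller would back off or yield here
	}
	// ... critical section ...
	os_atomic_clear(&lock_word);
*/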

# elif defined(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)

/** Do an atomic test-and-set.
@param[in,out]	ptr		Memory location to set to non-zero
@return the previous value */
inline
lock_word_t
os_atomic_test_and_set(volatile lock_word_t* ptr)
{
	return(__atomic_test_and_set(ptr, __ATOMIC_ACQUIRE));
}

/** Do an atomic clear.
@param[in,out]	ptr		Memory location to set to zero */
inline
void
os_atomic_clear(volatile lock_word_t* ptr)
{
	__atomic_clear(ptr, __ATOMIC_RELEASE);
}

# else

#  error "Unsupported platform"

# endif /* HAVE_IB_GCC_ATOMIC_TEST_AND_SET */

#elif defined(HAVE_IB_SOLARIS_ATOMICS)

# define HAVE_ATOMIC_BUILTINS
# define HAVE_ATOMIC_BUILTINS_BYTE
# define HAVE_ATOMIC_BUILTINS_64

/* If we are not compiling with GCC, or GCC does not support the atomic
intrinsics, and we are running on Solaris >= 10, use the Solaris atomic
functions. */

# include <atomic.h>

/**********************************************************//**
Returns true if swapped; ptr is a pointer to the target, old_val is the
value to compare to, new_val is the value to swap in. */

# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
	(atomic_cas_32(ptr, old_val, new_val) == old_val)

# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
	(atomic_cas_ulong(ptr, old_val, new_val) == old_val)

# define os_compare_and_swap_lint(ptr, old_val, new_val) \
	((lint) atomic_cas_ulong((ulong_t*) ptr, old_val, new_val) == old_val)

# ifdef HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS
#  if SIZEOF_PTHREAD_T == 4
#   define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
	((pthread_t) atomic_cas_32(ptr, old_val, new_val) == old_val)
#  elif SIZEOF_PTHREAD_T == 8
#   define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
	((pthread_t) atomic_cas_64(ptr, old_val, new_val) == old_val)
#  else
#   error "SIZEOF_PTHREAD_T != 4 or 8"
#  endif /* SIZEOF_PTHREAD_T CHECK */
#  define INNODB_RW_LOCKS_USE_ATOMICS
#  define IB_ATOMICS_STARTUP_MSG \
	"Mutexes and rw_locks use Solaris atomic functions"
# else /* HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS */
#  define IB_ATOMICS_STARTUP_MSG \
	"Mutexes use Solaris atomic functions, rw_locks do not"
# endif /* HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS */

/**********************************************************//**
Returns the resulting value; ptr is a pointer to the target, amount is the
amount of increment. */

# define os_atomic_increment_uint32(ptr, amount) \
	atomic_add_32_nv(ptr, amount)

# define os_atomic_increment_ulint(ptr, amount) \
	atomic_add_long_nv(ptr, amount)

# define os_atomic_increment_lint(ptr, amount) \
	os_atomic_increment_ulint((ulong_t*) ptr, amount)

# define os_atomic_increment_uint64(ptr, amount) \
	atomic_add_64_nv(ptr, amount)

/* Returns the resulting value; ptr is a pointer to the target, amount is
the amount to decrement. */

# define os_atomic_decrement_uint32(ptr, amount) \
	os_atomic_increment_uint32(ptr, -(amount))

# define os_atomic_decrement_lint(ptr, amount) \
	os_atomic_increment_ulint((ulong_t*) ptr, -(amount))

# define os_atomic_decrement_ulint(ptr, amount) \
	os_atomic_increment_ulint(ptr, -(amount))

# define os_atomic_decrement_uint64(ptr, amount) \
	os_atomic_increment_uint64(ptr, -(amount))

# ifdef IB_LOCK_WORD_IS_BYTE

/** Do an atomic xchg and set to non-zero.
@param[in,out]	ptr		Memory location to set to non-zero
@return the previous value */
inline
lock_word_t
os_atomic_test_and_set(volatile lock_word_t* ptr)
{
	return(atomic_swap_uchar(ptr, 1));
}

/** Do an atomic xchg and set to zero.
@param[in,out]	ptr		Memory location to set to zero
@return the previous value */
inline
lock_word_t
os_atomic_clear(volatile lock_word_t* ptr)
{
	return(atomic_swap_uchar(ptr, 0));
}

# else

/** Do an atomic xchg and set to non-zero.
@param[in,out]	ptr		Memory location to set to non-zero
@return the previous value */
inline
lock_word_t
os_atomic_test_and_set(volatile lock_word_t* ptr)
{
	return(atomic_swap_ulong(ptr, 1));
}

/** Do an atomic xchg and set to zero.
@param[in,out]	ptr		Memory location to set to zero
@return the previous value */
inline
lock_word_t
os_atomic_clear(volatile lock_word_t* ptr)
{
	return(atomic_swap_ulong(ptr, 0));
}

# endif /* IB_LOCK_WORD_IS_BYTE */
#elif defined(HAVE_WINDOWS_ATOMICS)

# define HAVE_ATOMIC_BUILTINS
# define HAVE_ATOMIC_BUILTINS_BYTE

# ifndef _WIN32
#  define HAVE_ATOMIC_BUILTINS_64
# endif

/**********************************************************//**
Atomic compare and exchange of signed integers (both 32 and 64 bit).
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
lint
win_cmp_and_xchg_lint(
/*==================*/
	volatile lint*	ptr,		/*!< in/out: source/destination */
	lint		new_val,	/*!< in: exchange value */
	lint		old_val);	/*!< in: value to compare to */

/**********************************************************//**
Atomic addition of signed integers.
@return initial value of the variable pointed to by ptr */
UNIV_INLINE
lint
win_xchg_and_add(
/*=============*/
	volatile lint*	ptr,	/*!< in/out: address of destination */
	lint		val);	/*!< in: number to be added */

/**********************************************************//**
Atomic compare and exchange of unsigned integers.
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
ulint
win_cmp_and_xchg_ulint(
/*===================*/
	volatile ulint*	ptr,		/*!< in/out: source/destination */
	ulint		new_val,	/*!< in: exchange value */
	ulint		old_val);	/*!< in: value to compare to */

/**********************************************************//**
Atomic compare and exchange of 32 bit unsigned integers.
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
DWORD
win_cmp_and_xchg_dword(
/*===================*/
	volatile DWORD*	ptr,		/*!< in/out: source/destination */
	DWORD		new_val,	/*!< in: exchange value */
	DWORD		old_val);	/*!< in: value to compare to */

/**********************************************************//**
Returns true if swapped; ptr is a pointer to the target, old_val is the
value to compare to, new_val is the value to swap in. */

# define os_compare_and_swap_uint32(ptr, old_val, new_val) \
	(InterlockedCompareExchange(reinterpret_cast<volatile long*>(ptr), \
				    new_val, old_val) == old_val)

# define os_compare_and_swap_ulint(ptr, old_val, new_val) \
	(win_cmp_and_xchg_ulint(ptr, new_val, old_val) == old_val)

# define os_compare_and_swap_lint(ptr, old_val, new_val) \
	(win_cmp_and_xchg_lint(ptr, new_val, old_val) == old_val)

/* Windows thread objects can always be passed to Windows atomic
functions */
# define os_compare_and_swap_thread_id(ptr, old_val, new_val) \
	(win_cmp_and_xchg_dword(ptr, new_val, old_val) == old_val)

# define INNODB_RW_LOCKS_USE_ATOMICS
# define IB_ATOMICS_STARTUP_MSG \
	"Mutexes and rw_locks use Windows interlocked functions"

/**********************************************************//**
Returns the resulting value; ptr is a pointer to the target, amount is the
amount of increment. */

# define os_atomic_increment_lint(ptr, amount) \
	(win_xchg_and_add(ptr, amount) + amount)

# define os_atomic_increment_uint32(ptr, amount) \
	((ulint) InterlockedExchangeAdd((long*) ptr, amount))

# define os_atomic_increment_ulint(ptr, amount) \
	((ulint) (win_xchg_and_add((lint*) ptr, (lint) amount) + amount))

# define os_atomic_increment_uint64(ptr, amount)		\
	((ib_uint64_t) (InterlockedExchangeAdd64(		\
				(ib_int64_t*) ptr,		\
				(ib_int64_t) amount) + amount))

/**********************************************************//**
Returns the resulting value; ptr is a pointer to the target, amount is the
amount to decrement. There is no atomic subtract function on Windows. */

# define os_atomic_decrement_uint32(ptr, amount) \
	((ulint) InterlockedExchangeAdd((long*) ptr, (-amount)))

# define os_atomic_decrement_lint(ptr, amount) \
	(win_xchg_and_add(ptr, -(lint) amount) - amount)

# define os_atomic_decrement_ulint(ptr, amount) \
	((ulint) (win_xchg_and_add((lint*) ptr, -(lint) amount) - amount))

# define os_atomic_decrement_uint64(ptr, amount)		\
	((ib_uint64_t) (InterlockedExchangeAdd64(		\
				(ib_int64_t*) ptr,		\
				-(ib_int64_t) amount) - amount))

/** Do an atomic test-and-set.
InterlockedExchange() operates on LONG, and the LONG will be clobbered.
@param[in,out]	ptr		Memory location to set to non-zero
@return the previous value */
inline
lock_word_t
os_atomic_test_and_set(volatile lock_word_t* ptr)
{
	return(InterlockedExchange(ptr, 1));
}

/** Do an atomic release.
InterlockedExchange() operates on LONG, and the LONG will be clobbered.
@param[in,out]	ptr		Memory location to set to zero
@return the previous value */
inline
lock_word_t
os_atomic_clear(volatile lock_word_t* ptr)
{
	return(InterlockedExchange(ptr, 0));
}

#else
# define IB_ATOMICS_STARTUP_MSG \
	"Mutexes and rw_locks use InnoDB's own implementation"
#endif
#ifdef HAVE_ATOMIC_BUILTINS
#define os_atomic_inc_ulint(m,v,d)	os_atomic_increment_ulint(v, d)
#define os_atomic_dec_ulint(m,v,d)	os_atomic_decrement_ulint(v, d)
#else
#define os_atomic_inc_ulint(m,v,d)	os_atomic_inc_ulint_func(m, v, d)
#define os_atomic_dec_ulint(m,v,d)	os_atomic_dec_ulint_func(m, v, d)
#endif /* HAVE_ATOMIC_BUILTINS */

/**********************************************************//**
The following macros are used to update a specified counter atomically
if HAVE_ATOMIC_BUILTINS is defined. Otherwise, the mutex passed in is
used for synchronization. */
#ifdef HAVE_ATOMIC_BUILTINS
#define os_increment_counter_by_amount(mutex, counter, amount)	\
	(void) os_atomic_increment_ulint(&counter, amount)

#define os_decrement_counter_by_amount(mutex, counter, amount)	\
	(void) os_atomic_increment_ulint(&counter, (-((lint) amount)))
#else
#define os_increment_counter_by_amount(mutex, counter, amount)	\
	do {							\
		mutex_enter(&(mutex));				\
		(counter) += (amount);				\
		mutex_exit(&(mutex));				\
	} while (0)

#define os_decrement_counter_by_amount(mutex, counter, amount)	\
	do {							\
		ut_a(counter >= amount);			\
		mutex_enter(&(mutex));				\
		(counter) -= (amount);				\
		mutex_exit(&(mutex));				\
	} while (0)
#endif  /* HAVE_ATOMIC_BUILTINS */

#define os_inc_counter(mutex, counter)				\
	os_increment_counter_by_amount(mutex, counter, 1)

#define os_dec_counter(mutex, counter)				\
	do {							\
		os_decrement_counter_by_amount(mutex, counter, 1);\
	} while (0)
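
/* Usage sketch (illustrative only): the same call site works with and
without atomic builtins; the mutex argument is simply ignored when
HAVE_ATOMIC_BUILTINS is defined. The mutex and counter names below are
hypothetical.

	os_inc_counter(my_count_mutex, my_count);
	os_increment_counter_by_amount(my_count_mutex, my_count, 10);
	os_dec_counter(my_count_mutex, my_count);
*/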

/** Barrier definitions for memory ordering */
#ifdef IB_STRONG_MEMORY_MODEL
/* A performance regression was observed under some conditions on Intel
architecture, so the memory barriers are disabled there for now. */
# define os_rmb
# define os_wmb
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"Memory barrier is not used"
#elif defined(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
# define HAVE_MEMORY_BARRIER
# define os_rmb	__atomic_thread_fence(__ATOMIC_ACQUIRE)
# define os_wmb	__atomic_thread_fence(__ATOMIC_RELEASE)
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"GCC builtin __atomic_thread_fence() is used for memory barrier"

#elif defined(HAVE_IB_GCC_SYNC_SYNCHRONISE)
# define HAVE_MEMORY_BARRIER
# define os_rmb	__sync_synchronize()
# define os_wmb	__sync_synchronize()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"GCC builtin __sync_synchronize() is used for memory barrier"

#elif defined(HAVE_IB_MACHINE_BARRIER_SOLARIS)
# define HAVE_MEMORY_BARRIER
# include <mbarrier.h>
# define os_rmb	__machine_r_barrier()
# define os_wmb	__machine_w_barrier()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"Solaris memory ordering functions are used for memory barrier"

#elif defined(HAVE_WINDOWS_MM_FENCE) && defined(_WIN64)
# define HAVE_MEMORY_BARRIER
# include <mmintrin.h>
# define os_rmb	_mm_lfence()
# define os_wmb	_mm_sfence()
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"_mm_lfence() and _mm_sfence() are used for memory barrier"

#else
# define os_rmb
# define os_wmb
# define IB_MEMORY_BARRIER_STARTUP_MSG \
	"Memory barrier is not used"
#endif
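
/* Usage sketch (illustrative only): the classic publish/consume pairing
of the two barriers. The writer orders its data store before the flag
store with os_wmb; the reader orders the flag load before the data load
with os_rmb. The shared variables below are hypothetical.

	// writer
	shared_data = 42;
	os_wmb;
	shared_flag = 1;

	// reader
	while (!shared_flag) {
	}
	os_rmb;
	ut_a(shared_data == 42);
*/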

#ifndef UNIV_NONINL
#include "os0sync.ic"
#endif

#endif /* os0sync_h */