/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2017, 2020, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/sync0rw.ic
The read-write lock (for threads)

Created 9/11/1995 Heikki Tuuri
*******************************************************/

#include "os0event.h"
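
/* An informal summary of the lock_word encoding, as decoded by the
accessors below (sync0rw.h remains the authoritative reference for
X_LOCK_DECR, X_LOCK_HALF_DECR and the full state table):

	lock_word == X_LOCK_DECR		unlocked
	X_LOCK_HALF_DECR < lock_word < X_LOCK_DECR
						s-locked only
	0 < lock_word <= X_LOCK_HALF_DECR	sx-locked, possibly with s-locks
	lock_word == 0				x-locked, no sx-lock
	-X_LOCK_HALF_DECR < lock_word < 0	s-locked with a queued x-waiter
	lock_word == -X_LOCK_HALF_DECR		x-locked together with sx-locks
	-X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR
						s-locked, x-waiter holding sx
	lock_word <= -X_LOCK_DECR		recursive x-locks (2 or more)

Roughly: readers subtract 1, sx-lockers subtract X_LOCK_HALF_DECR and
x-lockers subtract X_LOCK_DECR, so a single word encodes every
combination of holders and waiters. */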

/******************************************************************//**
Lock an rw-lock in shared mode for the current thread. If the rw-lock is
locked in exclusive mode, or there is an exclusive lock request waiting,
the function spins a preset time (controlled by srv_n_spin_wait_rounds),
waiting for the lock before suspending the thread. */
void
rw_lock_s_lock_spin(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line);	/*!< in: line where requested */
#ifdef UNIV_DEBUG
/******************************************************************//**
Inserts the debug information for an rw-lock. */
void
rw_lock_add_debug_info(
/*===================*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		pass,		/*!< in: pass value */
	ulint		lock_type,	/*!< in: lock type */
	const char*	file_name,	/*!< in: file where requested */
	unsigned	line);		/*!< in: line where requested */
/******************************************************************//**
Removes a debug information struct for an rw-lock. */
void
rw_lock_remove_debug_info(
/*======================*/
	rw_lock_t*	lock,		/*!< in: rw-lock */
	ulint		pass,		/*!< in: pass value */
	ulint		lock_type);	/*!< in: lock type */
#endif /* UNIV_DEBUG */

/******************************************************************//**
Returns the write-status of the lock - this function made more sense
with the old rw_lock implementation.
@return RW_LOCK_NOT_LOCKED, RW_LOCK_X, RW_LOCK_X_WAIT, RW_LOCK_SX */
UNIV_INLINE
ulint
rw_lock_get_writer(
/*===============*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_word = lock->lock_word;

	ut_ad(lock_word <= X_LOCK_DECR);
	if (lock_word > X_LOCK_HALF_DECR) {
		/* return NOT_LOCKED in s-lock state, like the writer
		member of the old lock implementation. */
		return(RW_LOCK_NOT_LOCKED);
	} else if (lock_word > 0) {
		/* sx-locked, no x-locks */
		return(RW_LOCK_SX);
	} else if (lock_word == 0
		   || lock_word == -X_LOCK_HALF_DECR
		   || lock_word <= -X_LOCK_DECR) {
		/* x-lock with sx-lock is also treated as RW_LOCK_EX */
		return(RW_LOCK_X);
	} else {
		/* x-waiter with sx-lock is also treated as RW_LOCK_WAIT_EX
		e.g. -X_LOCK_HALF_DECR < lock_word < 0 : without sx
		     -X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR : with sx */
		return(RW_LOCK_X_WAIT);
	}
}
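
/* Illustrative values (not exhaustive; read together with the branches
above):
	lock_word == X_LOCK_DECR	-> RW_LOCK_NOT_LOCKED (free)
	lock_word == X_LOCK_DECR - 3	-> RW_LOCK_NOT_LOCKED (3 s-locks)
	lock_word == X_LOCK_HALF_DECR	-> RW_LOCK_SX
	lock_word == 0			-> RW_LOCK_X
	lock_word == -2			-> RW_LOCK_X_WAIT (x-waiter, 2 s-locks)
	lock_word == -X_LOCK_DECR	-> RW_LOCK_X (recursive x-lock) */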

/******************************************************************//**
Returns the number of readers (s-locks).
@return number of readers */
UNIV_INLINE
ulint
rw_lock_get_reader_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_word = lock->lock_word;
	ut_ad(lock_word <= X_LOCK_DECR);

	if (lock_word > X_LOCK_HALF_DECR) {
		/* s-locked, no x-waiter */
		return ulint(X_LOCK_DECR - lock_word);
	} else if (lock_word > 0) {
		/* s-locked, with sx-locks only */
		return ulint(X_LOCK_HALF_DECR - lock_word);
	} else if (lock_word == 0) {
		/* x-locked */
		return(0);
	} else if (lock_word > -X_LOCK_HALF_DECR) {
		/* s-locked, with x-waiter */
		return((ulint)(-lock_word));
	} else if (lock_word == -X_LOCK_HALF_DECR) {
		/* x-locked with sx-locks */
		return(0);
	} else if (lock_word > -X_LOCK_DECR) {
		/* s-locked, with x-waiter and sx-lock */
		return((ulint)(-(lock_word + X_LOCK_HALF_DECR)));
	}
	/* no s-locks */
	return(0);
}
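
/* Illustrative values (read together with the branches above):
	lock_word == X_LOCK_DECR - 3		-> 3 plain s-locks
	lock_word == X_LOCK_HALF_DECR - 2	-> 2 s-locks under an sx-lock
	lock_word == -2				-> 2 s-locks, x-waiter queued
	lock_word == 0 or -X_LOCK_HALF_DECR	-> 0, the lock is x-locked */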

/******************************************************************//**
Returns the value of writer_count for the lock. Does not reserve the lock
mutex, so the caller must be sure it is not changed during the call.
@return value of writer_count */
UNIV_INLINE
ulint
rw_lock_get_x_lock_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_copy = lock->lock_word;
	ut_ad(lock_copy <= X_LOCK_DECR);

	if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
		/* "1 x-lock" or "1 x-lock + sx-locks" */
		return(1);
	} else if (lock_copy > -X_LOCK_DECR) {
		/* s-locks, one or more sx-locks if > 0, or x-waiter if < 0 */
		return(0);
	} else if (lock_copy > -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* no s-lock, no sx-lock, 2 or more x-locks.
		First 2 x-locks are set with -X_LOCK_DECR,
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - lock_copy);
	} else {
		/* no s-lock, 1 or more sx-lock, 2 or more x-locks.
		First 2 x-locks are set with -(X_LOCK_DECR + X_LOCK_HALF_DECR),
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - X_LOCK_HALF_DECR - lock_copy);
	}
}
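
/* Illustrative values (read together with the branches above):
	lock_word == 0			-> 1 (single x-lock)
	lock_word == -X_LOCK_HALF_DECR	-> 1 (x-lock held together with sx)
	lock_word == -X_LOCK_DECR	-> 2 (x-lock taken twice)
	lock_word == -X_LOCK_DECR - 1	-> 3 (x-lock taken three times)
	lock_word == X_LOCK_DECR	-> 0 (not x-locked at all) */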

/******************************************************************//**
Returns the number of sx-locks for the lock. Does not reserve the lock
mutex, so the caller must be sure it is not changed during the call.
@return value of sx-lock count */
UNIV_INLINE
ulint
rw_lock_get_sx_lock_count(
/*======================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
	int32_t lock_copy = lock->lock_word;

	ut_ad(lock_copy <= X_LOCK_DECR);

	while (lock_copy < 0) {
		lock_copy += X_LOCK_DECR;
	}

	if (lock_copy > 0 && lock_copy <= X_LOCK_HALF_DECR) {
		return(lock->sx_recursive);
	}

	return(0);
#else /* UNIV_DEBUG */
	return(lock->sx_recursive);
#endif /* UNIV_DEBUG */
}
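
/* NOTE: in debug builds the loop above keeps adding X_LOCK_DECR until
lock_word is non-negative, which cancels out any recursive x-lock
component; the remainder falls in (0, X_LOCK_HALF_DECR] exactly when an
sx-lock is held, so sx_recursive is reported only in that case. */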

/******************************************************************//**
Tries to decrement lock_word by the given amount while it stays above
the threshold. Recursive x-locks are not handled here: they are the
caller's responsibility and need not be atomic, since they are performed
by the current lock holder. Returns true if the decrement was made,
false if not.
@return true if decr occurs */
UNIV_INLINE
bool
rw_lock_lock_word_decr(
/*===================*/
	rw_lock_t*	lock,		/*!< in/out: rw-lock */
	int32_t		amount,		/*!< in: amount to decrement */
	int32_t		threshold)	/*!< in: decrement only while
					lock_word > threshold */
{
	int32_t lock_copy = lock->lock_word;

	while (lock_copy > threshold) {
		if (lock->lock_word.compare_exchange_strong(
			lock_copy,
			lock_copy - amount,
			std::memory_order_acquire,
			std::memory_order_relaxed)) {

			return(true);
		}
	}
	return(false);
}
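
/* Usage sketch: the s-lock path below calls
rw_lock_lock_word_decr(lock, 1, 0), i.e. take one reader slot as long as
lock_word > 0 (no x-lock held, no x-waiter queued; an sx-lock alone does
not block readers). The x- and sx-lock paths elsewhere pass larger
amounts and thresholds. Note that a failed compare_exchange_strong()
reloads lock_copy with the current value, so the loop keeps retrying
until either the CAS succeeds or lock_word falls to the threshold. */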

/******************************************************************//**
Low-level function which tries to lock an rw-lock in s-mode. Performs no
spinning.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_s_lock_low(
/*===============*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass MY_ATTRIBUTE((unused)),
				/*!< in: pass value; != 0, if the lock will be
				passed to another thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (!rw_lock_lock_word_decr(lock, 1, 0)) {
		/* Locking did not succeed */
		return(FALSE);
	}

	ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_S, file_name, line));

	return(TRUE);	/* locking succeeded */
}

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in shared mode for the current thread. If the rw-lock is locked
in exclusive mode, or there is an exclusive lock request waiting, the
function spins a preset time (controlled by srv_n_spin_wait_rounds),
waiting for the lock, before suspending the thread. */
UNIV_INLINE
void
rw_lock_s_lock_func(
/*================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	/* NOTE: As we do not know the thread ids for threads which have
	s-locked a latch, and s-lockers will be served only after waiting
	x-lock requests have been fulfilled, then if this thread already
	owns an s-lock here, it may end up in a deadlock with another thread
	which requests an x-lock here. Therefore, we will forbid recursive
	s-locking of a latch: the following assert will warn the programmer
	of the possibility of this kind of a deadlock. If we want to implement
	safe recursive s-locking, we should keep in a list the thread ids of
	the threads which have s-locked a latch. This would use some CPU
	time. */

	ut_ad(!rw_lock_own_flagged(lock, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));

	if (!rw_lock_s_lock_low(lock, pass, file_name, line)) {

		/* Did not succeed, try spin wait */

		rw_lock_s_lock_spin(lock, pass, file_name, line);
	}
}
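
/* Usage sketch (illustrative only; callers go through the
rw_lock_s_lock() and rw_lock_s_unlock() macros of sync0rw.h, which
supply __FILE__ and __LINE__, rather than calling this function
directly):

	rw_lock_s_lock(lock);
	... read the structure protected by this latch ...
	rw_lock_s_unlock(lock);

Recursive s-locking by the same thread is forbidden, as explained by the
assertion above. */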

/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread if the lock can be
obtained immediately.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_x_lock_func_nowait(
/*=======================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	int32_t oldval = X_LOCK_DECR;

	if (lock->lock_word.compare_exchange_strong(oldval, 0,
						std::memory_order_acquire,
						std::memory_order_relaxed)) {
		lock->writer_thread = os_thread_get_curr_id();

	} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
		/* Relock: even though no other thread can modify (lock, unlock
		or reserve) lock_word while there is an exclusive writer and
		this is the writer thread, we still want concurrent threads to
		observe consistent values. */
		if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
			/* There is 1 x-lock */
			lock->lock_word.fetch_sub(X_LOCK_DECR,
						  std::memory_order_relaxed);
		} else if (oldval <= -X_LOCK_DECR) {
			/* There are 2 or more x-locks */
			lock->lock_word.fetch_sub(1,
						  std::memory_order_relaxed);
			/* Watch for too many recursive locks */
			ut_ad(oldval < 1);
		} else {
			/* Failure */
			return(FALSE);
		}
	} else {
		/* Failure */
		return(FALSE);
	}

	ut_d(rw_lock_add_debug_info(lock, 0, RW_LOCK_X, file_name, line));

	lock->last_x_file_name = file_name;
	lock->last_x_line = line;

	ut_ad(rw_lock_validate(lock));

	return(TRUE);
}
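
/* Worked example, derived from the branches above, for one thread
calling this function repeatedly:
	1st call: CAS succeeds, lock_word X_LOCK_DECR -> 0
	2nd call: oldval == 0, fetch_sub(X_LOCK_DECR) -> -X_LOCK_DECR
	3rd call: oldval <= -X_LOCK_DECR, fetch_sub(1) -> -X_LOCK_DECR - 1
A thread that does not hold the x-lock, or an oldval matching neither
relock case (e.g. s-locks still present), returns FALSE. */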

/******************************************************************//**
Releases a shared mode lock. */
UNIV_INLINE
void
rw_lock_s_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));

	/* Increment lock_word to indicate 1 less reader */
	int32_t lock_word = lock->lock_word.fetch_add(
		1, std::memory_order_release);

	if (lock_word == -1 || lock_word == -X_LOCK_HALF_DECR - 1) {
		/* A wait_ex waiter exists. It may not be asleep, but we
		signal anyway. We do not wake other waiters, because they
		cannot exist without the wait_ex waiter, and the wait_ex
		waiter goes first. */
		os_event_set(lock->wait_ex_event);
		sync_array_object_signalled();
	} else {
		ut_ad(lock_word > -X_LOCK_DECR);
		ut_ad(lock_word < X_LOCK_DECR);
	}

	ut_ad(rw_lock_validate(lock));
}
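
/* Note that fetch_add() returns the value before the increment:
lock_word == -1 here means this was the last s-lock in front of a queued
x-waiter, and after the increment lock_word reaches 0, the value the
waiter parked on wait_ex_event is waiting for. The
-X_LOCK_HALF_DECR - 1 case is the same situation when that waiter also
holds an sx-lock (target value -X_LOCK_HALF_DECR). */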

/******************************************************************//**
Releases an exclusive mode lock. */
UNIV_INLINE
void
rw_lock_x_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	int32_t lock_word = lock->lock_word;

	if (lock_word == 0) {
		/* Last caller in a possible recursive chain. */
		lock->writer_thread = 0;
	}

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_X));

	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
		/* Last X-lock owned by this thread; it may still hold
		SX-locks.
		ACQ_REL due to...
		RELEASE: we release rw-lock
		ACQUIRE: we want waiters to be loaded after lock_word is stored */
		lock->lock_word.fetch_add(X_LOCK_DECR,
					  std::memory_order_acq_rel);

		/* This no longer has an X-lock but it may still have
		an SX-lock. So it is now free for S-locks by other threads.
		We need to signal read/write waiters.
		We do not need to signal wait_ex waiters, since they cannot
		exist when there is a writer. */
		if (lock->waiters) {
			lock->waiters = 0;
			os_event_set(lock->event);
			sync_array_object_signalled();
		}
	} else if (lock_word == -X_LOCK_DECR
		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* There are 2 x-locks */
		lock->lock_word.fetch_add(X_LOCK_DECR);
	} else {
		/* There are more than 2 x-locks. */
		ut_ad(lock_word < -X_LOCK_DECR);
		lock->lock_word.fetch_add(1);
	}

	ut_ad(rw_lock_validate(lock));
}
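
/* Worked example, mirroring rw_lock_x_lock_func_nowait(): with three
recursive x-locks lock_word == -X_LOCK_DECR - 1, and successive unlocks
move it
	-X_LOCK_DECR - 1 -> -X_LOCK_DECR -> 0 -> X_LOCK_DECR (free),
with waiters signalled on lock->event only at the last step. If sx-locks
are still held, the last step starts from -X_LOCK_HALF_DECR and (with
the usual X_LOCK_DECR == 2 * X_LOCK_HALF_DECR relation) lands on
X_LOCK_HALF_DECR instead, so readers may again s-lock while the sx-lock
is retained. */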

/******************************************************************//**
Releases an sx mode lock. */
UNIV_INLINE
void
rw_lock_sx_unlock_func(
/*===================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	ut_ad(rw_lock_get_sx_lock_count(lock));
	ut_ad(lock->sx_recursive > 0);

	--lock->sx_recursive;

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));

	if (lock->sx_recursive == 0) {
		int32_t lock_word = lock->lock_word;
		/* Last caller in a possible recursive chain. */
		if (lock_word > 0) {
			lock->writer_thread = 0;
			ut_ad(lock_word <= INT_MAX32 - X_LOCK_HALF_DECR);

			/* Last SX-lock owned by this thread, doesn't own X-lock.
			ACQ_REL due to...
			RELEASE: we release rw-lock
			ACQUIRE: we want waiters to be loaded after lock_word is stored */
			lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
						  std::memory_order_acq_rel);

			/* Lock is now free. May have to signal read/write
			waiters. We do not need to signal wait_ex waiters,
			since they cannot exist when there is an sx-lock
			holder. */
			if (lock->waiters) {
				lock->waiters = 0;
				os_event_set(lock->event);
				sync_array_object_signalled();
			}
		} else {
			/* still has x-lock */
			ut_ad(lock_word == -X_LOCK_HALF_DECR ||
			      lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
			lock->lock_word.fetch_add(X_LOCK_HALF_DECR);
		}
	}

	ut_ad(rw_lock_validate(lock));
}
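
/* Worked example: an sx-lock held on its own sits at
lock_word == X_LOCK_HALF_DECR - (number of s-locks); releasing the last
recursion level adds X_LOCK_HALF_DECR back and wakes ordinary waiters.
If the same thread also holds the x-lock (lock_word == -X_LOCK_HALF_DECR,
or at or below -(X_LOCK_DECR + X_LOCK_HALF_DECR) when the x-lock is
recursive), only the sx component is returned and the x-lock stays in
force. */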

#ifdef UNIV_PFS_RWLOCK

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_create_func().
NOTE! Please use the corresponding macro rw_lock_create(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_create_func(
/*====================*/
	mysql_pfs_key_t	key,		/*!< in: key registered with
					performance schema */
	rw_lock_t*	lock,		/*!< in/out: pointer to memory */
# ifdef UNIV_DEBUG
	latch_level_t	level,		/*!< in: level */
# endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	unsigned	cline)		/*!< in: file line where created */
{
	ut_d(new(lock) rw_lock_t());

	/* Initialize the rwlock for performance schema */
	lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key, lock);

	/* The actual function to initialize an rwlock */
	rw_lock_create_func(lock,
#ifdef UNIV_DEBUG
			    level,
#endif /* UNIV_DEBUG */
			    cfile_name,
			    cline);
}
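
/* Usage sketch (illustrative only; the key and latch names are
placeholders, and real code goes through the rw_lock_create() macro of
sync0rw.h, which supplies __FILE__ and __LINE__ and routes here when
UNIV_PFS_RWLOCK is defined):

	rw_lock_create(my_rwlock_key, &my_rw_lock, SYNC_NO_ORDER_CHECK);

The mysql_pfs_key_t is what names the latch in the performance_schema
rwlock instrumentation tables. */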
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_lock_func()
NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the lock will
				be passed to another thread to unlock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

		/* Record the acquisition of a read-write lock in exclusive
		mode in performance schema */
/* MySQL 5.7 New PSI */
#define PSI_RWLOCK_EXCLUSIVELOCK PSI_RWLOCK_WRITELOCK

		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_EXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		rw_lock_x_lock_func(
			lock, pass, file_name, static_cast<uint>(line));

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
		}
	} else {
		rw_lock_x_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for
rw_lock_x_lock_func_nowait()
NOTE! Please use the corresponding macro rw_lock_x_lock_nowait(),
not directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_x_lock_func_nowait(
/*===========================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool		ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

		/* Record the acquisition of a read-write trylock in exclusive
		mode in performance schema */

#define PSI_RWLOCK_TRYEXCLUSIVELOCK PSI_RWLOCK_TRYWRITELOCK
		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_TRYEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_free_func()
NOTE! Please use the corresponding macro rw_lock_free(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_free_func(
/*==================*/
	rw_lock_t*	lock)	/*!< in: pointer to rw-lock */
{
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
		lock->pfs_psi = NULL;
	}

	rw_lock_free_func(lock);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_func()
NOTE! Please use the corresponding macro rw_lock_s_lock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_SHAREDLOCK PSI_RWLOCK_READLOCK
		/* Instrumented to inform we are acquiring a shared rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDLOCK,
			file_name, static_cast<uint>(line));

		rw_lock_s_lock_func(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
		}
	} else {
		rw_lock_s_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_lock_func()
NOTE! Please use the corresponding macro rw_lock_sx_lock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_sx_lock_func(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name,/*!< in: file name where lock
				requested */
	unsigned	line)	/*!< in: line where requested */
{
	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_SHAREDEXCLUSIVELOCK PSI_RWLOCK_WRITELOCK
		/* Instrumented to inform we are acquiring a shared-exclusive
		(SX) rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		rw_lock_sx_lock_func(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
		}
	} else {
		rw_lock_sx_lock_func(lock, pass, file_name, line);
	}
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_lock_low()
NOTE! Please use the corresponding macro rw_lock_s_lock(), not
directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_s_lock_low(
/*===================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool		ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_TRYSHAREDLOCK PSI_RWLOCK_TRYREADLOCK
		/* Instrumented to inform we are acquiring a shared rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi, PSI_RWLOCK_TRYSHAREDLOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_s_lock_low(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_s_lock_low(lock, pass, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_lock_low()
NOTE! Please use the corresponding macro, not
directly this function!
@return TRUE if success */
UNIV_INLINE
ibool
pfs_rw_lock_sx_lock_low(
/*====================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock will be passed to another
				thread to unlock */
	const char*	file_name, /*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	ibool		ret;

	if (lock->pfs_psi != NULL) {
		PSI_rwlock_locker*	locker;
		PSI_rwlock_locker_state	state;

#define PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK PSI_RWLOCK_TRYWRITELOCK
		/* Instrumented to inform we are acquiring a shared
		exclusive rwlock */
		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
			&state, lock->pfs_psi,
			PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK,
			file_name, static_cast<uint>(line));

		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);

		if (locker != NULL) {
			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
				locker, static_cast<int>(ret));
		}
	} else {
		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);
	}

	return(ret);
}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_unlock_func()
NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_unlock_func()
NOTE! Please use the corresponding macro rw_lock_sx_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_sx_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}

/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_unlock_func()
NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}
#endif /* UNIV_PFS_RWLOCK */