1/*****************************************************************************
2
3Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
4Copyright (c) 2008, Google Inc.
5Copyright (c) 2017, 2020, MariaDB Corporation.
6
7Portions of this file contain modifications contributed and copyrighted by
8Google, Inc. Those modifications are gratefully acknowledged and are described
9briefly in the InnoDB documentation. The contributions by Google are
10incorporated with their permission, and subject to the conditions contained in
11the file COPYING.Google.
12
13This program is free software; you can redistribute it and/or modify it under
14the terms of the GNU General Public License as published by the Free Software
15Foundation; version 2 of the License.
16
17This program is distributed in the hope that it will be useful, but WITHOUT
18ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
19FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
20
21You should have received a copy of the GNU General Public License along with
22this program; if not, write to the Free Software Foundation, Inc.,
2351 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
24
25*****************************************************************************/
26
27/**************************************************//**
28@file include/sync0rw.ic
29The read-write lock (for threads)
30
31Created 9/11/1995 Heikki Tuuri
32*******************************************************/
33
34#include "os0event.h"
35
36/******************************************************************//**
37Lock an rw-lock in shared mode for the current thread. If the rw-lock is
38locked in exclusive mode, or there is an exclusive lock request waiting,
39the function spins a preset time (controlled by srv_n_spin_wait_rounds),
40waiting for the lock before suspending the thread. */
41void
42rw_lock_s_lock_spin(
43/*================*/
44	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
45	ulint		pass,	/*!< in: pass value; != 0, if the lock will
46				be passed to another thread to unlock */
47	const char*	file_name,/*!< in: file name where lock requested */
48	unsigned	line);	/*!< in: line where requested */
49#ifdef UNIV_DEBUG
50/******************************************************************//**
51Inserts the debug information for an rw-lock. */
52void
53rw_lock_add_debug_info(
54/*===================*/
55	rw_lock_t*	lock,		/*!< in: rw-lock */
56	ulint		pass,		/*!< in: pass value */
57	ulint		lock_type,	/*!< in: lock type */
58	const char*	file_name,	/*!< in: file where requested */
59	unsigned	line);		/*!< in: line where requested */
60/******************************************************************//**
61Removes a debug information struct for an rw-lock. */
62void
63rw_lock_remove_debug_info(
64/*======================*/
65	rw_lock_t*	lock,		/*!< in: rw-lock */
66	ulint		pass,		/*!< in: pass value */
67	ulint		lock_type);	/*!< in: lock type */
68#endif /* UNIV_DEBUG */
69
/******************************************************************//**
Returns the write-status of the lock - this function made more sense
with the old rw_lock implementation.
The status is decoded from a single snapshot of the atomic lock_word;
the answer may already be stale when the caller inspects it, so it is
only reliable when the caller otherwise prevents concurrent changes.
@return RW_LOCK_NOT_LOCKED, RW_LOCK_X, RW_LOCK_X_WAIT, RW_LOCK_SX */
UNIV_INLINE
ulint
rw_lock_get_writer(
/*===============*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	/* One snapshot; all branches below decode this single value. */
	int32_t lock_word = lock->lock_word;

	ut_ad(lock_word <= X_LOCK_DECR);
	if (lock_word > X_LOCK_HALF_DECR) {
		/* return NOT_LOCKED in s-lock state, like the writer
		member of the old lock implementation. */
		return(RW_LOCK_NOT_LOCKED);
	} else if (lock_word > 0) {
		/* sx-locked, no x-locks */
		return(RW_LOCK_SX);
	} else if (lock_word == 0
		   || lock_word == -X_LOCK_HALF_DECR
		   || lock_word <= -X_LOCK_DECR) {
		/* x-lock with sx-lock is also treated as RW_LOCK_EX */
		return(RW_LOCK_X);
	} else {
		/* x-waiter with sx-lock is also treated as RW_LOCK_WAIT_EX
		e.g. -X_LOCK_HALF_DECR < lock_word < 0 : without sx
		     -X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR : with sx */
		return(RW_LOCK_X_WAIT);
	}
}
102
/******************************************************************//**
Returns the number of readers (s-locks).
The count is derived from one snapshot of the atomic lock_word, so it
can be stale unless the caller prevents concurrent lock_word changes.
@return number of readers */
UNIV_INLINE
ulint
rw_lock_get_reader_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	int32_t lock_word = lock->lock_word;
	ut_ad(lock_word <= X_LOCK_DECR);

	if (lock_word > X_LOCK_HALF_DECR) {
		/* s-locked, no x-waiter */
		return ulint(X_LOCK_DECR - lock_word);
	} else if (lock_word > 0) {
		/* s-locked, with sx-locks only */
		return ulint(X_LOCK_HALF_DECR - lock_word);
	} else if (lock_word == 0) {
		/* x-locked */
		return(0);
	} else if (lock_word > -X_LOCK_HALF_DECR) {
		/* s-locked, with x-waiter */
		return((ulint)(-lock_word));
	} else if (lock_word == -X_LOCK_HALF_DECR) {
		/* x-locked with sx-locks */
		return(0);
	} else if (lock_word > -X_LOCK_DECR) {
		/* s-locked, with x-waiter and sx-lock */
		return((ulint)(-(lock_word + X_LOCK_HALF_DECR)));
	}
	/* no s-locks */
	return(0);
}
137
/******************************************************************//**
Returns the value of writer_count for the lock. Does not reserve the lock
mutex, so the caller must be sure it is not changed during the call.
@return value of writer_count */
UNIV_INLINE
ulint
rw_lock_get_x_lock_count(
/*=====================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
	/* Decode the recursive X-lock depth from one lock_word snapshot. */
	int32_t lock_copy = lock->lock_word;
	ut_ad(lock_copy <= X_LOCK_DECR);

	if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
		/* "1 x-lock" or "1 x-lock + sx-locks" */
		return(1);
	} else if (lock_copy > -X_LOCK_DECR) {
		/* s-locks, one or more sx-locks if > 0, or x-waiter if < 0 */
		return(0);
	} else if (lock_copy > -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* no s-lock, no sx-lock, 2 or more x-locks.
		First 2 x-locks are set with -X_LOCK_DECR,
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - lock_copy);
	} else {
		/* no s-lock, 1 or more sx-lock, 2 or more x-locks.
		First 2 x-locks are set with -(X_LOCK_DECR + X_LOCK_HALF_DECR),
		all other recursive x-locks are set with -1 */
		return ulint(2 - X_LOCK_DECR - X_LOCK_HALF_DECR - lock_copy);
	}
}
169
/******************************************************************//**
Returns the number of sx-lock for the lock. Does not reserve the lock
mutex, so the caller must be sure it is not changed during the call.
In debug builds the lock_word is cross-checked against sx_recursive;
release builds simply report the sx_recursive counter.
@return value of sx-lock count */
UNIV_INLINE
ulint
rw_lock_get_sx_lock_count(
/*======================*/
	const rw_lock_t*	lock)	/*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
	int32_t lock_copy = lock->lock_word;

	ut_ad(lock_copy <= X_LOCK_DECR);

	/* Strip the (possibly recursive) X-lock contribution so that only
	the sx/s component of the lock_word remains non-negative. */
	while (lock_copy < 0) {
		lock_copy += X_LOCK_DECR;
	}

	/* 0 < lock_copy <= X_LOCK_HALF_DECR indicates sx-locks are held;
	only then is sx_recursive meaningful to report. */
	if (lock_copy > 0 && lock_copy <= X_LOCK_HALF_DECR) {
		return(lock->sx_recursive);
	}

	return(0);
#else /* UNIV_DEBUG */
	return(lock->sx_recursive);
#endif /* UNIV_DEBUG */
}
198
/******************************************************************//**
Recursive x-locks are not supported: they should be handled by the caller and
need not be atomic since they are performed by the current lock holder.
Returns true if the decrement was made, false if not.
The decrement is attempted with a CAS loop: it succeeds only while the
current lock_word stays above the given threshold, so the caller's
locking rule (e.g. "readers allowed only while lock_word > 0") is
enforced atomically together with the decrement.
@return true if decr occurs */
UNIV_INLINE
bool
rw_lock_lock_word_decr(
/*===================*/
	rw_lock_t*	lock,		/*!< in/out: rw-lock */
	int32_t		amount,		/*!< in: amount to decrement */
	int32_t		threshold)	/*!< in: threshold of judgement */
{
	/* Initial snapshot; compare_exchange_strong refreshes it on
	every failed attempt below. */
	int32_t lock_copy = lock->lock_word;

	while (lock_copy > threshold) {
		/* Acquire ordering on success: the lock acquisition must
		happen-before the protected reads that follow. */
		if (lock->lock_word.compare_exchange_strong(
			lock_copy,
			lock_copy - amount,
			std::memory_order_acquire,
			std::memory_order_relaxed)) {

			return(true);
		}

		/* Note that lock_copy was reloaded above. We will
		keep trying if a spurious conflict occurred, typically
		caused by concurrent executions of
		rw_lock_s_lock(). */

		/* Note: unlike this implementation, rw_lock::read_lock()
		allows concurrent calls without a spin loop */
	}

	/* A real conflict was detected. */
	return(false);
}
236
237/******************************************************************//**
238Low-level function which tries to lock an rw-lock in s-mode.
239@return TRUE if success */
240UNIV_INLINE
241ibool
242rw_lock_s_lock_low(
243/*===============*/
244	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
245	ulint		pass MY_ATTRIBUTE((unused)),
246				/*!< in: pass value; != 0, if the lock will be
247				passed to another thread to unlock */
248	const char*	file_name, /*!< in: file name where lock requested */
249	unsigned	line)	/*!< in: line where requested */
250{
251	if (!rw_lock_lock_word_decr(lock, 1, 0)) {
252		/* Locking did not succeed */
253		return(FALSE);
254	}
255
256	ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_S, file_name, line));
257
258	return(TRUE);	/* locking succeeded */
259}
260
261/******************************************************************//**
262NOTE! Use the corresponding macro, not directly this function! Lock an
263rw-lock in shared mode for the current thread. If the rw-lock is locked
264in exclusive mode, or there is an exclusive lock request waiting, the
265function spins a preset time (controlled by srv_n_spin_wait_rounds), waiting for
266the lock, before suspending the thread. */
267UNIV_INLINE
268void
269rw_lock_s_lock_func(
270/*================*/
271	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
272	ulint		pass,	/*!< in: pass value; != 0, if the lock will
273				be passed to another thread to unlock */
274	const char*	file_name,/*!< in: file name where lock requested */
275	unsigned	line)	/*!< in: line where requested */
276{
277	/* NOTE: As we do not know the thread ids for threads which have
278	s-locked a latch, and s-lockers will be served only after waiting
279	x-lock requests have been fulfilled, then if this thread already
280	owns an s-lock here, it may end up in a deadlock with another thread
281	which requests an x-lock here. Therefore, we will forbid recursive
282	s-locking of a latch: the following assert will warn the programmer
283	of the possibility of this kind of a deadlock. If we want to implement
284	safe recursive s-locking, we should keep in a list the thread ids of
285	the threads which have s-locked a latch. This would use some CPU
286	time. */
287
288	ut_ad(!rw_lock_own_flagged(lock, RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));
289
290	if (!rw_lock_s_lock_low(lock, pass, file_name, line)) {
291
292		/* Did not succeed, try spin wait */
293
294		rw_lock_s_lock_spin(lock, pass, file_name, line);
295	}
296}
297
/******************************************************************//**
NOTE! Use the corresponding macro, not directly this function! Lock an
rw-lock in exclusive mode for the current thread if the lock can be
obtained immediately.
Also grants a recursive X-lock when this thread is already the writer.
@return TRUE if success */
UNIV_INLINE
ibool
rw_lock_x_lock_func_nowait(
/*=======================*/
	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
	const char*	file_name,/*!< in: file name where lock requested */
	unsigned	line)	/*!< in: line where requested */
{
	/* Expect the lock to be completely free; on CAS failure oldval
	is overwritten with the current lock_word value, which the
	recursive-relock branch below relies on. */
	int32_t oldval = X_LOCK_DECR;

	if (lock->lock_word.compare_exchange_strong(oldval, 0,
						std::memory_order_acquire,
						std::memory_order_relaxed)) {
		lock->writer_thread = os_thread_get_curr_id();

	} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
		/* Relock: even though no other thread can modify (lock, unlock
		or reserve) lock_word while there is an exclusive writer and
		this is the writer thread, we still want concurrent threads to
		observe consistent values. */
		if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
			/* There are 1 x-locks */
			lock->lock_word.fetch_sub(X_LOCK_DECR,
						  std::memory_order_relaxed);
		} else if (oldval <= -X_LOCK_DECR) {
			/* There are 2 or more x-locks */
			lock->lock_word.fetch_sub(1,
						  std::memory_order_relaxed);
			/* Watch for too many recursive locks */
			ut_ad(oldval < 1);
		} else {
			/* Failure */
			return(FALSE);
		}
	} else {
		/* Failure */
		return(FALSE);
	}

	ut_d(rw_lock_add_debug_info(lock, 0, RW_LOCK_X, file_name, line));

	lock->last_x_file_name = file_name;
	/* last_x_line is stored in a 14-bit field, hence the mask. */
	lock->last_x_line = line & ((1 << 14) - 1);

	ut_ad(rw_lock_validate(lock));

	return(TRUE);
}
351
/******************************************************************//**
Releases a shared mode lock.
Increments the lock word and, when this reader was the last one blocking
a waiting exclusive locker, wakes that waiter up. */
UNIV_INLINE
void
rw_lock_s_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));

	/* Increment lock_word to indicate 1 less reader; release
	ordering publishes this thread's reads under the latch.
	fetch_add returns the value BEFORE the increment. */
	int32_t lock_word = lock->lock_word.fetch_add(
		1, std::memory_order_release);

	if (lock_word == -1 || lock_word == -X_LOCK_HALF_DECR - 1) {
		/* wait_ex waiter exists. It may not be asleep, but we signal
		anyway. We do not wake other waiters, because they can't
		exist without wait_ex waiter and wait_ex waiter goes first.*/
		os_event_set(lock->wait_ex_event);
		sync_array_object_signalled();
	} else {
		ut_ad(lock_word > -X_LOCK_DECR);
		ut_ad(lock_word < X_LOCK_DECR);
	}

	ut_ad(rw_lock_validate(lock));
}
383
/******************************************************************//**
Releases an exclusive mode lock.
Only the current writer thread calls this, so the plain read of
lock_word below cannot race with other lock_word writers while the
X-lock is held. */
UNIV_INLINE
void
rw_lock_x_unlock_func(
/*==================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	int32_t lock_word = lock->lock_word;

	if (lock_word == 0) {
		/* Last caller in a possible recursive chain. */
		lock->writer_thread = 0;
	}

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_X));

	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
		/* Last X-lock owned by this thread, it may still hold SX-locks.
		ACQ_REL due to...
		RELEASE: we release rw-lock
		ACQUIRE: we want waiters to be loaded after lock_word is stored */
		lock->lock_word.fetch_add(X_LOCK_DECR,
					  std::memory_order_acq_rel);

		/* This no longer has an X-lock but it may still have
		an SX-lock. So it is now free for S-locks by other threads.
		We need to signal read/write waiters.
		We do not need to signal wait_ex waiters, since they cannot
		exist when there is a writer. */
		if (lock->waiters) {
			lock->waiters = 0;
			os_event_set(lock->event);
			sync_array_object_signalled();
		}
	} else if (lock_word == -X_LOCK_DECR
		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
		/* There are 2 x-locks */
		lock->lock_word.fetch_add(X_LOCK_DECR);
	} else {
		/* There are more than 2 x-locks. */
		ut_ad(lock_word < -X_LOCK_DECR);
		lock->lock_word.fetch_add(1);
	}

	ut_ad(rw_lock_validate(lock));
}
435
/******************************************************************//**
Releases a sx mode lock.
Only the current sx-lock holder calls this, so sx_recursive and the
plain read of lock_word below are not racing with other writers of
those fields while the SX-lock is held. */
UNIV_INLINE
void
rw_lock_sx_unlock_func(
/*===================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the lock may have
				been passed to another thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	ut_ad(rw_lock_get_sx_lock_count(lock));
	ut_ad(lock->sx_recursive > 0);

	/* Drop one level of SX recursion; lock_word is only adjusted
	when the outermost SX-lock is released below. */
	--lock->sx_recursive;

	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));

	if (lock->sx_recursive == 0) {
		int32_t lock_word = lock->lock_word;
		/* Last caller in a possible recursive chain. */
		if (lock_word > 0) {
			lock->writer_thread = 0;
			ut_ad(lock_word <= INT_MAX32 - X_LOCK_HALF_DECR);

			/* Last SX-lock owned by this thread, doesn't own X-lock.
			ACQ_REL due to...
			RELEASE: we release rw-lock
			ACQUIRE: we want waiters to be loaded after lock_word is stored */
			lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
						  std::memory_order_acq_rel);

			/* Lock is now free. May have to signal read/write
			waiters. We do not need to signal wait_ex waiters,
			since they cannot exist when there is an sx-lock
			holder. */
			if (lock->waiters) {
				lock->waiters = 0;
				os_event_set(lock->event);
				sync_array_object_signalled();
			}
		} else {
			/* still has x-lock */
			ut_ad(lock_word == -X_LOCK_HALF_DECR ||
			      lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
			lock->lock_word.fetch_add(X_LOCK_HALF_DECR);
		}
	}

	ut_ad(rw_lock_validate(lock));
}
488
489#ifdef UNIV_PFS_RWLOCK
490
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_create_func().
NOTE! Please use the corresponding macro rw_lock_create(), not directly
this function!
Registers the rwlock with the performance schema before delegating the
real initialization to rw_lock_create_func(). */
UNIV_INLINE
void
pfs_rw_lock_create_func(
/*====================*/
	mysql_pfs_key_t	key,		/*!< in: key registered with
					performance schema */
	rw_lock_t*	lock,		/*!< in/out: pointer to memory */
# ifdef UNIV_DEBUG
	latch_level_t	level,		/*!< in: level */
# endif /* UNIV_DEBUG */
	const char*	cfile_name,	/*!< in: file name where created */
	unsigned	cline)		/*!< in: file line where created */
{
	/* Debug builds placement-construct the object to run the
	rw_lock_t constructor on the raw memory. */
	ut_d(new(lock) rw_lock_t());

	/* Initialize the rwlock for performance schema */
	lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key, lock);

	/* The actual function to initialize an rwlock */
	rw_lock_create_func(lock,
#ifdef UNIV_DEBUG
			    level,
#endif /* UNIV_DEBUG */
			    cfile_name,
			    cline);
}
521/******************************************************************//**
522Performance schema instrumented wrap function for rw_lock_x_lock_func()
523NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
524this function! */
525UNIV_INLINE
526void
527pfs_rw_lock_x_lock_func(
528/*====================*/
529	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
530	ulint		pass,	/*!< in: pass value; != 0, if the lock will
531				be passed to another thread to unlock */
532	const char*	file_name,/*!< in: file name where lock requested */
533	unsigned	line)	/*!< in: line where requested */
534{
535	if (lock->pfs_psi != NULL) {
536		PSI_rwlock_locker*	locker;
537		PSI_rwlock_locker_state	state;
538
539		/* Record the acquisition of a read-write lock in exclusive
540		mode in performance schema */
541
542		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
543			&state, lock->pfs_psi, PSI_RWLOCK_EXCLUSIVELOCK,
544			file_name, static_cast<uint>(line));
545
546		rw_lock_x_lock_func(
547			lock, pass, file_name, static_cast<uint>(line));
548
549		if (locker != NULL) {
550			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
551		}
552	} else {
553		rw_lock_x_lock_func(lock, pass, file_name, line);
554	}
555}
556/******************************************************************//**
557Performance schema instrumented wrap function for
558rw_lock_x_lock_func_nowait()
559NOTE! Please use the corresponding macro rw_lock_x_lock_func(),
560not directly this function!
561@return TRUE if success */
562UNIV_INLINE
563ibool
564pfs_rw_lock_x_lock_func_nowait(
565/*===========================*/
566	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
567	const char*	file_name,/*!< in: file name where lock
568				requested */
569	unsigned	line)	/*!< in: line where requested */
570{
571	ibool		ret;
572
573	if (lock->pfs_psi != NULL) {
574		PSI_rwlock_locker*	locker;
575		PSI_rwlock_locker_state	state;
576
577		/* Record the acquisition of a read-write trylock in exclusive
578		mode in performance schema */
579
580		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
581			&state, lock->pfs_psi, PSI_RWLOCK_TRYEXCLUSIVELOCK,
582			file_name, static_cast<uint>(line));
583
584		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
585
586		if (locker != NULL) {
587			PSI_RWLOCK_CALL(end_rwlock_wrwait)(
588				locker, static_cast<int>(ret));
589		}
590	} else {
591		ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
592	}
593
594	return(ret);
595}
596/******************************************************************//**
597Performance schema instrumented wrap function for rw_lock_free_func()
598NOTE! Please use the corresponding macro rw_lock_free(), not directly
599this function! */
600UNIV_INLINE
601void
602pfs_rw_lock_free_func(
603/*==================*/
604	rw_lock_t*	lock)	/*!< in: pointer to rw-lock */
605{
606	if (lock->pfs_psi != NULL) {
607		PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
608		lock->pfs_psi = NULL;
609	}
610
611	rw_lock_free_func(lock);
612}
613/******************************************************************//**
614Performance schema instrumented wrap function for rw_lock_s_lock_func()
615NOTE! Please use the corresponding macro rw_lock_s_lock(), not
616directly this function! */
617UNIV_INLINE
618void
619pfs_rw_lock_s_lock_func(
620/*====================*/
621	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
622	ulint		pass,	/*!< in: pass value; != 0, if the
623				lock will be passed to another
624				thread to unlock */
625	const char*	file_name,/*!< in: file name where lock
626				requested */
627	unsigned	line)	/*!< in: line where requested */
628{
629	if (lock->pfs_psi != NULL) {
630		PSI_rwlock_locker*	locker;
631		PSI_rwlock_locker_state	state;
632
633		/* Instrumented to inform we are aquiring a shared rwlock */
634		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
635			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDLOCK,
636			file_name, static_cast<uint>(line));
637
638		rw_lock_s_lock_func(lock, pass, file_name, line);
639
640		if (locker != NULL) {
641			PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
642		}
643	} else {
644		rw_lock_s_lock_func(lock, pass, file_name, line);
645	}
646}
647/******************************************************************//**
648Performance schema instrumented wrap function for rw_lock_sx_lock_func()
649NOTE! Please use the corresponding macro rw_lock_sx_lock(), not
650directly this function! */
651UNIV_INLINE
652void
653pfs_rw_lock_sx_lock_func(
654/*====================*/
655	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
656	ulint		pass,	/*!< in: pass value; != 0, if the
657				lock will be passed to another
658				thread to unlock */
659	const char*	file_name,/*!< in: file name where lock
660				requested */
661	unsigned	line)	/*!< in: line where requested */
662{
663	if (lock->pfs_psi != NULL) {
664		PSI_rwlock_locker*	locker;
665		PSI_rwlock_locker_state	state;
666
667		/* Instrumented to inform we are aquiring a shared rwlock */
668		locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
669			&state, lock->pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK,
670			file_name, static_cast<uint>(line));
671
672		rw_lock_sx_lock_func(lock, pass, file_name, line);
673
674		if (locker != NULL) {
675			PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
676		}
677	} else {
678		rw_lock_sx_lock_func(lock, pass, file_name, line);
679	}
680}
681/******************************************************************//**
682Performance schema instrumented wrap function for rw_lock_s_lock_func()
683NOTE! Please use the corresponding macro rw_lock_s_lock(), not
684directly this function!
685@return TRUE if success */
686UNIV_INLINE
687ibool
688pfs_rw_lock_s_lock_low(
689/*===================*/
690	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
691	ulint		pass,	/*!< in: pass value; != 0, if the
692				lock will be passed to another
693				thread to unlock */
694	const char*	file_name, /*!< in: file name where lock requested */
695	unsigned	line)	/*!< in: line where requested */
696{
697	ibool		ret;
698
699	if (lock->pfs_psi != NULL) {
700		PSI_rwlock_locker*	locker;
701		PSI_rwlock_locker_state	state;
702
703		/* Instrumented to inform we are aquiring a shared rwlock */
704		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
705			&state, lock->pfs_psi, PSI_RWLOCK_TRYSHAREDLOCK,
706			file_name, static_cast<uint>(line));
707
708		ret = rw_lock_s_lock_low(lock, pass, file_name, line);
709
710		if (locker != NULL) {
711			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
712				locker, static_cast<int>(ret));
713		}
714	} else {
715		ret = rw_lock_s_lock_low(lock, pass, file_name, line);
716	}
717
718	return(ret);
719}
720/******************************************************************//**
721Performance schema instrumented wrap function for rw_lock_sx_lock_nowait()
722NOTE! Please use the corresponding macro, not
723directly this function!
724@return TRUE if success */
725UNIV_INLINE
726ibool
727pfs_rw_lock_sx_lock_low(
728/*====================*/
729	rw_lock_t*	lock,	/*!< in: pointer to rw-lock */
730	ulint		pass,	/*!< in: pass value; != 0, if the
731				lock will be passed to another
732				thread to unlock */
733	const char*	file_name, /*!< in: file name where lock requested */
734	unsigned	line)	/*!< in: line where requested */
735{
736	ibool		ret;
737
738	if (lock->pfs_psi != NULL) {
739		PSI_rwlock_locker*	locker;
740		PSI_rwlock_locker_state	state;
741
742		/* Instrumented to inform we are aquiring a shared
743		exclusive rwlock */
744		locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
745			&state, lock->pfs_psi,
746			PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK,
747			file_name, static_cast<uint>(line));
748
749		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);
750
751		if (locker != NULL) {
752			PSI_RWLOCK_CALL(end_rwlock_rdwait)(
753				locker, static_cast<int>(ret));
754		}
755	} else {
756		ret = rw_lock_sx_lock_low(lock, pass, file_name, line);
757	}
758
759	return(ret);
760}
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_x_unlock_func()
NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_x_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	/* Delegate the actual release to the uninstrumented function. */
	rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}
787
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_sx_unlock_func()
NOTE! Please use the corresponding macro rw_lock_sx_unlock(), not directly
this function! */
UNIV_INLINE
void
pfs_rw_lock_sx_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	/* Delegate the actual release to the uninstrumented function. */
	rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);
}
814
/******************************************************************//**
Performance schema instrumented wrap function for rw_lock_s_unlock_func()
NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
directly this function! */
UNIV_INLINE
void
pfs_rw_lock_s_unlock_func(
/*======================*/
#ifdef UNIV_DEBUG
	ulint		pass,	/*!< in: pass value; != 0, if the
				lock may have been passed to another
				thread to unlock */
#endif /* UNIV_DEBUG */
	rw_lock_t*	lock)	/*!< in/out: rw-lock */
{
	/* Inform performance schema we are unlocking the lock */
	if (lock->pfs_psi != NULL) {
		PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi);
	}

	/* Delegate the actual release to the uninstrumented function. */
	rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
		pass,
#endif /* UNIV_DEBUG */
		lock);

}
842#endif /* UNIV_PFS_RWLOCK */
843