/*****************************************************************************

Copyright (c) 1995, 2020, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA

*****************************************************************************/

/** @file include/sync0rw.ic
 The read-write lock (for threads)

 Created 9/11/1995 Heikki Tuuri
 *******************************************************/

#include "os0event.h"
/** Lock an rw-lock in shared mode for the current thread. If the rw-lock is
 locked in exclusive mode, or there is an exclusive lock request waiting,
 the function spins a preset time (controlled by srv_n_spin_wait_rounds),
 waiting for the lock before suspending the thread. */
void rw_lock_s_lock_spin(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    ulint pass,            /*!< in: pass value; != 0, if the lock will
                           be passed to another thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line);           /*!< in: line where requested */
#ifdef UNIV_DEBUG
/** Inserts the debug information for an rw-lock. */
void rw_lock_add_debug_info(
    rw_lock_t *lock,       /*!< in: rw-lock */
    ulint pass,            /*!< in: pass value */
    ulint lock_type,       /*!< in: lock type */
    const char *file_name, /*!< in: file where requested */
    ulint line);           /*!< in: line where requested */
/** Removes a debug information struct for an rw-lock. */
void rw_lock_remove_debug_info(rw_lock_t *lock,  /*!< in: rw-lock */
                               ulint pass,       /*!< in: pass value */
                               ulint lock_type); /*!< in: lock type */
#endif                                           /* UNIV_DEBUG */

/** Check if there are threads waiting for the rw-lock.
 @return 1 if waiters, 0 otherwise */
UNIV_INLINE
ulint rw_lock_get_waiters(const rw_lock_t *lock) /*!< in: rw-lock */
{
  return (lock->waiters);
}

/** Sets lock->waiters to 1. It is not an error if lock->waiters is already
 1. On platforms where ATOMIC builtins are used this function enforces a
 memory barrier. */
UNIV_INLINE
void rw_lock_set_waiter_flag(rw_lock_t *lock) /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
  (void)os_compare_and_swap_ulint(&lock->waiters, 0, 1);
#else  /* INNODB_RW_LOCKS_USE_ATOMICS */
  lock->waiters = 1;
  os_wmb;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

/** Resets lock->waiters to 0. It is not an error if lock->waiters is already
 0. On platforms where ATOMIC builtins are used this function enforces a
 memory barrier. */
UNIV_INLINE
void rw_lock_reset_waiter_flag(rw_lock_t *lock) /*!< in/out: rw-lock */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
  (void)os_compare_and_swap_ulint(&lock->waiters, 1, 0);
#else  /* INNODB_RW_LOCKS_USE_ATOMICS */
  lock->waiters = 0;
  os_wmb;
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

/** Returns the write-status of the lock - this function made more sense
 with the old rw_lock implementation.
 @return RW_LOCK_NOT_LOCKED, RW_LOCK_X, RW_LOCK_X_WAIT, RW_LOCK_SX */
UNIV_INLINE
ulint rw_lock_get_writer(const rw_lock_t *lock) /*!< in: rw-lock */
{
  lint lock_word = lock->lock_word;

  ut_ad(lock_word <= X_LOCK_DECR);
  if (lock_word > X_LOCK_HALF_DECR) {
    /* return NOT_LOCKED in s-lock state, like the writer
    member of the old lock implementation. */
    return (RW_LOCK_NOT_LOCKED);
  } else if (lock_word > 0) {
    /* sx-locked, no x-locks */
    return (RW_LOCK_SX);
  } else if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR ||
             lock_word <= -X_LOCK_DECR) {
    /* x-lock with sx-lock is also treated as RW_LOCK_EX */
    return (RW_LOCK_X);
  } else {
    /* x-waiter with sx-lock is also treated as RW_LOCK_WAIT_EX
    e.g. -X_LOCK_HALF_DECR < lock_word < 0 : without sx
         -X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR : with sx */
    return (RW_LOCK_X_WAIT);
  }
}
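
/* Worked example of the lock_word encoding read by the state functions above
and below. This is an editorial illustration, assuming the usual definitions
from sync0rw.h, where X_LOCK_DECR == 0x20000000 and X_LOCK_HALF_DECR ==
X_LOCK_DECR / 2:

  lock_word == X_LOCK_DECR                     unlocked
  X_LOCK_HALF_DECR < lock_word < X_LOCK_DECR   (X_LOCK_DECR - lock_word)
                                               s-locks, no sx- or x-lock
  lock_word == X_LOCK_HALF_DECR                sx-locked, no s-locks
  0 < lock_word < X_LOCK_HALF_DECR             sx-locked plus
                                               (X_LOCK_HALF_DECR - lock_word)
                                               s-locks
  lock_word == 0                               1 x-lock
  -X_LOCK_HALF_DECR < lock_word < 0            x-waiter, (-lock_word) s-locks
  lock_word == -X_LOCK_HALF_DECR               1 x-lock + sx-lock
  -X_LOCK_DECR < lock_word < -X_LOCK_HALF_DECR x-waiter with sx-lock
  lock_word <= -X_LOCK_DECR                    2 or more recursive x-locks

For instance, after three s-locks on a free lock, lock_word ==
X_LOCK_DECR - 3, so rw_lock_get_writer() returns RW_LOCK_NOT_LOCKED and
rw_lock_get_reader_count() below returns 3. */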

/** Returns the number of readers (s-locks).
 @return number of readers */
UNIV_INLINE
ulint rw_lock_get_reader_count(const rw_lock_t *lock) /*!< in: rw-lock */
{
  lint lock_word = lock->lock_word;
  ut_ad(lock_word <= X_LOCK_DECR);

  if (lock_word > X_LOCK_HALF_DECR) {
    /* s-locked, no x-waiter */
    return (X_LOCK_DECR - lock_word);
  } else if (lock_word > 0) {
    /* s-locked, with sx-locks only */
    return (X_LOCK_HALF_DECR - lock_word);
  } else if (lock_word == 0) {
    /* x-locked */
    return (0);
  } else if (lock_word > -X_LOCK_HALF_DECR) {
    /* s-locked, with x-waiter */
    return ((ulint)(-lock_word));
  } else if (lock_word == -X_LOCK_HALF_DECR) {
    /* x-locked with sx-locks */
    return (0);
  } else if (lock_word > -X_LOCK_DECR) {
    /* s-locked, with x-waiter and sx-lock */
    return ((ulint)(-(lock_word + X_LOCK_HALF_DECR)));
  }
  /* no s-locks */
  return (0);
}

#ifndef INNODB_RW_LOCKS_USE_ATOMICS
UNIV_INLINE
ib_mutex_t *rw_lock_get_mutex(rw_lock_t *lock) { return (&(lock->mutex)); }
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */

/** Returns the number of x-locks held on the lock (the writer count). Does
 not reserve the lock mutex, so the caller must be sure the lock is not
 changed during the call.
 @return number of x-locks held */
UNIV_INLINE
ulint rw_lock_get_x_lock_count(const rw_lock_t *lock) /*!< in: rw-lock */
{
  lint lock_copy = lock->lock_word;
  ut_ad(lock_copy <= X_LOCK_DECR);

  if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
    /* "1 x-lock" or "1 x-lock + sx-locks" */
    return (1);
  } else if (lock_copy > -X_LOCK_DECR) {
    /* s-locks, one or more sx-locks if > 0, or x-waiter if < 0 */
    return (0);
  } else if (lock_copy > -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
    /* no s-lock, no sx-lock, 2 or more x-locks.
    First 2 x-locks are set with -X_LOCK_DECR,
    all other recursive x-locks are set with -1 */
    return (2 - (lock_copy + X_LOCK_DECR));
  } else {
    /* no s-lock, 1 or more sx-lock, 2 or more x-locks.
    First 2 x-locks are set with -(X_LOCK_DECR + X_LOCK_HALF_DECR),
    all other recursive x-locks are set with -1 */
    return (2 - (lock_copy + X_LOCK_DECR + X_LOCK_HALF_DECR));
  }
}
/** Returns the number of sx-locks for the lock. Does not reserve the lock
 mutex, so the caller must be sure the lock is not changed during the call.
 @return value of sx-lock count */
UNIV_INLINE
ulint rw_lock_get_sx_lock_count(const rw_lock_t *lock) /*!< in: rw-lock */
{
#ifdef UNIV_DEBUG
  lint lock_copy = lock->lock_word;

  ut_ad(lock_copy <= X_LOCK_DECR);

  while (lock_copy < 0) {
    lock_copy += X_LOCK_DECR;
  }

  if (lock_copy > 0 && lock_copy <= X_LOCK_HALF_DECR) {
    return (lock->sx_recursive);
  }

  return (0);
#else  /* UNIV_DEBUG */
  return (lock->sx_recursive);
#endif /* UNIV_DEBUG */
}

/** Two different implementations for decrementing the lock_word of a rw_lock:
 one for systems supporting atomic operations, one for others. This does
 not support recursive x-locks: they should be handled by the caller and
 need not be atomic since they are performed by the current lock holder.
 Returns true if the decrement was made, false if not.
 @return true if decr occurs */
ALWAYS_INLINE
bool rw_lock_lock_word_decr(rw_lock_t *lock, /*!< in/out: rw-lock */
                            ulint amount,    /*!< in: amount to decrement */
                            lint threshold)  /*!< in: decrement only if
                                             lock_word > threshold */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
  lint local_lock_word;

  os_rmb;
  local_lock_word = lock->lock_word;
  while (local_lock_word > threshold) {
    if (os_compare_and_swap_lint(&lock->lock_word, local_lock_word,
                                 local_lock_word - amount)) {
      return (true);
    }
    local_lock_word = lock->lock_word;
  }
  return (false);
#else  /* INNODB_RW_LOCKS_USE_ATOMICS */
  bool success = false;
  mutex_enter(&(lock->mutex));
  if (lock->lock_word > threshold) {
    lock->lock_word -= amount;
    success = true;
  }
  mutex_exit(&(lock->mutex));
  return (success);
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}
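
/* Illustrative caller conventions for rw_lock_lock_word_decr(). The exact
call sites live in sync0rw.cc and later in this file; the amounts and
thresholds below are a sketch of those conventions, assuming the
X_LOCK_DECR / X_LOCK_HALF_DECR encoding described above:

  s-lock:  rw_lock_lock_word_decr(lock, 1, 0)
           succeeds only while lock_word > 0, i.e. while no x-lock is
           held and no x-waiter has reserved the lock;
  x-lock:  rw_lock_lock_word_decr(lock, X_LOCK_DECR, X_LOCK_HALF_DECR)
           claims the whole X_LOCK_DECR budget in one step;
  sx-lock: rw_lock_lock_word_decr(lock, X_LOCK_HALF_DECR, X_LOCK_HALF_DECR)
           claims half of it, leaving room for concurrent s-locks. */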

/** Increments lock_word by the specified amount and returns the new value.
 @return lock->lock_word after increment */
UNIV_INLINE
lint rw_lock_lock_word_incr(rw_lock_t *lock, /*!< in/out: rw-lock */
                            ulint amount)    /*!< in: amount of increment */
{
#ifdef INNODB_RW_LOCKS_USE_ATOMICS
  return (os_atomic_increment_lint(&lock->lock_word, amount));
#else  /* INNODB_RW_LOCKS_USE_ATOMICS */
  lint local_lock_word;

  mutex_enter(&(lock->mutex));

  lock->lock_word += amount;
  local_lock_word = lock->lock_word;

  mutex_exit(&(lock->mutex));

  return (local_lock_word);
#endif /* INNODB_RW_LOCKS_USE_ATOMICS */
}

UNIV_INLINE
void rw_lock_set_writer_id_and_recursion_flag(rw_lock_t *lock, bool recursive) {
  lock->writer_thread.store(os_thread_get_curr_id(), std::memory_order_relaxed);
  lock->recursive.store(recursive, std::memory_order_release);
}
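
/* Note on the memory ordering above (descriptive comment): the relaxed store
to writer_thread is published by the subsequent release store to
lock->recursive. The relock path in rw_lock_x_lock_func_nowait() below pairs
this with an acquire load of lock->recursive before its relaxed load of
writer_thread, so a thread that observes recursive == true also observes the
matching writer id. */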

/** Low-level function which tries to lock an rw-lock in s-mode. Performs no
 spinning.
 @return TRUE if success */
ALWAYS_INLINE
ibool rw_lock_s_lock_low(
    rw_lock_t *lock, /*!< in: pointer to rw-lock */
    ulint pass MY_ATTRIBUTE((unused)),
    /*!< in: pass value; != 0, if the lock will be
    passed to another thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  if (!rw_lock_lock_word_decr(lock, 1, 0)) {
    /* Locking did not succeed */
    return (FALSE);
  }

  ut_d(rw_lock_add_debug_info(lock, pass, RW_LOCK_S, file_name, line));

  /* These debugging values are not set safely: they may be incorrect
  or even refer to a line that is invalid for the file name. */
  lock->last_s_file_name = file_name;
  lock->last_s_line = line;

  return (TRUE); /* locking succeeded */
}

/** NOTE! Use the corresponding macro, not directly this function! Lock an
 rw-lock in shared mode for the current thread. If the rw-lock is locked
 in exclusive mode, or there is an exclusive lock request waiting, the
 function spins a preset time (controlled by srv_n_spin_wait_rounds), waiting
 for the lock, before suspending the thread. */
UNIV_INLINE
void rw_lock_s_lock_func(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    ulint pass,            /*!< in: pass value; != 0, if the lock will
                           be passed to another thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  /* NOTE: Since we do not know the thread ids of threads which have
  s-locked a latch, and s-lockers are served only after waiting x-lock
  requests have been fulfilled, a thread that already owns an s-lock
  here may end up in a deadlock with another thread which requests an
  x-lock here. Therefore, we forbid recursive s-locking of a latch: the
  following assert warns the programmer of the possibility of this kind
  of a deadlock. If we wanted to implement safe recursive s-locking, we
  would have to keep a list of the thread ids of the threads which have
  s-locked a latch. This would use some CPU time. */

  ut_ad(!rw_lock_own(lock, RW_LOCK_S)); /* see NOTE above */
  ut_ad(!rw_lock_own(lock, RW_LOCK_X));

  if (!rw_lock_s_lock_low(lock, pass, file_name, line)) {
    /* Did not succeed, try spin wait */

    rw_lock_s_lock_spin(lock, pass, file_name, line);
  }
}
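
/* Typical use is via the macros, which supply the file/line arguments
automatically; a minimal sketch (read_shared_data() is hypothetical):

  rw_lock_s_lock(lock);   // expands to rw_lock_s_lock_func(lock, 0, ...)
  read_shared_data();     // any read-only access to the protected state
  rw_lock_s_unlock(lock);
*/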

/** NOTE! Use the corresponding macro, not directly this function! Lock an
 rw-lock in exclusive mode for the current thread if the lock can be
 obtained immediately.
 @return true if success */
UNIV_INLINE
ibool rw_lock_x_lock_func_nowait(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  ibool success;

#ifdef INNODB_RW_LOCKS_USE_ATOMICS
  success = os_compare_and_swap_lint(&lock->lock_word, X_LOCK_DECR, 0);
#else

  success = FALSE;
  mutex_enter(&(lock->mutex));
  if (lock->lock_word == X_LOCK_DECR) {
    lock->lock_word = 0;
    success = TRUE;
  }
  mutex_exit(&(lock->mutex));

#endif
  if (success) {
    rw_lock_set_writer_id_and_recursion_flag(lock, true);
  } else if (lock->recursive.load(std::memory_order_acquire) &&
             os_thread_eq(lock->writer_thread.load(std::memory_order_relaxed),
                          os_thread_get_curr_id())) {
    /* Relock: this lock_word modification is safe since no other
    threads can modify (lock, unlock, or reserve) lock_word while
    there is an exclusive writer and this is the writer thread. */
    if (lock->lock_word == 0 || lock->lock_word == -X_LOCK_HALF_DECR) {
      /* There is 1 x-lock */
      lock->lock_word -= X_LOCK_DECR;
    } else if (lock->lock_word <= -X_LOCK_DECR) {
      /* There are 2 or more x-locks */
      lock->lock_word--;
    } else {
      /* Failure */
      return (FALSE);
    }

    /* Watch for too many recursive locks */
    ut_ad(lock->lock_word < 0);

  } else {
    /* Failure */
    return (FALSE);
  }

  ut_d(rw_lock_add_debug_info(lock, 0, RW_LOCK_X, file_name, line));

  lock->last_x_file_name = file_name;
  lock->last_x_line = line;

  ut_ad(rw_lock_validate(lock));

  return (TRUE);
}
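
/* Sketch of the trylock pattern this enables (rw_lock_x_lock_nowait() is
the macro that routes here; do_work()/fallback() are hypothetical):

  if (rw_lock_x_lock_nowait(lock)) {
    do_work();              // we got the x-lock without ever blocking
    rw_lock_x_unlock(lock);
  } else {
    fallback();             // lock was contended; take the slow path
  }
*/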

/** Releases a shared mode lock. */
UNIV_INLINE
void rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the lock may have
                     been passed to another thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  ut_ad(lock->lock_word > -X_LOCK_DECR);
  ut_ad(lock->lock_word != 0);
  ut_ad(lock->lock_word < X_LOCK_DECR);

  ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));

  /* Increment lock_word to indicate 1 less reader */
  lint lock_word = rw_lock_lock_word_incr(lock, 1);
  if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
    /* A wait_ex waiter exists. It may not be asleep, but we signal
    anyway. We do not wake other waiters, because they cannot exist
    without the wait_ex waiter, and the wait_ex waiter goes first. */
    os_event_set(lock->wait_ex_event);
    sync_array_object_signalled();
  }

  ut_ad(rw_lock_validate(lock));
}

/** Releases an exclusive mode lock. */
UNIV_INLINE
void rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the lock may have
                     been passed to another thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  ut_ad(lock->lock_word == 0 || lock->lock_word == -X_LOCK_HALF_DECR ||
        lock->lock_word <= -X_LOCK_DECR);

  /* lock->recursive == true implies that the lock->writer_thread is the
  current writer. If we are the last of the recursive callers then we must
  unset the lock->recursive flag (or reset lock->writer_thread to the
  impossible std::thread().native_handle()) to indicate that the
  lock->writer_thread is now stale. Otherwise, if our thread tried to
  reacquire the lock, it would wrongly believe it already has it.
  Note that since we still hold the x-lock we can safely read the lock_word. */
  if (lock->lock_word == 0) {
    /* Last caller in a possible recursive chain. */
    lock->recursive.store(false, std::memory_order_relaxed);
  }

  ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_X));

  if (lock->lock_word == 0 || lock->lock_word == -X_LOCK_HALF_DECR) {
    /* There is 1 x-lock */
    /* atomic increment is needed, because it is last */
    if (rw_lock_lock_word_incr(lock, X_LOCK_DECR) <= 0) {
      ut_error;
    }

    /* This no longer has an X-lock but it may still have
    an SX-lock. So it is now free for S-locks by other threads.
    We need to signal read/write waiters.
    We do not need to signal wait_ex waiters, since they cannot
    exist when there is a writer. */
    if (lock->waiters) {
      rw_lock_reset_waiter_flag(lock);
      os_event_set(lock->event);
      sync_array_object_signalled();
    }
  } else if (lock->lock_word == -X_LOCK_DECR ||
             lock->lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
    /* There are 2 x-locks */
    lock->lock_word += X_LOCK_DECR;
  } else {
    /* There are more than 2 x-locks. */
    ut_ad(lock->lock_word < -X_LOCK_DECR);
    lock->lock_word += 1;
  }

  ut_ad(rw_lock_validate(lock));
}
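
/* Worked trace of the recursive x-lock arithmetic (an illustration with no
sx-locks involved; each relock comes from the owning thread):

  free lock                 lock_word == X_LOCK_DECR
  1st x-lock                lock_word == 0
  2nd x-lock (relock)       lock_word == -X_LOCK_DECR
  3rd x-lock (relock)       lock_word == -X_LOCK_DECR - 1
  1st unlock (+1)           lock_word == -X_LOCK_DECR
  2nd unlock (+X_LOCK_DECR) lock_word == 0
  3rd unlock (+X_LOCK_DECR) lock_word == X_LOCK_DECR (free again)

rw_lock_get_x_lock_count() reads 3, 2, 1 at the intermediate states. */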

/** Releases an sx mode lock. */
UNIV_INLINE
void rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the lock may have
                     been passed to another thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  ut_ad(rw_lock_get_sx_lock_count(lock));
  ut_ad(lock->sx_recursive > 0);

  --lock->sx_recursive;

  ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));

  if (lock->sx_recursive == 0) {
    /* Last caller in a possible recursive chain. */
    if (lock->lock_word > 0) {
      lock->recursive.store(false, std::memory_order_relaxed);

      if (rw_lock_lock_word_incr(lock, X_LOCK_HALF_DECR) <= X_LOCK_HALF_DECR) {
        ut_error;
      }
      /* Lock is now free. May have to signal read/write
      waiters. We do not need to signal wait_ex waiters,
      since they cannot exist when there is an sx-lock
      holder. */
      if (lock->waiters) {
        rw_lock_reset_waiter_flag(lock);
        os_event_set(lock->event);
        sync_array_object_signalled();
      }
    } else {
      /* still has x-lock */
      ut_ad(lock->lock_word == -X_LOCK_HALF_DECR ||
            lock->lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
      lock->lock_word += X_LOCK_HALF_DECR;
    }
  }

  ut_ad(rw_lock_validate(lock));
}
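
/* Companion sx trace (illustrative sketch; the sx acquisition code lives in
sync0rw.cc): an sx-lock moves lock_word down by X_LOCK_HALF_DECR, and an sx
holder may still take the full x-lock:

  free lock                 lock_word == X_LOCK_DECR
  sx-lock                   lock_word == X_LOCK_HALF_DECR
  x-lock by sx holder       lock_word == -X_LOCK_HALF_DECR
  x-unlock                  lock_word == X_LOCK_HALF_DECR
  sx-unlock                 lock_word == X_LOCK_DECR (free again)

Recursive sx-locks only increment lock->sx_recursive; lock_word changes
once, on the first acquisition. */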

#ifdef UNIV_PFS_RWLOCK

/** Performance schema instrumented wrap function for rw_lock_create_func().
 NOTE! Please use the corresponding macro rw_lock_create(), not directly
 this function! */
UNIV_INLINE
void pfs_rw_lock_create_func(
    mysql_pfs_key_t key, /*!< in: key registered with
                         performance schema */
    rw_lock_t *lock,     /*!< in/out: pointer to memory */
#ifdef UNIV_DEBUG
    latch_level_t level,     /*!< in: level */
    const char *cmutex_name, /*!< in: mutex name */
#endif                       /* UNIV_DEBUG */
    const char *cfile_name,  /*!< in: file name where created */
    ulint cline)             /*!< in: file line where created */
{
  new (lock) rw_lock_t;

  /* Initialize the rwlock for performance schema */
  lock->pfs_psi = PSI_RWLOCK_CALL(init_rwlock)(key.m_value, lock);

  /* The actual function to initialize an rwlock */
  rw_lock_create_func(lock,
#ifdef UNIV_DEBUG
                      level, cmutex_name,
#endif /* UNIV_DEBUG */
                      cfile_name, cline);
}
/** Performance schema instrumented wrap function for rw_lock_x_lock_func()
 NOTE! Please use the corresponding macro rw_lock_x_lock(), not directly
 this function! */
UNIV_INLINE
void pfs_rw_lock_x_lock_func(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    ulint pass,            /*!< in: pass value; != 0, if the lock will
                           be passed to another thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Record the acquisition of a read-write lock in exclusive
    mode in performance schema */

    locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_EXCLUSIVELOCK, file_name,
        static_cast<uint>(line));

    rw_lock_x_lock_func(lock, pass, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
    }
  } else {
    rw_lock_x_lock_func(lock, pass, file_name, line);
  }
}
/** Performance schema instrumented wrap function for
 rw_lock_x_lock_func_nowait()
 NOTE! Please use the corresponding macro rw_lock_x_lock_nowait(),
 not directly this function!
 @return true if success */
UNIV_INLINE
ibool pfs_rw_lock_x_lock_func_nowait(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    const char *file_name, /*!< in: file name where lock
                           requested */
    ulint line)            /*!< in: line where requested */
{
  ibool ret;

  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Record the acquisition of a read-write trylock in exclusive
    mode in performance schema */

    locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_TRYEXCLUSIVELOCK, file_name,
        static_cast<uint>(line));

    ret = rw_lock_x_lock_func_nowait(lock, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, static_cast<int>(ret));
    }
  } else {
    ret = rw_lock_x_lock_func_nowait(lock, file_name, line);
  }

  return (ret);
}
/** Performance schema instrumented wrap function for rw_lock_free_func()
 NOTE! Please use the corresponding macro rw_lock_free(), not directly
 this function! */
UNIV_INLINE
void pfs_rw_lock_free_func(rw_lock_t *lock) /*!< in: pointer to rw-lock */
{
  if (lock->pfs_psi != nullptr) {
    PSI_RWLOCK_CALL(destroy_rwlock)(lock->pfs_psi);
    lock->pfs_psi = nullptr;
  }

  rw_lock_free_func(lock);
}
/** Performance schema instrumented wrap function for rw_lock_s_lock_func()
 NOTE! Please use the corresponding macro rw_lock_s_lock(), not
 directly this function! */
ALWAYS_INLINE
void pfs_rw_lock_s_lock_func(rw_lock_t *lock, /*!< in: pointer to rw-lock */
                             ulint pass,      /*!< in: pass value; != 0, if the
                                              lock will be passed to another
                                              thread to unlock */
                             const char *file_name, /*!< in: file name where
                                                    lock requested */
                             ulint line) /*!< in: line where requested */
{
  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Instrumented to inform we are acquiring a shared rwlock */
    locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_SHAREDLOCK, file_name,
        static_cast<uint>(line));

    rw_lock_s_lock_func(lock, pass, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, 0);
    }
  } else {
    rw_lock_s_lock_func(lock, pass, file_name, line);
  }
}
/** Performance schema instrumented wrap function for rw_lock_sx_lock_func()
 NOTE! Please use the corresponding macro rw_lock_sx_lock(), not
 directly this function! */
UNIV_INLINE
void pfs_rw_lock_sx_lock_func(rw_lock_t *lock, /*!< in: pointer to rw-lock */
                              ulint pass,      /*!< in: pass value; != 0, if the
                                               lock will be passed to another
                                               thread to unlock */
                              const char *file_name, /*!< in: file name where
                                                     lock requested */
                              ulint line) /*!< in: line where requested */
{
  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Instrumented to inform we are acquiring a shared-exclusive (sx)
    rwlock */
    locker = PSI_RWLOCK_CALL(start_rwlock_wrwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVELOCK, file_name,
        static_cast<uint>(line));

    rw_lock_sx_lock_func(lock, pass, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_wrwait)(locker, 0);
    }
  } else {
    rw_lock_sx_lock_func(lock, pass, file_name, line);
  }
}
/** Performance schema instrumented wrap function for rw_lock_s_lock_low()
 NOTE! Please use the corresponding macro rw_lock_s_lock_nowait(), not
 directly this function!
 @return true if success */
UNIV_INLINE
ibool pfs_rw_lock_s_lock_low(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    ulint pass,            /*!< in: pass value; != 0, if the
                           lock will be passed to another
                           thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  ibool ret;

  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Instrumented to inform we are acquiring a shared rwlock */
    locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_TRYSHAREDLOCK, file_name,
        static_cast<uint>(line));

    ret = rw_lock_s_lock_low(lock, pass, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, static_cast<int>(ret));
    }
  } else {
    ret = rw_lock_s_lock_low(lock, pass, file_name, line);
  }

  return (ret);
}
/** Performance schema instrumented wrap function for rw_lock_sx_lock_low()
 NOTE! Please use the corresponding macro rw_lock_sx_lock_nowait(), not
 directly this function!
 @return true if success */
UNIV_INLINE
ibool pfs_rw_lock_sx_lock_low(
    rw_lock_t *lock,       /*!< in: pointer to rw-lock */
    ulint pass,            /*!< in: pass value; != 0, if the
                           lock will be passed to another
                           thread to unlock */
    const char *file_name, /*!< in: file name where lock requested */
    ulint line)            /*!< in: line where requested */
{
  ibool ret;

  if (lock->pfs_psi != nullptr) {
    PSI_rwlock_locker *locker;
    PSI_rwlock_locker_state state;

    /* Instrumented to inform we are acquiring a shared-exclusive (sx)
    rwlock */
    locker = PSI_RWLOCK_CALL(start_rwlock_rdwait)(
        &state, lock->pfs_psi, PSI_RWLOCK_TRYSHAREDEXCLUSIVELOCK, file_name,
        static_cast<uint>(line));

    ret = rw_lock_sx_lock_low(lock, pass, file_name, line);

    if (locker != nullptr) {
      PSI_RWLOCK_CALL(end_rwlock_rdwait)(locker, static_cast<int>(ret));
    }
  } else {
    ret = rw_lock_sx_lock_low(lock, pass, file_name, line);
  }

  return (ret);
}
/** Performance schema instrumented wrap function for rw_lock_x_unlock_func()
 NOTE! Please use the corresponding macro rw_lock_x_unlock(), not directly
 this function! */
UNIV_INLINE
void pfs_rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the
                     lock may have been passed to another
                     thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  /* Inform performance schema we are unlocking the lock */
  if (lock->pfs_psi != nullptr) {
    PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi, PSI_RWLOCK_EXCLUSIVEUNLOCK);
  }

  rw_lock_x_unlock_func(
#ifdef UNIV_DEBUG
      pass,
#endif /* UNIV_DEBUG */
      lock);
}

/** Performance schema instrumented wrap function for rw_lock_sx_unlock_func()
 NOTE! Please use the corresponding macro rw_lock_sx_unlock(), not directly
 this function! */
UNIV_INLINE
void pfs_rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the
                     lock may have been passed to another
                     thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  /* Inform performance schema we are unlocking the lock */
  if (lock->pfs_psi != nullptr) {
    PSI_RWLOCK_CALL(unlock_rwlock)
    (lock->pfs_psi, PSI_RWLOCK_SHAREDEXCLUSIVEUNLOCK);
  }

  rw_lock_sx_unlock_func(
#ifdef UNIV_DEBUG
      pass,
#endif /* UNIV_DEBUG */
      lock);
}

/** Performance schema instrumented wrap function for rw_lock_s_unlock_func()
 NOTE! Please use the corresponding macro rw_lock_s_unlock(), not
 directly this function! */
ALWAYS_INLINE
void pfs_rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
    ulint pass,      /*!< in: pass value; != 0, if the
                     lock may have been passed to another
                     thread to unlock */
#endif               /* UNIV_DEBUG */
    rw_lock_t *lock) /*!< in/out: rw-lock */
{
  /* Inform performance schema we are unlocking the lock */
  if (lock->pfs_psi != nullptr) {
    PSI_RWLOCK_CALL(unlock_rwlock)(lock->pfs_psi, PSI_RWLOCK_SHAREDUNLOCK);
  }

  rw_lock_s_unlock_func(
#ifdef UNIV_DEBUG
      pass,
#endif /* UNIV_DEBUG */
      lock);
}
#endif /* UNIV_PFS_RWLOCK */
