1 /*****************************************************************************
2
3 Copyright (c) 1995, 2020, Oracle and/or its affiliates.
4
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License, version 2.0, as published by the
7 Free Software Foundation.
8
9 This program is also distributed with certain software (including but not
10 limited to OpenSSL) that is licensed under separate terms, as designated in a
11 particular file or component or in included license documentation. The authors
12 of MySQL hereby grant you an additional permission to link the program and
13 your derivative works with the separately licensed software that they have
14 included with MySQL.
15
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
19 for more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25 *****************************************************************************/
26
27 /** @file include/sync0types.h
28 Global types for sync
29
30 Created 9/5/1995 Heikki Tuuri
31 *******************************************************/
32
33 #ifndef sync0types_h
34 #define sync0types_h
35
36 #include <iostream>
37 #include <vector>
38
39 #include "sync0sync.h"
40 #include "univ.i"
41 #include "ut0counter.h"
42 #include "ut0new.h"
43
44 #ifdef UNIV_DEBUG
45 /** Set when InnoDB has invoked exit(). */
46 extern bool innodb_calling_exit;
47 #endif /* UNIV_DEBUG */
48
49 #ifdef _WIN32
50 /** Native mutex */
51 typedef CRITICAL_SECTION sys_mutex_t;
52 #else
53 /** Native mutex */
54 typedef pthread_mutex_t sys_mutex_t;
55 #endif /* _WIN32 */
56
57 /** The new (C++11) syntax allows the following and we should use it when it
58 is available on platforms that we support.
59
60 enum class mutex_state_t : lock_word_t { ... };
61 */
62
/** Mutex states. */
enum mutex_state_t {
  /** Mutex is free */
  MUTEX_STATE_UNLOCKED = 0,

  /** Mutex is acquired by some thread. */
  MUTEX_STATE_LOCKED = 1,

  /** Mutex is contended and there are threads waiting on the lock. */
  MUTEX_STATE_WAITERS = 2

  /* NOTE(review): the explicit 0/1/2 values look intentional -- the
  comment above mentions lock_word_t, so these presumably end up stored
  in a mutex lock word; keep the values stable. */
};
74
75 /*
76 LATCHING ORDER WITHIN THE DATABASE
77 ==================================
78
79 The mutex or latch in the central memory object, for instance, a rollback
80 segment object, must be acquired before acquiring the latch or latches to
81 the corresponding file data structure. In the latching order below, these
82 file page object latches are placed immediately below the corresponding
83 central memory object latch or mutex.
84
85 Synchronization object Notes
86 ---------------------- -----
87
88 Dictionary mutex If we have a pointer to a dictionary
89 | object, e.g., a table, it can be
90 | accessed without reserving the
91 | dictionary mutex. We must have a
92 | reservation, a memoryfix, to the
93 | appropriate table object in this case,
94 | and the table must be explicitly
95 | released later.
96 V
97 Dictionary header
98 |
99 V
100 Secondary index tree latch The tree latch protects also all
101 | the B-tree non-leaf pages. These
102 V can be read with the page only
103 Secondary index non-leaf bufferfixed to save CPU time,
104 | no s-latch is needed on the page.
105 | Modification of a page requires an
106 | x-latch on the page, however. If a
107 | thread owns an x-latch to the tree,
108 | it is allowed to latch non-leaf pages
109 | even after it has acquired the fsp
110 | latch.
111 V
112 Secondary index leaf The latch on the secondary index leaf
113 | can be kept while accessing the
114 | clustered index, to save CPU time.
115 V
116 Clustered index tree latch To increase concurrency, the tree
117 | latch is usually released when the
118 | leaf page latch has been acquired.
119 V
120 Clustered index non-leaf
121 |
122 V
123 Clustered index leaf
124 |
125 V
126 Transaction system header
127 |
128 V
129 Transaction undo mutex The undo log entry must be written
130 | before any index page is modified.
131 | Transaction undo mutex is for the undo
132 | logs the analogue of the tree latch
133 | for a B-tree. If a thread has the
134 | trx undo mutex reserved, it is allowed
135 | to latch the undo log pages in any
136 | order, and also after it has acquired
137 | the fsp latch.
138 V
139 Rollback segment mutex The rollback segment mutex must be
140 | reserved, if, e.g., a new page must
141 | be added to an undo log. The rollback
142 | segment and the undo logs in its
143 | history list can be seen as an
144 | analogue of a B-tree, and the latches
145 | reserved similarly, using a version of
146 | lock-coupling. If an undo log must be
147 | extended by a page when inserting an
148 | undo log record, this corresponds to
149 | a pessimistic insert in a B-tree.
150 V
151 Rollback segment header
152 |
153 V
154 Purge system latch
155 |
156 V
157 Undo log pages If a thread owns the trx undo mutex,
158 | or for a log in the history list, the
159 | rseg mutex, it is allowed to latch
160 | undo log pages in any order, and even
161 | after it has acquired the fsp latch.
162 | If a thread does not have the
163 | appropriate mutex, it is allowed to
164 | latch only a single undo log page in
165 | a mini-transaction.
166 V
167 File space management latch If a mini-transaction must allocate
168 | several file pages, it can do that,
169 | because it keeps the x-latch to the
170 | file space management in its memo.
171 V
172 File system pages
173 |
174 V
175 lock_sys_wait_mutex Mutex protecting lock timeout data
176 |
177 V
178 lock_sys->global_sharded_latch Sharded rw-latch protecting lock_sys_t
179 |
180 V
181 lock_sys->table_mutexes Mutexes protecting lock_sys_t table
182 | lock queues
183 |
184 V
185 lock_sys->page_mutexes Mutexes protecting lock_sys_t page
186 | lock queues
187 |
188 V
189 trx_sys->mutex Mutex protecting trx_sys_t
190 |
191 V
192 Threads mutex Background thread scheduling mutex
193 |
194 V
195 query_thr_mutex Mutex protecting query threads
196 |
197 V
198 trx_mutex Mutex protecting trx_t fields
199 |
200 V
201 Search system mutex
202 |
203 V
204 Buffer pool mutexes
205 |
206 V
Log mutex
|
V
Any other latch
|
V
Memory pool mutex */
213
/** Latching order levels. If you modify these, you have to also update
LatchDebug internals in sync0debug.cc */

enum latch_level_t {
  SYNC_UNKNOWN = 0,

  SYNC_MUTEX = 1,

  /* NOTE(review): the entries below take implicit sequential values,
  and their relative order appears to be significant to the latch-order
  checking referenced above (LatchDebug in sync0debug.cc) -- confirm
  there before inserting or reordering entries. */

  RW_LOCK_SX,
  RW_LOCK_X_WAIT,
  RW_LOCK_S,
  RW_LOCK_X,
  RW_LOCK_NOT_LOCKED,

  SYNC_LOCK_FREE_HASH,

  SYNC_MONITOR_MUTEX,

  SYNC_ANY_LATCH,

  SYNC_FIL_SHARD,

  SYNC_PAGE_ARCH_OPER,

  SYNC_BUF_FLUSH_LIST,
  SYNC_BUF_FLUSH_STATE,
  SYNC_BUF_ZIP_HASH,
  SYNC_BUF_FREE_LIST,
  SYNC_BUF_ZIP_FREE,
  SYNC_BUF_BLOCK,
  SYNC_BUF_PAGE_HASH,
  SYNC_BUF_LRU_LIST,
  SYNC_BUF_CHUNKS,

  SYNC_DBLWR,

  SYNC_SEARCH_SYS,

  SYNC_WORK_QUEUE,

  SYNC_FTS_TOKENIZE,
  SYNC_FTS_OPTIMIZE,
  SYNC_FTS_BG_THREADS,
  SYNC_FTS_CACHE_INIT,
  SYNC_RECV,

  SYNC_LOG_LIMITS,
  SYNC_LOG_WRITER,
  SYNC_LOG_WRITE_NOTIFIER,
  SYNC_LOG_FLUSH_NOTIFIER,
  SYNC_LOG_FLUSHER,
  SYNC_LOG_CLOSER,
  SYNC_LOG_CHECKPOINTER,
  SYNC_LOG_SN,
  SYNC_PAGE_ARCH,
  SYNC_PAGE_ARCH_CLIENT,
  SYNC_LOG_ARCH,

  SYNC_PAGE_CLEANER,
  SYNC_PURGE_QUEUE,
  SYNC_TRX_SYS_HEADER,
  SYNC_THREADS,
  SYNC_TRX,
  SYNC_POOL,
  SYNC_POOL_MANAGER,
  SYNC_TRX_SYS,
  SYNC_LOCK_SYS_SHARDED,
  SYNC_LOCK_SYS_GLOBAL,
  SYNC_LOCK_WAIT_SYS,

  SYNC_INDEX_ONLINE_LOG,

  SYNC_IBUF_BITMAP,
  SYNC_IBUF_BITMAP_MUTEX,
  SYNC_IBUF_TREE_NODE,
  SYNC_IBUF_TREE_NODE_NEW,
  SYNC_IBUF_INDEX_TREE,

  SYNC_IBUF_MUTEX,

  SYNC_FSP_PAGE,
  SYNC_FSP,
  SYNC_TEMP_POOL_MANAGER,
  SYNC_EXTERN_STORAGE,
  SYNC_RSEG_ARRAY_HEADER,
  SYNC_TRX_UNDO_PAGE,
  SYNC_RSEG_HEADER,
  SYNC_RSEG_HEADER_NEW,
  SYNC_TEMP_SPACE_RSEG,
  SYNC_UNDO_SPACE_RSEG,
  SYNC_TRX_SYS_RSEG,
  SYNC_TRX_UNDO,
  SYNC_PURGE_LATCH,
  SYNC_TREE_NODE,
  SYNC_TREE_NODE_FROM_HASH,
  SYNC_TREE_NODE_NEW,
  SYNC_INDEX_TREE,
  SYNC_RSEGS,
  SYNC_UNDO_SPACES,

  SYNC_PERSIST_DIRTY_TABLES,
  SYNC_PERSIST_AUTOINC,

  SYNC_IBUF_PESS_INSERT_MUTEX,
  SYNC_IBUF_HEADER,
  SYNC_DICT_HEADER,
  SYNC_TABLE,
  SYNC_STATS_AUTO_RECALC,
  SYNC_DICT_AUTOINC_MUTEX,
  SYNC_DICT,
  SYNC_PARSER,
  SYNC_FTS_CACHE,
  SYNC_UNDO_DDL,

  SYNC_DICT_OPERATION,

  SYNC_TRX_I_S_LAST_READ,

  SYNC_TRX_I_S_RWLOCK,

  SYNC_RECV_WRITER,

  /** Level is varying. Only used with buffer pool page locks, which
  do not have a fixed level, but instead have their level set after
  the page is locked; see e.g. ibuf_bitmap_get_map_page(). */

  SYNC_LEVEL_VARYING,

  /** This can be used to suppress order checking. */
  SYNC_NO_ORDER_CHECK,

  /** Maximum level value */
  SYNC_LEVEL_MAX = SYNC_NO_ORDER_CHECK
};
348
/** Each latch has an ID. This id is used for creating the latch and to look
up its meta-data. See sync0debug.cc. The order does not matter here, but
alphabetical ordering seems useful */
enum latch_id_t {
  /** Sentinel: no latch id assigned. */
  LATCH_ID_NONE = 0,
  LATCH_ID_AUTOINC,
  LATCH_ID_BUF_BLOCK_MUTEX,
  LATCH_ID_BUF_POOL_CHUNKS,
  LATCH_ID_BUF_POOL_ZIP,
  LATCH_ID_BUF_POOL_LRU_LIST,
  LATCH_ID_BUF_POOL_FREE_LIST,
  LATCH_ID_BUF_POOL_ZIP_FREE,
  LATCH_ID_BUF_POOL_ZIP_HASH,
  LATCH_ID_BUF_POOL_FLUSH_STATE,
  LATCH_ID_DBLWR,
  LATCH_ID_CACHE_LAST_READ,
  LATCH_ID_DICT_FOREIGN_ERR,
  LATCH_ID_DICT_SYS,
  LATCH_ID_DICT_TABLE,
  LATCH_ID_FIL_SHARD,
  LATCH_ID_FLUSH_LIST,
  LATCH_ID_FTS_BG_THREADS,
  LATCH_ID_FTS_DELETE,
  LATCH_ID_FTS_OPTIMIZE,
  LATCH_ID_FTS_DOC_ID,
  LATCH_ID_FTS_PLL_TOKENIZE,
  LATCH_ID_HASH_TABLE_MUTEX,
  LATCH_ID_IBUF_BITMAP,
  LATCH_ID_IBUF,
  LATCH_ID_IBUF_PESSIMISTIC_INSERT,
  LATCH_ID_LOCK_FREE_HASH,
  LATCH_ID_LOCK_SYS_GLOBAL,
  LATCH_ID_LOCK_SYS_PAGE,
  LATCH_ID_LOCK_SYS_TABLE,
  LATCH_ID_LOCK_SYS_WAIT,
  LATCH_ID_LOG_SN,
  LATCH_ID_LOG_CHECKPOINTER,
  LATCH_ID_LOG_CLOSER,
  LATCH_ID_LOG_WRITER,
  LATCH_ID_LOG_FLUSHER,
  LATCH_ID_LOG_WRITE_NOTIFIER,
  LATCH_ID_LOG_FLUSH_NOTIFIER,
  LATCH_ID_LOG_LIMITS,
  LATCH_ID_PARSER,
  LATCH_ID_LOG_ARCH,
  LATCH_ID_PAGE_ARCH,
  LATCH_ID_PAGE_ARCH_OPER,
  LATCH_ID_PAGE_ARCH_CLIENT,
  LATCH_ID_PERSIST_METADATA_BUFFER,
  LATCH_ID_DICT_PERSIST_DIRTY_TABLES,
  LATCH_ID_PERSIST_AUTOINC,
  LATCH_ID_DICT_PERSIST_CHECKPOINT,
  LATCH_ID_PAGE_CLEANER,
  LATCH_ID_PURGE_SYS_PQ,
  LATCH_ID_RECALC_POOL,
  LATCH_ID_RECV_SYS,
  LATCH_ID_RECV_WRITER,
  LATCH_ID_TEMP_SPACE_RSEG,
  LATCH_ID_UNDO_SPACE_RSEG,
  LATCH_ID_TRX_SYS_RSEG,
  LATCH_ID_RW_LOCK_DEBUG,
  LATCH_ID_RTR_SSN_MUTEX,
  LATCH_ID_RTR_ACTIVE_MUTEX,
  LATCH_ID_RTR_MATCH_MUTEX,
  LATCH_ID_RTR_PATH_MUTEX,
  LATCH_ID_RW_LOCK_LIST,
  LATCH_ID_RW_LOCK_MUTEX,
  LATCH_ID_SRV_INNODB_MONITOR,
  LATCH_ID_SRV_MISC_TMPFILE,
  LATCH_ID_SRV_MONITOR_FILE,
  LATCH_ID_SYNC_THREAD,
  LATCH_ID_TRX_UNDO,
  LATCH_ID_TRX_POOL,
  LATCH_ID_TRX_POOL_MANAGER,
  LATCH_ID_TEMP_POOL_MANAGER,
  LATCH_ID_TRX,
  LATCH_ID_TRX_SYS,
  LATCH_ID_SRV_SYS,
  LATCH_ID_SRV_SYS_TASKS,
  LATCH_ID_PAGE_ZIP_STAT_PER_INDEX,
  LATCH_ID_EVENT_MANAGER,
  LATCH_ID_EVENT_MUTEX,
  LATCH_ID_SYNC_ARRAY_MUTEX,
  LATCH_ID_ZIP_PAD_MUTEX,
  LATCH_ID_OS_AIO_READ_MUTEX,
  LATCH_ID_OS_AIO_WRITE_MUTEX,
  LATCH_ID_OS_AIO_LOG_MUTEX,
  LATCH_ID_OS_AIO_IBUF_MUTEX,
  LATCH_ID_OS_AIO_SYNC_MUTEX,
  LATCH_ID_ROW_DROP_LIST,
  LATCH_ID_INDEX_ONLINE_LOG,
  LATCH_ID_WORK_QUEUE,
  LATCH_ID_BTR_SEARCH,
  LATCH_ID_BUF_BLOCK_LOCK,
  LATCH_ID_BUF_BLOCK_DEBUG,
  LATCH_ID_DICT_OPERATION,
  LATCH_ID_CHECKPOINT,
  LATCH_ID_RSEGS,
  LATCH_ID_UNDO_SPACES,
  LATCH_ID_UNDO_DDL,
  LATCH_ID_FIL_SPACE,
  LATCH_ID_FTS_CACHE,
  LATCH_ID_FTS_CACHE_INIT,
  LATCH_ID_TRX_I_S_CACHE,
  LATCH_ID_TRX_PURGE,
  LATCH_ID_IBUF_INDEX_TREE,
  LATCH_ID_INDEX_TREE,
  LATCH_ID_DICT_TABLE_STATS,
  LATCH_ID_HASH_TABLE_RW_LOCK,
  LATCH_ID_BUF_CHUNK_MAP_LATCH,
  LATCH_ID_SYNC_DEBUG_MUTEX,
  LATCH_ID_MASTER_KEY_ID_MUTEX,
  LATCH_ID_FILE_OPEN,
  LATCH_ID_CLONE_SYS,
  LATCH_ID_CLONE_TASK,
  LATCH_ID_CLONE_SNAPSHOT,
  LATCH_ID_PARALLEL_READ,
  LATCH_ID_DBLR,
  LATCH_ID_REDO_LOG_ARCHIVE_ADMIN_MUTEX,
  LATCH_ID_REDO_LOG_ARCHIVE_QUEUE_MUTEX,
  LATCH_ID_XTRA_DATAFILES_ITER_MUTEX,
  LATCH_ID_XTRA_COUNT_MUTEX,
  LATCH_ID_XTRA_DATADIR_ITER_T_MUTEX,
  /* Keep LATCH_ID_TEST_MUTEX last: LATCH_ID_MAX below aliases it. */
  LATCH_ID_TEST_MUTEX,
  /** Highest valid latch id. */
  LATCH_ID_MAX = LATCH_ID_TEST_MUTEX
};
475
476 /** OS mutex, without any policy. It is a thin wrapper around the
477 system mutexes. The interface is different from the policy mutexes,
478 to ensure that it is called directly and not confused with the
479 policy mutexes. */
480 struct OSMutex {
481 /** Constructor */
OSMutexOSMutex482 OSMutex() UNIV_NOTHROW { ut_d(m_freed = true); }
483
484 /** Create the mutex by calling the system functions. */
initOSMutex485 void init() UNIV_NOTHROW {
486 ut_ad(m_freed);
487
488 #ifdef _WIN32
489 InitializeCriticalSection((LPCRITICAL_SECTION)&m_mutex);
490 #else
491 {
492 int ret = pthread_mutex_init(&m_mutex, nullptr);
493 ut_a(ret == 0);
494 }
495 #endif /* _WIN32 */
496
497 ut_d(m_freed = false);
498 }
499
500 /** Destructor */
~OSMutexOSMutex501 ~OSMutex() {}
502
503 /** Destroy the mutex */
destroyOSMutex504 void destroy() UNIV_NOTHROW {
505 ut_ad(innodb_calling_exit || !m_freed);
506 #ifdef _WIN32
507 DeleteCriticalSection((LPCRITICAL_SECTION)&m_mutex);
508 #else
509 int ret;
510
511 ret = pthread_mutex_destroy(&m_mutex);
512
513 if (ret != 0) {
514 #ifdef UNIV_NO_ERR_MSGS
515 ib::error()
516 #else
517 ib::error(ER_IB_MSG_1372)
518 #endif
519 << "Return value " << ret << " when calling pthread_mutex_destroy().";
520 }
521 #endif /* _WIN32 */
522 ut_d(m_freed = true);
523 }
524
525 /** Release the mutex. */
exitOSMutex526 void exit() UNIV_NOTHROW {
527 ut_ad(innodb_calling_exit || !m_freed);
528 #ifdef _WIN32
529 LeaveCriticalSection(&m_mutex);
530 #else
531 int ret = pthread_mutex_unlock(&m_mutex);
532 ut_a(ret == 0);
533 #endif /* _WIN32 */
534 }
535
536 /** Acquire the mutex. */
enterOSMutex537 void enter() UNIV_NOTHROW {
538 ut_ad(innodb_calling_exit || !m_freed);
539 #ifdef _WIN32
540 EnterCriticalSection((LPCRITICAL_SECTION)&m_mutex);
541 #else
542 int ret = pthread_mutex_lock(&m_mutex);
543 ut_a(ret == 0);
544 #endif /* _WIN32 */
545 }
546
547 /** @return true if locking succeeded */
try_lockOSMutex548 bool try_lock() UNIV_NOTHROW {
549 ut_ad(innodb_calling_exit || !m_freed);
550 #ifdef _WIN32
551 return (TryEnterCriticalSection(&m_mutex) != 0);
552 #else
553 return (pthread_mutex_trylock(&m_mutex) == 0);
554 #endif /* _WIN32 */
555 }
556
557 /** Required for os_event_t */
558 operator sys_mutex_t *() UNIV_NOTHROW { return (&m_mutex); }
559
560 #if defined(UNIV_LIBRARY) && defined(UNIV_DEBUG)
is_ownedOSMutex561 bool is_owned() {
562 /* This should never be reached. This is
563 added to fix is_owned() compilation errors
564 for library. We will never reach here because
565 mutexes are disabled in library. */
566 ut_error;
567 return (false);
568 }
569 #endif /* UNIV_LIBRARY && UNIV_DEBUG */
570
571 private:
572 #ifdef UNIV_DEBUG
573 /** true if the mutex has been freed/destroyed. */
574 bool m_freed;
575 #endif /* UNIV_DEBUG */
576
577 sys_mutex_t m_mutex;
578 };
579
580 #ifndef UNIV_LIBRARY
#ifdef UNIV_PFS_MUTEX
/** Latch element
Used for mutexes which have PFS keys defined under UNIV_PFS_MUTEX.
@param[in]	id	Latch id
@param[in]	level	Latch level
@param[in]	key	PFS key */
#define LATCH_ADD_MUTEX(id, level, key) \
  latch_meta[LATCH_ID_##id] =           \
      UT_NEW_NOKEY(latch_meta_t(LATCH_ID_##id, #id, level, #level, key))

#ifdef UNIV_PFS_RWLOCK
/** Latch element.
Used for rwlocks which have PFS keys defined under UNIV_PFS_RWLOCK.
@param[in]	id	Latch id
@param[in]	level	Latch level
@param[in]	key	PFS key */
#define LATCH_ADD_RWLOCK(id, level, key) \
  latch_meta[LATCH_ID_##id] =            \
      UT_NEW_NOKEY(latch_meta_t(LATCH_ID_##id, #id, level, #level, key))
#else
/* PFS mutexes without PFS rwlocks: rwlocks are registered with
PSI_NOT_INSTRUMENTED instead of the supplied key. */
#define LATCH_ADD_RWLOCK(id, level, key)    \
  latch_meta[LATCH_ID_##id] = UT_NEW_NOKEY( \
      latch_meta_t(LATCH_ID_##id, #id, level, #level, PSI_NOT_INSTRUMENTED))
#endif /* UNIV_PFS_RWLOCK */

#else
/* No PFS instrumentation at all: the key argument is ignored and the
latch_meta_t constructor without a PFS key is used. */
#define LATCH_ADD_MUTEX(id, level, key) \
  latch_meta[LATCH_ID_##id] =           \
      UT_NEW_NOKEY(latch_meta_t(LATCH_ID_##id, #id, level, #level))
#define LATCH_ADD_RWLOCK(id, level, key) \
  latch_meta[LATCH_ID_##id] =            \
      UT_NEW_NOKEY(latch_meta_t(LATCH_ID_##id, #id, level, #level))
#endif /* UNIV_PFS_MUTEX */
614
615 /** Default latch counter */
616 class LatchCounter {
617 public:
618 /** The counts we collect for a mutex */
619 struct Count {
620 /** Constructor */
CountCount621 Count() UNIV_NOTHROW : m_spins(), m_waits(), m_calls(), m_enabled() {
622 /* No op */
623 }
624
625 /** Rest the values to zero */
resetCount626 void reset() UNIV_NOTHROW {
627 m_spins = 0;
628 m_waits = 0;
629 m_calls = 0;
630 }
631
632 /** Number of spins trying to acquire the latch. */
633 uint32_t m_spins;
634
635 /** Number of waits trying to acquire the latch */
636 uint32_t m_waits;
637
638 /** Number of times it was called */
639 uint32_t m_calls;
640
641 /** true if enabled */
642 bool m_enabled;
643 };
644
645 /** Constructor */
LatchCounter()646 LatchCounter() UNIV_NOTHROW : m_active(false) { m_mutex.init(); }
647
648 /** Destructor */
~LatchCounter()649 ~LatchCounter() UNIV_NOTHROW {
650 m_mutex.destroy();
651
652 for (Counters::iterator it = m_counters.begin(); it != m_counters.end();
653 ++it) {
654 Count *count = *it;
655
656 UT_DELETE(count);
657 }
658 }
659
660 /** Reset all counters to zero. It is not protected by any
661 mutex and we don't care about atomicity. Unless it is a
662 demonstrated problem. The information collected is not
663 required for the correct functioning of the server. */
reset()664 void reset() UNIV_NOTHROW {
665 m_mutex.enter();
666
667 Counters::iterator end = m_counters.end();
668
669 for (Counters::iterator it = m_counters.begin(); it != end; ++it) {
670 (*it)->reset();
671 }
672
673 m_mutex.exit();
674 }
675
676 /** @return the aggregate counter */
sum_register()677 Count *sum_register() UNIV_NOTHROW {
678 m_mutex.enter();
679
680 Count *count;
681
682 if (m_counters.empty()) {
683 count = UT_NEW_NOKEY(Count());
684 m_counters.push_back(count);
685 } else {
686 ut_a(m_counters.size() == 1);
687 count = m_counters[0];
688 }
689
690 m_mutex.exit();
691
692 return (count);
693 }
694
695 /** Deregister the count. We don't do anything
696 @param[in] count The count instance to deregister */
sum_deregister(Count * count)697 void sum_deregister(Count *count) UNIV_NOTHROW { /* Do nothing */
698 }
699
700 /** Register a single instance counter */
single_register(Count * count)701 void single_register(Count *count) UNIV_NOTHROW {
702 m_mutex.enter();
703
704 m_counters.push_back(count);
705
706 m_mutex.exit();
707 }
708
709 /** Deregister a single instance counter
710 @param[in] count The count instance to deregister */
single_deregister(Count * count)711 void single_deregister(Count *count) UNIV_NOTHROW {
712 m_mutex.enter();
713
714 m_counters.erase(std::remove(m_counters.begin(), m_counters.end(), count),
715 m_counters.end());
716
717 m_mutex.exit();
718 }
719
720 /** Iterate over the counters */
721 template <typename Callback>
iterate(Callback & callback)722 void iterate(Callback &callback) const UNIV_NOTHROW {
723 Counters::const_iterator end = m_counters.end();
724
725 for (Counters::const_iterator it = m_counters.begin(); it != end; ++it) {
726 callback(*it);
727 }
728 }
729
730 /** Disable the monitoring */
enable()731 void enable() UNIV_NOTHROW {
732 m_mutex.enter();
733
734 Counters::const_iterator end = m_counters.end();
735
736 for (Counters::const_iterator it = m_counters.begin(); it != end; ++it) {
737 (*it)->m_enabled = true;
738 }
739
740 m_active = true;
741
742 m_mutex.exit();
743 }
744
745 /** Disable the monitoring */
disable()746 void disable() UNIV_NOTHROW {
747 m_mutex.enter();
748
749 Counters::const_iterator end = m_counters.end();
750
751 for (Counters::const_iterator it = m_counters.begin(); it != end; ++it) {
752 (*it)->m_enabled = false;
753 }
754
755 m_active = false;
756
757 m_mutex.exit();
758 }
759
760 /** @return if monitoring is active */
is_enabled()761 bool is_enabled() const UNIV_NOTHROW { return (m_active); }
762
763 private:
764 /* Disable copying */
765 LatchCounter(const LatchCounter &);
766 LatchCounter &operator=(const LatchCounter &);
767
768 private:
769 typedef OSMutex Mutex;
770 typedef std::vector<Count *> Counters;
771
772 /** Mutex protecting m_counters */
773 Mutex m_mutex;
774
775 /** Counters for the latches */
776 Counters m_counters;
777
778 /** if true then we collect the data */
779 bool m_active;
780 };
781
/** Latch meta data */
template <typename Counter = LatchCounter>
class LatchMeta {
 public:
  typedef Counter CounterType;

#ifdef UNIV_PFS_MUTEX
  typedef mysql_pfs_key_t pfs_key_t;
#endif /* UNIV_PFS_MUTEX */

  /** Default constructor: id LATCH_ID_NONE, level SYNC_UNKNOWN,
  null name/level-name. */
  LatchMeta()
      : m_id(LATCH_ID_NONE),
        m_name(),
        m_level(SYNC_UNKNOWN),
        m_level_name()
#ifdef UNIV_PFS_MUTEX
        ,
        m_pfs_key()
#endif /* UNIV_PFS_MUTEX */
  {
  }

  /** Destructor */
  ~LatchMeta() {}

  /** Constructor
  @param[in]	id	Latch id
  @param[in]	name	Latch name
  @param[in]	level	Latch level
  @param[in]	level_name	Latch level text representation
  @param[in]	key	PFS key (parameter exists only when
                          UNIV_PFS_MUTEX is defined) */
  LatchMeta(latch_id_t id, const char *name, latch_level_t level,
            const char *level_name
#ifdef UNIV_PFS_MUTEX
            ,
            pfs_key_t key
#endif /* UNIV_PFS_MUTEX */
            )
      : m_id(id),
        m_name(name),
        m_level(level),
        m_level_name(level_name)
#ifdef UNIV_PFS_MUTEX
        ,
        m_pfs_key(key)
#endif /* UNIV_PFS_MUTEX */
  {
    /* No op */
  }

  /** Less than operator.
  @param[in]	rhs	Instance to compare against
  @return true if this.get_id() < rhs.get_id() */
  bool operator<(const LatchMeta &rhs) const {
    return (get_id() < rhs.get_id());
  }

  /** @return the latch id */
  latch_id_t get_id() const { return (m_id); }

  /** @return the latch name */
  const char *get_name() const { return (m_name); }

  /** @return the latch level */
  latch_level_t get_level() const { return (m_level); }

  /** @return the latch level name */
  const char *get_level_name() const { return (m_level_name); }

#ifdef UNIV_PFS_MUTEX
  /** @return the PFS key for the latch */
  pfs_key_t get_pfs_key() const { return (m_pfs_key); }
#endif /* UNIV_PFS_MUTEX */

  /** @return the counter instance */
  Counter *get_counter() { return (&m_counter); }

 private:
  /** Latch id */
  latch_id_t m_id;

  /** Latch name; not owned (the LATCH_ADD macros pass string
  literals via the # stringizing operator). */
  const char *m_name;

  /** Latch level in the ordering */
  latch_level_t m_level;

  /** Latch level text representation; not owned. */
  const char *m_level_name;

#ifdef UNIV_PFS_MUTEX
  /** PFS key */
  pfs_key_t m_pfs_key;
#endif /* UNIV_PFS_MUTEX */

  /** For gathering latch statistics */
  Counter m_counter;
};
881
882 typedef LatchMeta<LatchCounter> latch_meta_t;
883 typedef std::vector<latch_meta_t *, ut_allocator<latch_meta_t *>> LatchMetaData;
884
885 /** Note: This is accessed without any mutex protection. It is initialised
886 at startup and elements should not be added to or removed from it after
887 that. See sync_latch_meta_init() */
888 extern LatchMetaData latch_meta;
889
890 /** Get the latch meta-data from the latch ID
891 @param[in] id Latch ID
892 @return the latch meta data */
sync_latch_get_meta(latch_id_t id)893 inline latch_meta_t &sync_latch_get_meta(latch_id_t id) {
894 ut_ad(static_cast<size_t>(id) < latch_meta.size());
895 ut_ad(id == latch_meta[id]->get_id());
896
897 return (*latch_meta[id]);
898 }
899
900 /** Fetch the counter for the latch
901 @param[in] id Latch ID
902 @return the latch counter */
sync_latch_get_counter(latch_id_t id)903 inline latch_meta_t::CounterType *sync_latch_get_counter(latch_id_t id) {
904 latch_meta_t &meta = sync_latch_get_meta(id);
905
906 return (meta.get_counter());
907 }
908
909 /** Get the latch name from the latch ID
910 @param[in] id Latch ID
911 @return the name, will assert if not found */
sync_latch_get_name(latch_id_t id)912 inline const char *sync_latch_get_name(latch_id_t id) {
913 const latch_meta_t &meta = sync_latch_get_meta(id);
914
915 return (meta.get_name());
916 }
917
918 /** Get the latch ordering level
919 @param[in] id Latch id to lookup
920 @return the latch level */
sync_latch_get_level(latch_id_t id)921 inline latch_level_t sync_latch_get_level(latch_id_t id) {
922 const latch_meta_t &meta = sync_latch_get_meta(id);
923
924 return (meta.get_level());
925 }
926
927 #ifdef UNIV_PFS_MUTEX
928 /** Get the latch PFS key from the latch ID
929 @param[in] id Latch ID
930 @return the PFS key */
sync_latch_get_pfs_key(latch_id_t id)931 inline mysql_pfs_key_t sync_latch_get_pfs_key(latch_id_t id) {
932 const latch_meta_t &meta = sync_latch_get_meta(id);
933
934 return (meta.get_pfs_key());
935 }
936 #endif /* UNIV_PFS_MUTEX */
937
938 #ifndef UNIV_HOTBACKUP
939 /** String representation of the filename and line number where the
940 latch was created
941 @param[in] id Latch ID
942 @param[in] created Filename and line number where it was crated
943 @return the string representation */
944 std::string sync_mutex_to_string(latch_id_t id, const std::string &created);
945
946 /** Get the latch name from a sync level
947 @param[in] level Latch level to lookup
948 @return 0 if not found. */
949 const char *sync_latch_get_name(latch_level_t level);
950
951 /** Print the filename "basename"
952 @return the basename */
953 const char *sync_basename(const char *filename);
954 #endif /* !UNIV_HOTBACKUP */
955
956 /** Register a latch, called when it is created
957 @param[in] ptr Latch instance that was created
958 @param[in] filename Filename where it was created
959 @param[in] line Line number in filename */
960 void sync_file_created_register(const void *ptr, const char *filename,
961 uint16_t line);
962
963 /** Deregister a latch, called when it is destroyed
964 @param[in] ptr Latch to be destroyed */
965 void sync_file_created_deregister(const void *ptr);
966
967 /** Get the string where the file was created. Its format is "name:line"
968 @param[in] ptr Latch instance
969 @return created information or "" if can't be found */
970 std::string sync_file_created_get(const void *ptr);
971
972 #endif /* !UNIV_LIBRARY */
973
974 #ifdef UNIV_DEBUG
975
976 /** All (ordered) latches, used in debugging, must derive from this class. */
977 struct latch_t {
978 /** Constructor
979 @param[in] id The latch ID */
m_idlatch_t980 explicit latch_t(latch_id_t id = LATCH_ID_NONE) UNIV_NOTHROW : m_id(id),
981 m_rw_lock(),
982 m_temp_fsp() {}
983
984 latch_t &operator=(const latch_t &) = default;
985
986 /** Destructor */
~latch_tlatch_t987 virtual ~latch_t() UNIV_NOTHROW {}
988
989 /** @return the latch ID */
get_idlatch_t990 latch_id_t get_id() const { return (m_id); }
991
992 /** @return true if it is a rw-lock */
is_rw_locklatch_t993 bool is_rw_lock() const UNIV_NOTHROW { return (m_rw_lock); }
994
995 /** Print the latch context
996 @return the string representation */
997 virtual std::string to_string() const = 0;
998
999 /** @return "filename:line" from where the latch was last locked */
1000 virtual std::string locked_from() const = 0;
1001
1002 /** @return the latch level */
get_levellatch_t1003 latch_level_t get_level() const UNIV_NOTHROW {
1004 ut_a(m_id != LATCH_ID_NONE);
1005
1006 #ifndef UNIV_LIBRARY
1007 return (sync_latch_get_level(m_id));
1008 #else
1009 /* This should never be reached. This is
1010 added to fix compilation errors
1011 for library. We will never reach here because
1012 mutexes are disabled in library. */
1013 ut_error;
1014 return (SYNC_UNKNOWN);
1015 #endif /* !UNIV_LIBRARY */
1016 }
1017
1018 /** @return true if the latch is for a temporary file space*/
is_temp_fsplatch_t1019 bool is_temp_fsp() const UNIV_NOTHROW { return (m_temp_fsp); }
1020
1021 /** Set the temporary tablespace flag. The latch order constraints
1022 are different for intrinsic tables. We don't always acquire the
1023 index->lock. We need to figure out the context and add some special
1024 rules during the checks. */
set_temp_fsplatch_t1025 void set_temp_fsp() UNIV_NOTHROW {
1026 ut_ad(get_id() == LATCH_ID_FIL_SPACE);
1027 m_temp_fsp = true;
1028 }
1029
1030 /** @return the latch name, m_id must be set */
get_namelatch_t1031 const char *get_name() const UNIV_NOTHROW {
1032 ut_a(m_id != LATCH_ID_NONE);
1033
1034 #ifndef UNIV_LIBRARY
1035 return (sync_latch_get_name(m_id));
1036 #else
1037 /* This should never be reached. This is
1038 added to fix compilation errors
1039 for library. We will never reach here because
1040 mutexes are disabled in library. */
1041 ut_error;
1042 return (nullptr);
1043 #endif /* !UNIV_LIBRARY */
1044 }
1045
1046 /** Latch ID */
1047 latch_id_t m_id;
1048
1049 /** true if it is a rw-lock. In debug mode, rw_lock_t derives from
1050 this class and sets this variable. */
1051 bool m_rw_lock;
1052
1053 /** true if it is an temporary space latch */
1054 bool m_temp_fsp;
1055 };
1056
/** Subclass this to iterate over a thread's acquired latch levels. */
struct sync_check_functor_t {
  /** Destructor */
  virtual ~sync_check_functor_t() {}

  /** Called for every latch level owned by the calling thread.
  @return true if the check fails for that level */
  virtual bool operator()(const latch_level_t) = 0;

  /** @return the accumulated result of the per-level checks */
  virtual bool result() const = 0;
};
1063
1064 /** Functor to check whether the calling thread owns the btr search mutex. */
1065 struct btrsea_sync_check : public sync_check_functor_t {
1066 /** Constructor
1067 @param[in] has_search_latch true if owns the latch */
btrsea_sync_checkbtrsea_sync_check1068 explicit btrsea_sync_check(bool has_search_latch)
1069 : m_result(), m_has_search_latch(has_search_latch) {}
1070
1071 /** Destructor */
~btrsea_sync_checkbtrsea_sync_check1072 ~btrsea_sync_check() override {}
1073
1074 /** Called for every latch owned by the calling thread.
1075 @param[in] level Level of the existing latch
1076 @return true if the predicate check fails */
operatorbtrsea_sync_check1077 virtual bool operator()(const latch_level_t level) {
1078 /* If calling thread doesn't hold search latch then
1079 check if there are latch level exception provided.
1080
1081 Note: Optimizer has added InnoDB intrinsic table as an
1082 alternative to MyISAM intrinsic table. With this a new
1083 control flow comes into existence, it is:
1084
1085 Server -> Plugin -> SE
1086
1087 Plugin in this case is I_S which is sharing the latch vector
1088 of InnoDB and so there could be lock conflicts. Ideally
1089 the Plugin should use a difference namespace latch vector
1090 as it doesn't have any depedency with SE latching protocol.
1091
1092 Added check that will allow thread to hold I_S latches */
1093
1094 if (!m_has_search_latch &&
1095 (level != SYNC_SEARCH_SYS && level != SYNC_DICT &&
1096 level != SYNC_FTS_CACHE && level != SYNC_UNDO_DDL &&
1097 level != SYNC_DICT_OPERATION && level != SYNC_TRX_I_S_LAST_READ &&
1098 level != SYNC_TRX_I_S_RWLOCK)) {
1099 m_result = true;
1100 #ifdef UNIV_NO_ERR_MSGS
1101 ib::error()
1102 #else
1103 ib::error(ER_IB_MSG_1373)
1104 #endif
1105 << "Debug: Calling thread does not hold search "
1106 "latch but does hold latch level "
1107 << level << ".";
1108
1109 return (m_result);
1110 }
1111
1112 return (false);
1113 }
1114
1115 /** @return result from the check */
resultbtrsea_sync_check1116 virtual bool result() const { return (m_result); }
1117
1118 private:
1119 /** True if all OK */
1120 bool m_result;
1121
1122 /** If the caller owns the search latch */
1123 const bool m_has_search_latch;
1124 };
1125
1126 /** Functor to check for dictionary latching constraints. */
1127 struct dict_sync_check : public sync_check_functor_t {
1128 /** Constructor
1129 @param[in] dict_mutex_allowed true if the dict mutex
1130 is allowed */
dict_sync_checkdict_sync_check1131 explicit dict_sync_check(bool dict_mutex_allowed)
1132 : m_result(), m_dict_mutex_allowed(dict_mutex_allowed) {}
1133
1134 /** Destructor */
~dict_sync_checkdict_sync_check1135 ~dict_sync_check() override {}
1136
1137 /** Check the latching constraints
1138 @param[in] level The level held by the thread */
operatordict_sync_check1139 virtual bool operator()(const latch_level_t level) {
1140 if (!m_dict_mutex_allowed ||
1141 (level != SYNC_DICT && level != SYNC_UNDO_SPACES &&
1142 level != SYNC_FTS_CACHE && level != SYNC_DICT_OPERATION &&
1143 /* This only happens in recv_apply_hashed_log_recs. */
1144 level != SYNC_RECV_WRITER && level != SYNC_NO_ORDER_CHECK)) {
1145 m_result = true;
1146 #ifdef UNIV_NO_ERR_MSGS
1147 ib::error()
1148 #else
1149 ib::error(ER_IB_MSG_1374)
1150 #endif
1151 << "Debug: Dictionary latch order violation for level " << level
1152 << ".";
1153
1154 return (true);
1155 }
1156
1157 return (false);
1158 }
1159
1160 /** @return the result of the check */
resultdict_sync_check1161 virtual bool result() const { return (m_result); }
1162
1163 private:
1164 /** True if all OK */
1165 bool m_result;
1166
1167 /** True if it is OK to hold the dict mutex */
1168 const bool m_dict_mutex_allowed;
1169 };
1170
1171 /** Functor to check for given latching constraints. */
1172 struct sync_allowed_latches : public sync_check_functor_t {
1173 /** Constructor
1174 @param[in] from first element in an array of latch_level_t
1175 @param[in] to last element in an array of latch_level_t */
sync_allowed_latchessync_allowed_latches1176 sync_allowed_latches(const latch_level_t *from, const latch_level_t *to)
1177 : m_result(), m_latches(from, to) {}
1178
1179 /** Check whether the given latch_t violates the latch constraint.
1180 This object maintains a list of allowed latch levels, and if the given
1181 latch belongs to a latch level that is not there in the allowed list,
1182 then it is a violation.
1183
1184 @param[in] level The latch level to check
1185 @return true if there is a latch ordering violation */
operatorsync_allowed_latches1186 virtual bool operator()(const latch_level_t level) {
1187 for (latches_t::const_iterator it = m_latches.begin();
1188 it != m_latches.end(); ++it) {
1189 if (level == *it) {
1190 m_result = false;
1191
1192 /* No violation */
1193 return (m_result);
1194 }
1195 }
1196
1197 #ifdef UNIV_NO_ERR_MSGS
1198 ib::error()
1199 #else
1200 ib::error(ER_IB_MSG_1375)
1201 #endif
1202 << "Debug: sync_allowed_latches violation for level=" << level;
1203 m_result = true;
1204 return (m_result);
1205 }
1206
1207 /** @return the result of the check */
resultsync_allowed_latches1208 virtual bool result() const { return (m_result); }
1209
1210 private:
1211 /** Save the result of validation check here
1212 True if all OK */
1213 bool m_result;
1214
1215 typedef std::vector<latch_level_t, ut_allocator<latch_level_t>> latches_t;
1216
1217 /** List of latch levels that are allowed to be held */
1218 latches_t m_latches;
1219 };
1220
/** Get the latch id from a latch name.
@param[in]	name	Latch name
@return the id of the latch matching the given name, presumably
LATCH_ID_NONE when no latch with that name exists — the original
comment only says "@return LATCH_ID_NONE."; confirm against the
definition in the .cc file. */
latch_id_t sync_latch_get_id(const char *name);
1225
/** Bitmask of rw_lock_flag_t values; multiple flags may be OR-ed
together when querying lock ownership. */
typedef ulint rw_lock_flags_t;

/* Flags to specify lock types for rw_lock_own_flagged().
Each flag occupies a distinct bit so they can be combined into a
rw_lock_flags_t mask. */
enum rw_lock_flag_t {
  RW_LOCK_FLAG_S = 1 << 0,  /*!< S (shared) mode */
  RW_LOCK_FLAG_X = 1 << 1,  /*!< X (exclusive) mode */
  RW_LOCK_FLAG_SX = 1 << 2  /*!< SX (shared-exclusive) mode */
};
1234
#endif /* UNIV_DEBUG */
1236
1237 #endif /* sync0types_h */
1238