1 /*****************************************************************************
2
3 Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2017, 2020, MariaDB Corporation.
5
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
15
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
23
24 *****************************************************************************/
25
26 /**************************************************//**
27 @file sync/sync0debug.cc
28 Debug checks for latches.
29
30 Created 2012-08-21 Sunny Bains
31 *******************************************************/
32
33 #include "sync0sync.h"
34 #include "sync0debug.h"
35 #include "srv0start.h"
36 #include "fil0fil.h"
37
38 #include <vector>
39 #include <string>
40 #include <algorithm>
41 #include <iostream>
42
43 #ifdef UNIV_DEBUG
44
/** Enable latch-order (sync) debug checking at runtime; presumably set
from a server startup parameter — TODO confirm against the option table. */
my_bool srv_sync_debug;

/** The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
static SysMutex rw_lock_debug_mutex;
51
52 /** The latch held by a thread */
53 struct Latched {
54
55 /** Constructor */
LatchedLatched56 Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
57
58 /** Constructor
59 @param[in] latch Latch instance
60 @param[in] level Level of latch held */
LatchedLatched61 Latched(const latch_t* latch,
62 latch_level_t level)
63 :
64 m_latch(latch),
65 m_level(level)
66 {
67 /* No op */
68 }
69
70 /** @return the latch level */
get_levelLatched71 latch_level_t get_level() const
72 {
73 return(m_level);
74 }
75
76 /** Check if the rhs latch and level match
77 @param[in] rhs instance to compare with
78 @return true on match */
operator ==Latched79 bool operator==(const Latched& rhs) const
80 {
81 return(m_latch == rhs.m_latch && m_level == rhs.m_level);
82 }
83
84 /** The latch instance */
85 const latch_t* m_latch;
86
87 /** The latch level. For buffer blocks we can pass a separate latch
88 level to check against, see buf_block_dbg_add_level() */
89 latch_level_t m_level;
90 };
91
92 /** Thread specific latches. This is ordered on level in descending order. */
93 typedef std::vector<Latched, ut_allocator<Latched> > Latches;
94
/** The deadlock detector. Tracks, per OS thread, the latches currently
held and checks that latches are always acquired in latching order. */
struct LatchDebug {

	/** Debug mutex for control structures, should not be tracked
	by this module. */
	typedef OSMutex Mutex;

	/** Comparator for the ThreadMap. */
	struct os_thread_id_less
		: public std::binary_function<
			os_thread_id_t,
			os_thread_id_t,
			bool>
	{
		/** @return true if lhs < rhs */
		bool operator()(
			const os_thread_id_t& lhs,
			const os_thread_id_t& rhs) const
			UNIV_NOTHROW
		{
			return(os_thread_pf(lhs) < os_thread_pf(rhs));
		}
	};

	/** For tracking a thread's latches. */
	typedef std::map<
		os_thread_id_t,
		Latches*,
		os_thread_id_less,
		ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
		ThreadMap;

	/** Constructor */
	LatchDebug()
		UNIV_NOTHROW;

	/** Destructor */
	~LatchDebug()
		UNIV_NOTHROW
	{
		m_mutex.destroy();
	}

	/** Create a new instance if one doesn't exist else return
	the existing one.
	@param[in]	add	add an empty entry if one is not
				found (default no)
	@return pointer to a thread's acquired latches. */
	Latches* thread_latches(bool add = false)
		UNIV_NOTHROW;

	/** Check that all the latches already owned by a thread have a lower
	level than limit.
	@param[in]	latches	the thread's existing (acquired) latches
	@param[in]	limit	to check against
	@return latched if there is one with a level <= limit . */
	const Latched* less(
		const Latches*	latches,
		latch_level_t	limit) const
		UNIV_NOTHROW;

	/** Checks if the level value exists in the thread's acquired latches.
	@param[in]	Latches	the thread's existing (acquired) latches
	@param[in]	level	to lookup
	@return	latch if found or 0 */
	const latch_t* find(
		const Latches*	Latches,
		latch_level_t	level) const
		UNIV_NOTHROW;

	/**
	Checks if the level value exists in the thread's acquired latches.
	@param[in]	level	to lookup
	@return	latch if found or 0 */
	const latch_t* find(latch_level_t level)
		UNIV_NOTHROW;

	/** Report error and abort.
	@param[in]	latches	thread's existing latches
	@param[in]	latched	The existing latch causing the
				invariant to fail
	@param[in]	level	The new level request that breaks
				the order */
	void crash(
		const Latches*	latches,
		const Latched*	latched,
		latch_level_t	level) const
		UNIV_NOTHROW;

	/** Do a basic ordering check.
	@param[in]	latches	thread's existing latches
	@param[in]	requested_level	Level requested by latch
	@param[in]	level	declared ulint so that we can
				do level - 1. The level of the
				latch that the thread is trying
				to acquire
	@return true if passes, else crash with error message. */
	inline bool basic_check(
		const Latches*	latches,
		latch_level_t	requested_level,
		lint		level) const
		UNIV_NOTHROW;

	/** Adds a latch and its level in the thread level array. Allocates
	the memory for the array if called for the first time for this
	OS thread. Makes the checks against other latch levels stored
	in the array for this thread.

	@param[in]	latch	latch that the thread wants to acquire.
	@param[in]	level	latch level to check against */
	void lock_validate(
		const latch_t*	latch,
		latch_level_t	level)
		UNIV_NOTHROW
	{
		/* Ignore diagnostic latches, starting with '.' */

		if (*latch->get_name() != '.'
		    && latch->get_level() != SYNC_LEVEL_VARYING) {

			ut_ad(level != SYNC_LEVEL_VARYING);

			Latches*	latches = check_order(latch, level);

			/* The latches vector is ordered on level in
			descending order: the last entry must not be at a
			lower level than the one being requested. */
			ut_a(latches->empty()
			     || level == SYNC_LEVEL_VARYING
			     || level == SYNC_NO_ORDER_CHECK
			     || latches->back().get_level()
			     == SYNC_NO_ORDER_CHECK
			     || latches->back().m_latch->get_level()
			     == SYNC_LEVEL_VARYING
			     || latches->back().get_level() >= level);
		}
	}

	/** Adds a latch and its level in the thread level array. Allocates
	the memory for the array if called for the first time for this
	OS thread. Makes the checks against other latch levels stored
	in the array for this thread.

	@param[in]	latch	latch that the thread wants to acquire.
	@param[in]	level	latch level to check against */
	void lock_granted(
		const latch_t*	latch,
		latch_level_t	level)
		UNIV_NOTHROW
	{
		/* Ignore diagnostic latches, starting with '.' */

		if (*latch->get_name() != '.'
		    && latch->get_level() != SYNC_LEVEL_VARYING) {

			Latches*	latches = thread_latches(true);

			latches->push_back(Latched(latch, level));
		}
	}

	/** For recursive X rw-locks.
	@param[in]	latch	The RW-Lock to relock */
	void relock(const latch_t* latch)
		UNIV_NOTHROW
	{
		ut_a(latch->m_rw_lock);

		latch_level_t	level = latch->get_level();

		/* Ignore diagnostic latches, starting with '.' */

		if (*latch->get_name() != '.'
		    && latch->get_level() != SYNC_LEVEL_VARYING) {

			Latches*	latches = thread_latches(true);

			Latches::iterator	it = std::find(
				latches->begin(), latches->end(),
				Latched(latch, level));

			/* A relock is allowed to break the ordering only
			if this thread already holds the same latch. */
			ut_a(latches->empty()
			     || level == SYNC_LEVEL_VARYING
			     || level == SYNC_NO_ORDER_CHECK
			     || latches->back().m_latch->get_level()
			     == SYNC_LEVEL_VARYING
			     || latches->back().m_latch->get_level()
			     == SYNC_NO_ORDER_CHECK
			     || latches->back().get_level() >= level
			     || it != latches->end());

			if (it == latches->end()) {
				latches->push_back(Latched(latch, level));
			} else {
				/* Record the relock next to the existing
				entry so both acquisitions are tracked. */
				latches->insert(it, Latched(latch, level));
			}
		}
	}

	/** Iterate over a thread's latches.
	@param[in]	functor	The callback
	@return true if the functor returns true. */
	bool for_each(const sync_check_functor_t& functor)
		UNIV_NOTHROW
	{
		if (const Latches* latches = thread_latches()) {
			Latches::const_iterator	end = latches->end();
			for (Latches::const_iterator it = latches->begin();
			     it != end; ++it) {

				if (functor(it->m_level)) {
					return(true);
				}
			}
		}

		return(false);
	}

	/** Removes a latch from the thread level array if it is found there.
	@param[in]	latch	The latch that was released
	It is not an error if the latch is not found, as we presently are
	not able to determine the level for every latch reservation the
	program does */
	void unlock(const latch_t* latch) UNIV_NOTHROW;

	/** Get the level name
	@param[in]	level	The level ID to lookup
	@return level name */
	const std::string& get_level_name(latch_level_t level) const
		UNIV_NOTHROW
	{
		Levels::const_iterator	it = m_levels.find(level);

		ut_ad(it != m_levels.end());

		return(it->second);
	}

	/** Initialise the debug data structures */
	static void init()
		UNIV_NOTHROW;

	/** Shutdown the latch debug checking */
	static void shutdown()
		UNIV_NOTHROW;

	/** @return the singleton instance */
	static LatchDebug* instance()
		UNIV_NOTHROW
	{
		return(s_instance);
	}

	/** Create the singleton instance */
	static void create_instance()
		UNIV_NOTHROW
	{
		ut_ad(s_instance == NULL);

		s_instance = UT_NEW_NOKEY(LatchDebug());
	}

private:
	/** Disable copying */
	LatchDebug(const LatchDebug&);
	LatchDebug& operator=(const LatchDebug&);

	/** Adds a latch and its level in the thread level array. Allocates
	the memory for the array if called first time for this OS thread.
	Makes the checks against other latch levels stored in the array
	for this thread.

	@param[in]	latch	pointer to a mutex or an rw-lock
	@param[in]	level	level in the latching order
	@return the thread's latches */
	Latches* check_order(
		const latch_t*	latch,
		latch_level_t	level)
		UNIV_NOTHROW;

	/** Print the latches acquired by a thread
	@param[in]	latches	Latches acquired by a thread */
	void print_latches(const Latches* latches) const
		UNIV_NOTHROW;

	/** Special handling for the RTR mutexes. We need to add proper
	levels for them if possible.
	@param[in]	latch	Latch to check
	@return true if it is a an _RTR_ mutex */
	bool is_rtr_mutex(const latch_t* latch) const
		UNIV_NOTHROW
	{
		return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
		       || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
		       || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX);
	}

private:
	/** Comparator for the Levels . */
	struct latch_level_less
		: public std::binary_function<
			latch_level_t,
			latch_level_t,
			bool>
	{
		/** @return true if lhs < rhs */
		bool operator()(
			const latch_level_t& lhs,
			const latch_level_t& rhs) const
			UNIV_NOTHROW
		{
			return(lhs < rhs);
		}
	};

	/** Mapping from level ID to its human-readable name. */
	typedef std::map<
		latch_level_t,
		std::string,
		latch_level_less,
		ut_allocator<std::pair<const latch_level_t, std::string> > >
		Levels;

	/** Mutex protecting the deadlock detector data structures. */
	Mutex			m_mutex;

	/** Thread specific data. Protected by m_mutex. */
	ThreadMap		m_threads;

	/** Mapping from latch level to its string representation. */
	Levels			m_levels;

	/** The singleton instance. Must be created in single threaded mode. */
	static LatchDebug*	s_instance;

public:
	/** For checking whether this module has been initialised or not. */
	static bool		s_initialized;
};
431
/** The latch order checking infra-structure */
LatchDebug* LatchDebug::s_instance = NULL;
bool LatchDebug::s_initialized = false;

/** Insert level T into LatchDebug::m_levels keyed by the enum value and
mapped to its stringified enumerator name; asserts the level was not
already present. Only usable inside LatchDebug member functions. */
#define LEVEL_MAP_INSERT(T)						\
do {									\
	std::pair<Levels::iterator, bool>	result =		\
		m_levels.insert(Levels::value_type(T, #T));		\
	ut_ad(result.second);						\
} while(0)
442
/** Setup the mapping from level ID to level name mapping.
Initialises the internal mutex and registers one name entry per
latch level; the final assertion checks the table is exhaustive. */
LatchDebug::LatchDebug()
{
	m_mutex.init();

	LEVEL_MAP_INSERT(SYNC_UNKNOWN);
	LEVEL_MAP_INSERT(SYNC_MUTEX);
	LEVEL_MAP_INSERT(RW_LOCK_SX);
	LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
	LEVEL_MAP_INSERT(RW_LOCK_S);
	LEVEL_MAP_INSERT(RW_LOCK_X);
	LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
	LEVEL_MAP_INSERT(SYNC_MONITOR_MUTEX);
	LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
	LEVEL_MAP_INSERT(SYNC_DOUBLEWRITE);
	LEVEL_MAP_INSERT(SYNC_BUF_FLUSH_LIST);
	LEVEL_MAP_INSERT(SYNC_BUF_BLOCK);
	LEVEL_MAP_INSERT(SYNC_BUF_PAGE_HASH);
	LEVEL_MAP_INSERT(SYNC_BUF_POOL);
	LEVEL_MAP_INSERT(SYNC_POOL);
	LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
	LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
	LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
	LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
	LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
	LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
	LEVEL_MAP_INSERT(SYNC_RECV);
	LEVEL_MAP_INSERT(SYNC_LOG_FLUSH_ORDER);
	LEVEL_MAP_INSERT(SYNC_LOG);
	LEVEL_MAP_INSERT(SYNC_LOG_WRITE);
	LEVEL_MAP_INSERT(SYNC_PAGE_CLEANER);
	LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
	LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
	LEVEL_MAP_INSERT(SYNC_THREADS);
	LEVEL_MAP_INSERT(SYNC_TRX);
	LEVEL_MAP_INSERT(SYNC_RW_TRX_HASH_ELEMENT);
	LEVEL_MAP_INSERT(SYNC_TRX_SYS);
	LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
	LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
	LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
	LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
	LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
	LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
	LEVEL_MAP_INSERT(SYNC_FSP);
	LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
	LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
	LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
	LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
	LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
	LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
	LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
	LEVEL_MAP_INSERT(SYNC_DICT_AUTOINC_MUTEX);
	LEVEL_MAP_INSERT(SYNC_DICT);
	LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
	LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
	LEVEL_MAP_INSERT(SYNC_TRX_I_S_LAST_READ);
	LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
	LEVEL_MAP_INSERT(SYNC_RECV_WRITER);
	LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
	LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);

	/* Enum count starts from 0 */
	ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
}
519
520 /** Print the latches acquired by a thread
521 @param[in] latches Latches acquired by a thread */
522 void
print_latches(const Latches * latches) const523 LatchDebug::print_latches(const Latches* latches) const
524 UNIV_NOTHROW
525 {
526 ib::error() << "Latches already owned by this thread: ";
527
528 Latches::const_iterator end = latches->end();
529
530 for (Latches::const_iterator it = latches->begin();
531 it != end;
532 ++it) {
533
534 ib::error()
535 << sync_latch_get_name(it->m_latch->get_id())
536 << " -> "
537 << it->m_level << " "
538 << "(" << get_level_name(it->m_level) << ")";
539 }
540 }
541
/** Report error and abort
@param[in]	latches	thread's existing latches
@param[in]	latched	The existing latch causing the invariant to fail
@param[in]	level	The new level request that breaks the order */
void
LatchDebug::crash(
	const Latches*	latches,
	const Latched*	latched,
	latch_level_t	level) const
	UNIV_NOTHROW
{
	const latch_t*		latch = latched->m_latch;
	const std::string&	in_level_name = get_level_name(level);

	const std::string&	latch_level_name =
		get_level_name(latched->m_level);

	/* Single stream statement so the report is one log record. */
	ib::error()
		<< "Thread " << os_thread_pf(os_thread_get_curr_id())
		<< " already owns a latch "
		<< sync_latch_get_name(latch->m_id) << " at level"
		<< " " << latched->m_level << " (" << latch_level_name
		<< " ), which is at a lower/same level than the"
		<< " requested latch: "
		<< level << " (" << in_level_name << "). "
		<< latch->to_string();

	print_latches(latches);

	/* Latch order violation is a fatal programming error. */
	ut_error;
}
573
574 /** Check that all the latches already owned by a thread have a lower
575 level than limit.
576 @param[in] latches the thread's existing (acquired) latches
577 @param[in] limit to check against
578 @return latched info if there is one with a level <= limit . */
579 const Latched*
less(const Latches * latches,latch_level_t limit) const580 LatchDebug::less(
581 const Latches* latches,
582 latch_level_t limit) const
583 UNIV_NOTHROW
584 {
585 Latches::const_iterator end = latches->end();
586
587 for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
588
589 if (it->m_level <= limit) {
590 return(&(*it));
591 }
592 }
593
594 return(NULL);
595 }
596
597 /** Do a basic ordering check.
598 @param[in] latches thread's existing latches
599 @param[in] requested_level Level requested by latch
600 @param[in] in_level declared ulint so that we can do level - 1.
601 The level of the latch that the thread is
602 trying to acquire
603 @return true if passes, else crash with error message. */
604 inline bool
basic_check(const Latches * latches,latch_level_t requested_level,lint in_level) const605 LatchDebug::basic_check(
606 const Latches* latches,
607 latch_level_t requested_level,
608 lint in_level) const
609 UNIV_NOTHROW
610 {
611 latch_level_t level = latch_level_t(in_level);
612
613 ut_ad(level < SYNC_LEVEL_MAX);
614
615 const Latched* latched = less(latches, level);
616
617 if (latched != NULL) {
618 crash(latches, latched, requested_level);
619 return(false);
620 }
621
622 return(true);
623 }
624
/** Create a new instance if one doesn't exist else return the existing one.
@param[in]	add	add an empty entry if one is not found
			(default no)
@return pointer to a thread's acquired latches, or NULL if the thread
has no entry and add is false. */
Latches*
LatchDebug::thread_latches(bool add)
	UNIV_NOTHROW
{
	m_mutex.enter();

	os_thread_id_t		thread_id = os_thread_get_curr_id();

	/* lower_bound gives either the existing entry or the correct
	insertion hint for this thread ID. */
	ThreadMap::iterator	lb = m_threads.lower_bound(thread_id);

	if (lb != m_threads.end()
	    && !(m_threads.key_comp()(thread_id, lb->first))) {

		/* Entry exists: key is not less than and not greater
		than thread_id. */
		Latches*	latches = lb->second;

		m_mutex.exit();

		return(latches);

	} else if (!add) {

		m_mutex.exit();

		return(NULL);

	} else {
		typedef ThreadMap::value_type value_type;

		Latches*	latches = UT_NEW_NOKEY(Latches());

		ut_a(latches != NULL);

		/* Pre-allocate to avoid reallocations on the hot path. */
		latches->reserve(32);

		/* Hinted insert at lb is amortized O(1). */
		m_threads.insert(lb, value_type(thread_id, latches));

		m_mutex.exit();

		return(latches);
	}
}
669
670 /** Checks if the level value exists in the thread's acquired latches.
671 @param[in] levels the thread's existing (acquired) latches
672 @param[in] level to lookup
673 @return latch if found or 0 */
674 const latch_t*
find(const Latches * latches,latch_level_t level) const675 LatchDebug::find(
676 const Latches* latches,
677 latch_level_t level) const UNIV_NOTHROW
678 {
679 Latches::const_iterator end = latches->end();
680
681 for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
682
683 if (it->m_level == level) {
684
685 return(it->m_latch);
686 }
687 }
688
689 return(0);
690 }
691
692 /** Checks if the level value exists in the thread's acquired latches.
693 @param[in] level The level to lookup
694 @return latch if found or NULL */
695 const latch_t*
find(latch_level_t level)696 LatchDebug::find(latch_level_t level)
697 UNIV_NOTHROW
698 {
699 return(find(thread_latches(), level));
700 }
701
/**
Adds a latch and its level in the thread level array. Allocates the memory
for the array if called first time for this OS thread. Makes the checks
against other latch levels stored in the array for this thread.
@param[in]	latch	pointer to a mutex or an rw-lock
@param[in]	level	level in the latching order
@return the thread's latches */
Latches*
LatchDebug::check_order(
	const latch_t*	latch,
	latch_level_t	level)
	UNIV_NOTHROW
{
	ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);

	Latches*	latches = thread_latches(true);

	/* NOTE that there is a problem with _NODE and _LEAF levels: if the
	B-tree height changes, then a leaf can change to an internal node
	or the other way around. We do not know at present if this can cause
	unnecessary assertion failures below. */

	switch (level) {
	case SYNC_NO_ORDER_CHECK:
	case SYNC_EXTERN_STORAGE:
	case SYNC_TREE_NODE_FROM_HASH:
		/* Do no order checking */
		break;

	case SYNC_TRX_SYS_HEADER:

		if (srv_is_being_started) {
			/* This is violated during trx_sys_create_rsegs()
			when creating additional rollback segments when
			upgrading in srv_start(). */
			break;
		}

		/* Fall through */

	case SYNC_MONITOR_MUTEX:
	case SYNC_RECV:
	case SYNC_WORK_QUEUE:
	case SYNC_FTS_TOKENIZE:
	case SYNC_FTS_OPTIMIZE:
	case SYNC_FTS_CACHE:
	case SYNC_FTS_CACHE_INIT:
	case SYNC_PAGE_CLEANER:
	case SYNC_LOG:
	case SYNC_LOG_WRITE:
	case SYNC_LOG_FLUSH_ORDER:
	case SYNC_DOUBLEWRITE:
	case SYNC_SEARCH_SYS:
	case SYNC_THREADS:
	case SYNC_LOCK_SYS:
	case SYNC_LOCK_WAIT_SYS:
	case SYNC_RW_TRX_HASH_ELEMENT:
	case SYNC_TRX_SYS:
	case SYNC_IBUF_BITMAP_MUTEX:
	case SYNC_REDO_RSEG:
	case SYNC_NOREDO_RSEG:
	case SYNC_PURGE_LATCH:
	case SYNC_PURGE_QUEUE:
	case SYNC_DICT_AUTOINC_MUTEX:
	case SYNC_DICT_OPERATION:
	case SYNC_DICT_HEADER:
	case SYNC_TRX_I_S_RWLOCK:
	case SYNC_TRX_I_S_LAST_READ:
	case SYNC_IBUF_MUTEX:
	case SYNC_INDEX_ONLINE_LOG:
	case SYNC_STATS_AUTO_RECALC:
	case SYNC_POOL:
	case SYNC_POOL_MANAGER:
	case SYNC_RECV_WRITER:

		/* No other latch at this level or below may be held. */
		basic_check(latches, level, level);
		break;

	case SYNC_ANY_LATCH:

		/* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
		if (is_rtr_mutex(latch)) {

			const Latched*	latched = less(latches, level);

			if (latched == NULL
			    || (latched != NULL
				&& is_rtr_mutex(latched->m_latch))) {

				/* No violation */
				break;

			}

			crash(latches, latched, level);

		} else {
			basic_check(latches, level, level);
		}

		break;

	case SYNC_TRX:

		/* Either the thread must own the lock_sys.mutex, or
		it is allowed to own only ONE trx_t::mutex. */

		if (less(latches, level) != NULL) {
			basic_check(latches, level, level - 1);
			ut_a(find(latches, SYNC_LOCK_SYS) != 0);
		}
		break;

	case SYNC_BUF_FLUSH_LIST:
	case SYNC_BUF_POOL:

		/* We can have multiple mutexes of this type therefore we
		can only check whether the greater than condition holds. */

		basic_check(latches, level, level - 1);
		break;

	case SYNC_BUF_PAGE_HASH:

		/* Multiple page_hash locks are only allowed during
		buf_validate and that is where buf_pool mutex is already
		held. */

		/* Fall through */

	case SYNC_BUF_BLOCK:

		/* Either the thread must own the (buffer pool) buf_pool->mutex
		or it is allowed to latch only ONE of (buffer block)
		block->mutex or buf_pool->zip_mutex. */

		if (less(latches, level) != NULL) {
			basic_check(latches, level, level - 1);
			ut_a(find(latches, SYNC_BUF_POOL) != 0);
		}
		break;

	case SYNC_IBUF_BITMAP:

		/* Either the thread must own the master mutex to all
		the bitmap pages, or it is allowed to latch only ONE
		bitmap page. */

		if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {

			basic_check(latches, level, SYNC_IBUF_BITMAP - 1);

		} else if (!srv_is_being_started) {

			/* This is violated during trx_sys_create_rsegs()
			when creating additional rollback segments during
			upgrade. */

			basic_check(latches, level, SYNC_IBUF_BITMAP);
		}
		break;

	case SYNC_FSP_PAGE:
		ut_a(find(latches, SYNC_FSP) != 0);
		break;

	case SYNC_FSP:

		/* Recursive X-latching of a tablespace latch is allowed. */
		ut_a(find(latches, SYNC_FSP) != 0
		     || basic_check(latches, level, SYNC_FSP));
		break;

	case SYNC_TRX_UNDO_PAGE:

		/* Purge is allowed to read in as many UNDO pages as it likes.
		The purge thread can read the UNDO pages without any covering
		mutex. */

		ut_a(find(latches, SYNC_REDO_RSEG) != 0
		     || find(latches, SYNC_NOREDO_RSEG) != 0
		     || basic_check(latches, level, level - 1));
		break;

	case SYNC_RSEG_HEADER:

		ut_a(find(latches, SYNC_REDO_RSEG) != 0
		     || find(latches, SYNC_NOREDO_RSEG) != 0);
		break;

	case SYNC_RSEG_HEADER_NEW:

		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
		break;

	case SYNC_TREE_NODE:

		ut_a(find(latches, SYNC_FSP) == &fil_system.temp_space->latch
		     || find(latches, SYNC_INDEX_TREE)
		     || find(latches, SYNC_DICT_OPERATION)
		     || basic_check(latches, level, SYNC_TREE_NODE - 1));
		break;

	case SYNC_TREE_NODE_NEW:

		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
		break;

	case SYNC_INDEX_TREE:

		basic_check(latches, level, SYNC_TREE_NODE - 1);
		break;

	case SYNC_IBUF_TREE_NODE:

		ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
		     || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
		break;

	case SYNC_IBUF_TREE_NODE_NEW:

		/* ibuf_add_free_page() allocates new pages for the change
		buffer while only holding the tablespace x-latch. These
		pre-allocated new pages may only be used while holding
		ibuf_mutex, in btr_page_alloc_for_ibuf(). */

		ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
		     || find(latches, SYNC_FSP) != 0);
		break;

	case SYNC_IBUF_INDEX_TREE:

		if (find(latches, SYNC_FSP) != 0) {
			basic_check(latches, level, level - 1);
		} else {
			basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
		}
		break;

	case SYNC_IBUF_PESS_INSERT_MUTEX:

		basic_check(latches, level, SYNC_FSP - 1);
		ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
		break;

	case SYNC_IBUF_HEADER:

		basic_check(latches, level, SYNC_FSP - 1);
		ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
		ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
		break;

	case SYNC_DICT:
		basic_check(latches, level, SYNC_DICT);
		break;

	case SYNC_MUTEX:
	case SYNC_UNKNOWN:
	case SYNC_LEVEL_VARYING:
	case RW_LOCK_X:
	case RW_LOCK_X_WAIT:
	case RW_LOCK_S:
	case RW_LOCK_SX:
	case RW_LOCK_NOT_LOCKED:
		/* These levels should never be set for a latch. */
		ut_error;
		break;
	}

	return(latches);
}
972
/** Removes a latch from the thread level array if it is found there.
@param[in]	latch	that was released/unlocked
It is not an error if a SYNC_LEVEL_VARYING latch is not found, as we
presently are not able to determine the level for every latch
reservation the program does; for any other level a missing latch is
fatal. */
void
LatchDebug::unlock(const latch_t* latch)
	UNIV_NOTHROW
{
	if (latch->get_level() == SYNC_LEVEL_VARYING) {
		// We don't have varying level mutexes
		ut_ad(latch->m_rw_lock);
	}

	Latches*	latches;

	if (*latch->get_name() == '.') {

		/* Ignore diagnostic latches, starting with '.' */

	} else if ((latches = thread_latches()) != NULL) {

		/* Scan backwards: latches are released in roughly LIFO
		order, so the match is usually near the end. */
		Latches::reverse_iterator	rend = latches->rend();

		for (Latches::reverse_iterator it = latches->rbegin();
		     it != rend;
		     ++it) {

			if (it->m_latch != latch) {

				continue;
			}

			/* reverse_iterator::base() points one element
			past the match; decrement to erase the match. */
			Latches::iterator	i = it.base();

			latches->erase(--i);

			/* If this thread doesn't own any more
			latches remove from the map.

			FIXME: Perhaps use the master thread
			to do purge. Or, do it from close connection.
			This could be expensive. */

			if (latches->empty()) {

				m_mutex.enter();

				os_thread_id_t	thread_id;

				thread_id = os_thread_get_curr_id();

				m_threads.erase(thread_id);

				m_mutex.exit();

				UT_DELETE(latches);
			}

			return;
		}

		if (latch->get_level() != SYNC_LEVEL_VARYING) {
			ib::error()
				<< "Couldn't find latch "
				<< sync_latch_get_name(latch->get_id());

			print_latches(latches);

			/** Must find the latch. */
			ut_error;
		}
	}
}
1048
1049 /** Get the latch id from a latch name.
1050 @param[in] name Latch name
1051 @return latch id if found else LATCH_ID_NONE. */
1052 latch_id_t
sync_latch_get_id(const char * name)1053 sync_latch_get_id(const char* name)
1054 {
1055 LatchMetaData::const_iterator end = latch_meta.end();
1056
1057 /* Linear scan should be OK, this should be extremely rare. */
1058
1059 for (LatchMetaData::const_iterator it = latch_meta.begin();
1060 it != end;
1061 ++it) {
1062
1063 if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1064
1065 continue;
1066
1067 } else if (strcmp((*it)->get_name(), name) == 0) {
1068
1069 return((*it)->get_id());
1070 }
1071 }
1072
1073 return(LATCH_ID_NONE);
1074 }
1075
1076 /** Get the latch name from a sync level
1077 @param[in] level Latch level to lookup
1078 @return NULL if not found. */
1079 const char*
sync_latch_get_name(latch_level_t level)1080 sync_latch_get_name(latch_level_t level)
1081 {
1082 LatchMetaData::const_iterator end = latch_meta.end();
1083
1084 /* Linear scan should be OK, this should be extremely rare. */
1085
1086 for (LatchMetaData::const_iterator it = latch_meta.begin();
1087 it != end;
1088 ++it) {
1089
1090 if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1091
1092 continue;
1093
1094 } else if ((*it)->get_level() == level) {
1095
1096 return((*it)->get_name());
1097 }
1098 }
1099
1100 return(0);
1101 }
1102
1103 /** Check if it is OK to acquire the latch.
1104 @param[in] latch latch type */
1105 void
sync_check_lock_validate(const latch_t * latch)1106 sync_check_lock_validate(const latch_t* latch)
1107 {
1108 if (LatchDebug::instance() != NULL) {
1109 LatchDebug::instance()->lock_validate(
1110 latch, latch->get_level());
1111 }
1112 }
1113
1114 /** Note that the lock has been granted
1115 @param[in] latch latch type */
1116 void
sync_check_lock_granted(const latch_t * latch)1117 sync_check_lock_granted(const latch_t* latch)
1118 {
1119 if (LatchDebug::instance() != NULL) {
1120 LatchDebug::instance()->lock_granted(latch, latch->get_level());
1121 }
1122 }
1123
1124 /** Check if it is OK to acquire the latch.
1125 @param[in] latch latch type
1126 @param[in] level Latch level */
1127 void
sync_check_lock(const latch_t * latch,latch_level_t level)1128 sync_check_lock(
1129 const latch_t* latch,
1130 latch_level_t level)
1131 {
1132 if (LatchDebug::instance() != NULL) {
1133
1134 ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1135 ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1136
1137 LatchDebug::instance()->lock_validate(latch, level);
1138 LatchDebug::instance()->lock_granted(latch, level);
1139 }
1140 }
1141
1142 /** Check if it is OK to re-acquire the lock.
1143 @param[in] latch RW-LOCK to relock (recursive X locks) */
1144 void
sync_check_relock(const latch_t * latch)1145 sync_check_relock(const latch_t* latch)
1146 {
1147 if (LatchDebug::instance() != NULL) {
1148 LatchDebug::instance()->relock(latch);
1149 }
1150 }
1151
1152 /** Removes a latch from the thread level array if it is found there.
1153 @param[in] latch The latch to unlock */
1154 void
sync_check_unlock(const latch_t * latch)1155 sync_check_unlock(const latch_t* latch)
1156 {
1157 if (LatchDebug::instance() != NULL) {
1158 LatchDebug::instance()->unlock(latch);
1159 }
1160 }
1161
1162 /** Checks if the level array for the current thread contains a
1163 mutex or rw-latch at the specified level.
1164 @param[in] level to find
1165 @return a matching latch, or NULL if not found */
1166 const latch_t*
sync_check_find(latch_level_t level)1167 sync_check_find(latch_level_t level)
1168 {
1169 if (LatchDebug::instance() != NULL) {
1170 return(LatchDebug::instance()->find(level));
1171 }
1172
1173 return(NULL);
1174 }
1175
1176 /** Iterate over the thread's latches.
1177 @param[in,out] functor called for each element.
1178 @return true if the functor returns true for any element */
1179 bool
sync_check_iterate(const sync_check_functor_t & functor)1180 sync_check_iterate(const sync_check_functor_t& functor)
1181 {
1182 if (LatchDebug* debug = LatchDebug::instance()) {
1183 return(debug->for_each(functor));
1184 }
1185
1186 return(false);
1187 }
1188
1189 /** Enable sync order checking.
1190
1191 Note: We don't enforce any synchronisation checks. The caller must ensure
1192 that no races can occur */
1193 void
sync_check_enable()1194 sync_check_enable()
1195 {
1196 if (!srv_sync_debug) {
1197
1198 return;
1199 }
1200
1201 /* We should always call this before we create threads. */
1202
1203 LatchDebug::create_instance();
1204 }
1205
/** Initialise the debug data structures. Creates the global mutex that
protects the rw-lock debug info lists (see rw_lock_debug_mutex above). */
void
LatchDebug::init()
	UNIV_NOTHROW
{
	mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
}
1213
/** Shutdown the latch debug checking

Note: We don't enforce any synchronisation checks. The caller must ensure
that no races can occur */
void
LatchDebug::shutdown()
	UNIV_NOTHROW
{
	/* Counterpart of init(): free the rw-lock debug info mutex. */
	mutex_free(&rw_lock_debug_mutex);

	ut_a(s_initialized);

	s_initialized = false;

	/* Destroy the singleton created by create_instance(), if any. */
	UT_DELETE(s_instance);

	LatchDebug::s_instance = NULL;
}
1232
/** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
because the debug mutex is also acquired in sync0arr while holding the OS
mutex protecting the sync array, and the ordinary mutex_enter might
recursively call routines in sync0arr, leading to a deadlock on the OS
mutex. */
void
rw_lock_debug_mutex_enter()
{
	mutex_enter(&rw_lock_debug_mutex);
}
1243
/** Releases the debug mutex acquired by rw_lock_debug_mutex_enter(). */
void
rw_lock_debug_mutex_exit()
{
	mutex_exit(&rw_lock_debug_mutex);
}
1250 #endif /* UNIV_DEBUG */
1251
/* Meta data for all the InnoDB latches. If the latch is not recorded
here then it will be considered for deadlock checks. Indexed by
latch_id_t; populated by sync_latch_meta_init(). */
LatchMetaData latch_meta;
1255
/** Load the latch meta data. Fills latch_meta with one entry per latch ID;
the LATCH_ADD_MUTEX()/LATCH_ADD_RWLOCK() macros presumably store the new
latch_meta_t into its LATCH_ID_* slot (see the explicit assignment for
RW_LOCK_DEBUG below) — slots never assigned stay NULL and are skipped by
the consistency check at the end. */
static
void
sync_latch_meta_init()
	UNIV_NOTHROW
{
	latch_meta.resize(LATCH_ID_MAX);

	/* The latches should be ordered on latch_id_t. So that we can
	index directly into the vector to update and fetch meta-data. */

	LATCH_ADD_MUTEX(AUTOINC, SYNC_DICT_AUTOINC_MUTEX, autoinc_mutex_key);

#if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK, PFS_NOT_INSTRUMENTED);
#else
	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK,
			buffer_block_mutex_key);
#endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */

	LATCH_ADD_MUTEX(BUF_POOL, SYNC_BUF_POOL, buf_pool_mutex_key);

	LATCH_ADD_MUTEX(BUF_POOL_ZIP, SYNC_BUF_BLOCK, buf_pool_zip_mutex_key);

	LATCH_ADD_MUTEX(CACHE_LAST_READ, SYNC_TRX_I_S_LAST_READ,
			cache_last_read_mutex_key);

	LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
			dict_foreign_err_mutex_key);

	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);

	LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);

	LATCH_ADD_MUTEX(FLUSH_LIST, SYNC_BUF_FLUSH_LIST, flush_list_mutex_key);

	LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);

	LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);

	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
			fts_pll_tokenize_mutex_key);

	LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
			hash_table_mutex_key);

	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
			ibuf_bitmap_mutex_key);

	LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);

	LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
			ibuf_pessimistic_insert_mutex_key);

	LATCH_ADD_MUTEX(LOG_SYS, SYNC_LOG, log_sys_mutex_key);

	LATCH_ADD_MUTEX(LOG_WRITE, SYNC_LOG_WRITE, log_sys_write_mutex_key);

	LATCH_ADD_MUTEX(LOG_FLUSH_ORDER, SYNC_LOG_FLUSH_ORDER,
			log_flush_order_mutex_key);

	LATCH_ADD_MUTEX(MUTEX_LIST, SYNC_NO_ORDER_CHECK, mutex_list_mutex_key);

	LATCH_ADD_MUTEX(PAGE_CLEANER, SYNC_PAGE_CLEANER,
			page_cleaner_mutex_key);

	LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
			purge_sys_pq_mutex_key);

	LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
			recalc_pool_mutex_key);

	LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);

	LATCH_ADD_MUTEX(RECV_WRITER, SYNC_RECV_WRITER, recv_writer_mutex_key);

	LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);

	LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);

#ifdef UNIV_DEBUG
	/* Mutex names starting with '.' are not tracked. They are assumed
	to be diagnostic mutexes used in debugging. */
	latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
		LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
				SYNC_NO_ORDER_CHECK,
				rw_lock_debug_mutex_key);
#endif /* UNIV_DEBUG */

	LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
			rtr_active_mutex_key);

	LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);

	LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);

	LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
			rw_lock_list_mutex_key);

	LATCH_ADD_MUTEX(RW_LOCK_MUTEX, SYNC_NO_ORDER_CHECK, rw_lock_mutex_key);

	LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
			srv_innodb_monitor_mutex_key);

	LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
			srv_misc_tmpfile_mutex_key);

	LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
			srv_monitor_file_mutex_key);

	LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);

	LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);

	LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
			trx_pool_manager_mutex_key);

	LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);

	LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);

	LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
			lock_wait_mutex_key);

	LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);

	LATCH_ADD_MUTEX(SRV_SYS, SYNC_THREADS, srv_sys_mutex_key);

	LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);

	LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
			page_zip_stat_per_index_mutex_key);

#ifndef PFS_SKIP_EVENT_MUTEX
	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
			event_manager_mutex_key);
#else
	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
#endif /* !PFS_SKIP_EVENT_MUTEX */

	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK, event_mutex_key);

	LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
			sync_array_mutex_key);

	LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key);

	LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	LATCH_ADD_MUTEX(OS_AIO_WRITE_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	LATCH_ADD_MUTEX(OS_AIO_LOG_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	LATCH_ADD_MUTEX(OS_AIO_IBUF_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	LATCH_ADD_MUTEX(OS_AIO_SYNC_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
			row_drop_list_mutex_key);

	LATCH_ADD_MUTEX(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
			index_online_log_key);

	LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);

	// Add the RW locks
	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);

	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
			 buf_block_lock_key);

#ifdef UNIV_DEBUG
	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_LEVEL_VARYING,
			 buf_block_debug_latch_key);
#endif /* UNIV_DEBUG */

	LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT_OPERATION,
			 dict_operation_lock_key);

	LATCH_ADD_RWLOCK(CHECKPOINT, SYNC_NO_ORDER_CHECK, checkpoint_lock_key);

	LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);

	LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);

	LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
			 fts_cache_init_rw_lock_key);

	LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
			 trx_i_s_cache_lock_key);

	LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);

	LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
			 index_tree_rw_lock_key);

	LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);

	LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
			 hash_table_locks_key);

	LATCH_ADD_MUTEX(SYNC_DEBUG_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);

	/* JAN: TODO: Add PFS instrumentation */
	LATCH_ADD_MUTEX(SCRUB_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(BTR_DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_DATA_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_THREADS_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(RW_TRX_HASH_ELEMENT, SYNC_RW_TRX_HASH_ELEMENT,
			rw_trx_hash_element_mutex_key);

	latch_id_t	id = LATCH_ID_NONE;

	/* The array should be ordered on latch ID. We need to
	index directly into it from the mutex policy to update
	the counters and access the meta-data. */

	for (LatchMetaData::iterator it = latch_meta.begin();
	     it != latch_meta.end();
	     ++it) {

		const latch_meta_t*	meta = *it;

		/* Skip blank entries */
		if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
			continue;
		}

		/* Debug check: IDs must be strictly increasing, i.e. the
		entries above are in latch_id_t order with no duplicates. */
		ut_a(id < meta->get_id());

		id = meta->get_id();
	}
}
1505
1506 /** Destroy the latch meta data */
1507 static
1508 void
sync_latch_meta_destroy()1509 sync_latch_meta_destroy()
1510 {
1511 for (LatchMetaData::iterator it = latch_meta.begin();
1512 it != latch_meta.end();
1513 ++it) {
1514
1515 UT_DELETE(*it);
1516 }
1517
1518 latch_meta.clear();
1519 }
1520
1521 /** Track mutex file creation name and line number. This is to avoid storing
1522 { const char* name; uint16_t line; } in every instance. This results in the
1523 sizeof(Mutex) > 64. We use a lookup table to store it separately. Fetching
1524 the values is very rare, only required for diagnostic purposes. And, we
1525 don't create/destroy mutexes that frequently. */
1526 struct CreateTracker {
1527
1528 /** Constructor */
CreateTrackerCreateTracker1529 CreateTracker()
1530 UNIV_NOTHROW
1531 {
1532 m_mutex.init();
1533 }
1534
1535 /** Destructor */
~CreateTrackerCreateTracker1536 ~CreateTracker()
1537 UNIV_NOTHROW
1538 {
1539 ut_ad(m_files.empty());
1540
1541 m_mutex.destroy();
1542 }
1543
1544 /** Register where the latch was created
1545 @param[in] ptr Latch instance
1546 @param[in] filename Where created
1547 @param[in] line Line number in filename */
register_latchCreateTracker1548 void register_latch(
1549 const void* ptr,
1550 const char* filename,
1551 uint16_t line)
1552 UNIV_NOTHROW
1553 {
1554 m_mutex.enter();
1555
1556 Files::iterator lb = m_files.lower_bound(ptr);
1557
1558 ut_ad(lb == m_files.end()
1559 || m_files.key_comp()(ptr, lb->first));
1560
1561 typedef Files::value_type value_type;
1562
1563 m_files.insert(lb, value_type(ptr, File(filename, line)));
1564
1565 m_mutex.exit();
1566 }
1567
1568 /** Deregister a latch - when it is destroyed
1569 @param[in] ptr Latch instance being destroyed */
deregister_latchCreateTracker1570 void deregister_latch(const void* ptr)
1571 UNIV_NOTHROW
1572 {
1573 m_mutex.enter();
1574
1575 Files::iterator lb = m_files.lower_bound(ptr);
1576
1577 ut_ad(lb != m_files.end()
1578 && !(m_files.key_comp()(ptr, lb->first)));
1579
1580 m_files.erase(lb);
1581
1582 m_mutex.exit();
1583 }
1584
1585 /** Get the create string, format is "name:line"
1586 @param[in] ptr Latch instance
1587 @return the create string or "" if not found */
getCreateTracker1588 std::string get(const void* ptr)
1589 UNIV_NOTHROW
1590 {
1591 m_mutex.enter();
1592
1593 std::string created;
1594
1595 Files::iterator lb = m_files.lower_bound(ptr);
1596
1597 if (lb != m_files.end()
1598 && !(m_files.key_comp()(ptr, lb->first))) {
1599
1600 std::ostringstream msg;
1601
1602 msg << lb->second.m_name << ":" << lb->second.m_line;
1603
1604 created = msg.str();
1605 }
1606
1607 m_mutex.exit();
1608
1609 return(created);
1610 }
1611
1612 private:
1613 /** For tracking the filename and line number */
1614 struct File {
1615
1616 /** Constructor */
FileCreateTracker::File1617 File() UNIV_NOTHROW : m_name(), m_line() { }
1618
1619 /** Constructor
1620 @param[in] name Filename where created
1621 @param[in] line Line number where created */
FileCreateTracker::File1622 File(const char* name, uint16_t line)
1623 UNIV_NOTHROW
1624 :
1625 m_name(sync_basename(name)),
1626 m_line(line)
1627 {
1628 /* No op */
1629 }
1630
1631 /** Filename where created */
1632 std::string m_name;
1633
1634 /** Line number where created */
1635 uint16_t m_line;
1636 };
1637
1638 /** Map the mutex instance to where it was created */
1639 typedef std::map<
1640 const void*,
1641 File,
1642 std::less<const void*>,
1643 ut_allocator<std::pair<const void* const, File> > >
1644 Files;
1645
1646 typedef OSMutex Mutex;
1647
1648 /** Mutex protecting m_files */
1649 Mutex m_mutex;
1650
1651 /** Track the latch creation */
1652 Files m_files;
1653 };
1654
/** Track latch creation location. For reducing the size of the latches.
File-local instance used by the sync_file_created_*() wrappers below. */
static CreateTracker	create_tracker;
1657
/** Register a latch, called when it is created. Thin wrapper that records
the creation site in the file-local create_tracker.
@param[in]	ptr		Latch instance that was created
@param[in]	filename	Filename where it was created
@param[in]	line		Line number in filename */
void
sync_file_created_register(
	const void*	ptr,
	const char*	filename,
	uint16_t	line)
{
	create_tracker.register_latch(ptr, filename, line);
}
1670
/** Deregister a latch, called when it is destroyed. Thin wrapper that
removes the creation record from the file-local create_tracker.
@param[in]	ptr		Latch to be destroyed */
void
sync_file_created_deregister(const void* ptr)
{
	create_tracker.deregister_latch(ptr);
}
1678
/** Get the string where the file was created. Its format is "name:line".
Thin wrapper over the file-local create_tracker.
@param[in]	ptr		Latch instance
@return created information or "" if can't be found */
std::string
sync_file_created_get(const void* ptr)
{
	return(create_tracker.get(ptr));
}
1687
/** Initializes the synchronization data structures: the latch meta data
table, the global rw-lock list and its mutex, the latch order checking
(debug builds only) and the sync wait array. Must be called once before
any latch is created; sync_check_close() is the counterpart. */
void
sync_check_init()
{
	ut_ad(!LatchDebug::s_initialized);
	ut_d(LatchDebug::s_initialized = true);

	/* Must come first: the mutex_create() calls below need the
	latch meta data. */
	sync_latch_meta_init();

	/* Init the rw-lock & mutex list and create the mutex to protect it. */

	UT_LIST_INIT(rw_lock_list, &rw_lock_t::list);

	mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);

	ut_d(LatchDebug::init());

	sync_array_init();
}
1707
/** Free the InnoDB synchronization data structures, in the reverse of the
order in which sync_check_init() set them up. */
void
sync_check_close()
{
	ut_d(LatchDebug::shutdown());

	mutex_free(&rw_lock_list_mutex);

	sync_array_close();

	/* Last: the meta data is needed while latches still exist. */
	sync_latch_meta_destroy();
}
1720
1721