1 /*****************************************************************************
2 
3 Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2017, 2020, MariaDB Corporation.
5 
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11 
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
15 
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19 
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
23 
24 *****************************************************************************/
25 
26 /**************************************************//**
27 @file sync/sync0debug.cc
28 Debug checks for latches.
29 
30 Created 2012-08-21 Sunny Bains
31 *******************************************************/
32 
33 #include "sync0sync.h"
34 #include "sync0debug.h"
35 #include "srv0start.h"
36 #include "fil0fil.h"
37 
38 #include <vector>
39 #include <string>
40 #include <algorithm>
41 #include <map>
42 
43 #ifdef UNIV_DEBUG
44 
/* Flag enabling the latch-order checks in this module; presumably wired
to a server option — confirm against the srv/handler layer. */
my_bool		srv_sync_debug;

/** The global mutex which protects debug info lists of all rw-locks.
To modify the debug info list of an rw-lock, this mutex has to be
acquired in addition to the mutex protecting the lock. */
static SysMutex		rw_lock_debug_mutex;
51 
52 /** The latch held by a thread */
53 struct Latched {
54 
55 	/** Constructor */
LatchedLatched56 	Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
57 
58 	/** Constructor
59 	@param[in]	latch		Latch instance
60 	@param[in]	level		Level of latch held */
LatchedLatched61 	Latched(const latch_t*	latch,
62 		latch_level_t	level)
63 		:
64 		m_latch(latch),
65 		m_level(level)
66 	{
67 		/* No op */
68 	}
69 
70 	/** @return the latch level */
get_levelLatched71 	latch_level_t get_level() const
72 	{
73 		return(m_level);
74 	}
75 
76 	/** Check if the rhs latch and level match
77 	@param[in]	rhs		instance to compare with
78 	@return true on match */
operator ==Latched79 	bool operator==(const Latched& rhs) const
80 	{
81 		return(m_latch == rhs.m_latch && m_level == rhs.m_level);
82 	}
83 
84 	/** The latch instance */
85 	const latch_t*		m_latch;
86 
87 	/** The latch level. For buffer blocks we can pass a separate latch
88 	level to check against, see buf_block_dbg_add_level() */
89 	latch_level_t		m_level;
90 };
91 
/** Thread specific latches. This is ordered on level in descending order.
(The ordering is maintained by the checks in LatchDebug, not by the
container itself.) */
typedef std::vector<Latched, ut_allocator<Latched> > Latches;
94 
95 /** The deadlock detector. */
96 struct LatchDebug {
97 
98 	/** Debug mutex for control structures, should not be tracked
99 	by this module. */
100 	typedef OSMutex Mutex;
101 
102 	/** Comparator for the ThreadMap. */
103 	struct os_thread_id_less
104 		: public std::binary_function<
105 		  os_thread_id_t,
106 		  os_thread_id_t,
107 		  bool>
108 	{
109 		/** @return true if lhs < rhs */
operator ()LatchDebug::os_thread_id_less110 		bool operator()(
111 			const os_thread_id_t& lhs,
112 			const os_thread_id_t& rhs) const
113 			UNIV_NOTHROW
114 		{
115 			return(ulint(lhs) < ulint(rhs));
116 		}
117 	};
118 
119 	/** For tracking a thread's latches. */
120 	typedef std::map<
121 		os_thread_id_t,
122 		Latches*,
123 		os_thread_id_less,
124 		ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
125 		ThreadMap;
126 
127 	/** Constructor */
128 	LatchDebug()
129 		UNIV_NOTHROW;
130 
131 	/** Destructor */
~LatchDebugLatchDebug132 	~LatchDebug()
133 		UNIV_NOTHROW
134 	{
135 		m_mutex.destroy();
136 	}
137 
138 	/** Create a new instance if one doesn't exist else return
139 	the existing one.
140 	@param[in]	add		add an empty entry if one is not
141 					found (default no)
142 	@return	pointer to a thread's acquired latches. */
143 	Latches* thread_latches(bool add = false)
144 		UNIV_NOTHROW;
145 
146 	/** Check that all the latches already owned by a thread have a lower
147 	level than limit.
148 	@param[in]	latches		the thread's existing (acquired) latches
149 	@param[in]	limit		to check against
150 	@return latched if there is one with a level <= limit . */
151 	const Latched* less(
152 		const Latches*	latches,
153 		latch_level_t	limit) const
154 		UNIV_NOTHROW;
155 
156 	/** Checks if the level value exists in the thread's acquired latches.
157 	@param[in]	latches		the thread's existing (acquired) latches
158 	@param[in]	level		to lookup
159 	@return	latch if found or 0 */
160 	const latch_t* find(
161 		const Latches*	Latches,
162 		latch_level_t	level) const
163 		UNIV_NOTHROW;
164 
165 	/**
166 	Checks if the level value exists in the thread's acquired latches.
167 	@param[in]	level		to lookup
168 	@return	latch if found or 0 */
169 	const latch_t* find(latch_level_t level)
170 		UNIV_NOTHROW;
171 
172 	/** Report error and abort.
173 	@param[in]	latches		thread's existing latches
174 	@param[in]	latched		The existing latch causing the
175 					invariant to fail
176 	@param[in]	level		The new level request that breaks
177 					the order */
178 	void crash(
179 		const Latches*	latches,
180 		const Latched*	latched,
181 		latch_level_t	level) const
182 		UNIV_NOTHROW;
183 
184 	/** Do a basic ordering check.
185 	@param[in]	latches		thread's existing latches
186 	@param[in]	requested_level	Level requested by latch
187 	@param[in]	level		declared ulint so that we can
188 					do level - 1. The level of the
189 					latch that the thread is trying
190 					to acquire
191 	@return true if passes, else crash with error message. */
192 	inline bool basic_check(
193 		const Latches*	latches,
194 		latch_level_t	requested_level,
195 		lint		level) const
196 		UNIV_NOTHROW;
197 
198 	/** Adds a latch and its level in the thread level array. Allocates
199 	the memory for the array if called for the first time for this
200 	OS thread.  Makes the checks against other latch levels stored
201 	in the array for this thread.
202 
203 	@param[in]	latch	latch that the thread wants to acqire.
204 	@param[in]	level	latch level to check against */
lock_validateLatchDebug205 	void lock_validate(
206 		const latch_t*	latch,
207 		latch_level_t	level)
208 		UNIV_NOTHROW
209 	{
210 		/* Ignore diagnostic latches, starting with '.' */
211 
212 		if (*latch->get_name() != '.'
213 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
214 
215 			ut_ad(level != SYNC_LEVEL_VARYING);
216 
217 			Latches*	latches = check_order(latch, level);
218 
219 			ut_a(latches->empty()
220 			     || level == SYNC_LEVEL_VARYING
221 			     || level == SYNC_NO_ORDER_CHECK
222 			     || latches->back().get_level()
223 			     == SYNC_NO_ORDER_CHECK
224 			     || latches->back().m_latch->get_level()
225 			     == SYNC_LEVEL_VARYING
226 			     || latches->back().get_level() >= level);
227 		}
228 	}
229 
230 	/** Adds a latch and its level in the thread level array. Allocates
231 	the memory for the array if called for the first time for this
232 	OS thread.  Makes the checks against other latch levels stored
233 	in the array for this thread.
234 
235 	@param[in]	latch	latch that the thread wants to acqire.
236 	@param[in]	level	latch level to check against */
lock_grantedLatchDebug237 	void lock_granted(
238 		const latch_t*	latch,
239 		latch_level_t	level)
240 		UNIV_NOTHROW
241 	{
242 		/* Ignore diagnostic latches, starting with '.' */
243 
244 		if (*latch->get_name() != '.'
245 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
246 
247 			Latches*	latches = thread_latches(true);
248 
249 			latches->push_back(Latched(latch, level));
250 		}
251 	}
252 
253 	/** For recursive X rw-locks.
254 	@param[in]	latch		The RW-Lock to relock  */
relockLatchDebug255 	void relock(const latch_t* latch)
256 		UNIV_NOTHROW
257 	{
258 		ut_a(latch->m_rw_lock);
259 
260 		latch_level_t	level = latch->get_level();
261 
262 		/* Ignore diagnostic latches, starting with '.' */
263 
264 		if (*latch->get_name() != '.'
265 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
266 
267 			Latches*	latches = thread_latches(true);
268 
269 			Latches::iterator	it = std::find(
270 				latches->begin(), latches->end(),
271 				Latched(latch, level));
272 
273 			ut_a(latches->empty()
274 			     || level == SYNC_LEVEL_VARYING
275 			     || level == SYNC_NO_ORDER_CHECK
276 			     || latches->back().m_latch->get_level()
277 			     == SYNC_LEVEL_VARYING
278 			     || latches->back().m_latch->get_level()
279 			     == SYNC_NO_ORDER_CHECK
280 			     || latches->back().get_level() >= level
281 			     || it != latches->end());
282 
283 			if (it == latches->end()) {
284 				latches->push_back(Latched(latch, level));
285 			} else {
286 				latches->insert(it, Latched(latch, level));
287 			}
288 		}
289 	}
290 
291 	/** Iterate over a thread's latches.
292 	@param[in]	functor		The callback
293 	@return true if the functor returns true. */
for_eachLatchDebug294 	bool for_each(const sync_check_functor_t& functor)
295 		UNIV_NOTHROW
296 	{
297 		if (const Latches* latches = thread_latches()) {
298 			Latches::const_iterator	end = latches->end();
299 			for (Latches::const_iterator it = latches->begin();
300 			     it != end; ++it) {
301 
302 				if (functor(it->m_level)) {
303 					return(true);
304 				}
305 			}
306 		}
307 
308 		return(false);
309 	}
310 
311 	/** Removes a latch from the thread level array if it is found there.
312 	@param[in]	latch		The latch that was released
313 	@return true if found in the array; it is not an error if the latch is
314 	not found, as we presently are not able to determine the level for
315 	every latch reservation the program does */
316 	void unlock(const latch_t* latch) UNIV_NOTHROW;
317 
318 	/** Get the level name
319 	@param[in]	level		The level ID to lookup
320 	@return level name */
get_level_nameLatchDebug321 	const std::string& get_level_name(latch_level_t level) const
322 		UNIV_NOTHROW
323 	{
324 		Levels::const_iterator	it = m_levels.find(level);
325 
326 		ut_ad(it != m_levels.end());
327 
328 		return(it->second);
329 	}
330 
331 	/** Initialise the debug data structures */
332 	static void init()
333 		UNIV_NOTHROW;
334 
335 	/** Shutdown the latch debug checking */
336 	static void shutdown()
337 		UNIV_NOTHROW;
338 
339 	/** @return the singleton instance */
instanceLatchDebug340 	static LatchDebug* instance()
341 		UNIV_NOTHROW
342 	{
343 		return(s_instance);
344 	}
345 
346 	/** Create the singleton instance */
create_instanceLatchDebug347 	static void create_instance()
348 		UNIV_NOTHROW
349 	{
350 		ut_ad(s_instance == NULL);
351 
352 		s_instance = UT_NEW_NOKEY(LatchDebug());
353 	}
354 
355 private:
356 	/** Disable copying */
357 	LatchDebug(const LatchDebug&);
358 	LatchDebug& operator=(const LatchDebug&);
359 
360 	/** Adds a latch and its level in the thread level array. Allocates
361 	the memory for the array if called first time for this OS thread.
362 	Makes the checks against other latch levels stored in the array
363 	for this thread.
364 
365 	@param[in]	latch	 pointer to a mutex or an rw-lock
366 	@param[in]	level	level in the latching order
367 	@return the thread's latches */
368 	Latches* check_order(
369 		const latch_t*	latch,
370 		latch_level_t	level)
371 		UNIV_NOTHROW;
372 
373 	/** Print the latches acquired by a thread
374 	@param[in]	latches		Latches acquired by a thread */
375 	void print_latches(const Latches* latches) const
376 		UNIV_NOTHROW;
377 
378 	/** Special handling for the RTR mutexes. We need to add proper
379 	levels for them if possible.
380 	@param[in]	latch		Latch to check
381 	@return true if it is a an _RTR_ mutex */
is_rtr_mutexLatchDebug382 	bool is_rtr_mutex(const latch_t* latch) const
383 		UNIV_NOTHROW
384 	{
385 		return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
386 		       || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
387 		       || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX);
388 	}
389 
390 private:
391 	/** Comparator for the Levels . */
392 	struct latch_level_less
393 		: public std::binary_function<
394 		  latch_level_t,
395 		  latch_level_t,
396 		  bool>
397 	{
398 		/** @return true if lhs < rhs */
operator ()LatchDebug::latch_level_less399 		bool operator()(
400 			const latch_level_t& lhs,
401 			const latch_level_t& rhs) const
402 			UNIV_NOTHROW
403 		{
404 			return(lhs < rhs);
405 		}
406 	};
407 
408 	typedef std::map<
409 		latch_level_t,
410 		std::string,
411 		latch_level_less,
412 		ut_allocator<std::pair<const latch_level_t, std::string> > >
413 		Levels;
414 
415 	/** Mutex protecting the deadlock detector data structures. */
416 	Mutex			m_mutex;
417 
418 	/** Thread specific data. Protected by m_mutex. */
419 	ThreadMap		m_threads;
420 
421 	/** Mapping from latche level to its string representation. */
422 	Levels			m_levels;
423 
424 	/** The singleton instance. Must be created in single threaded mode. */
425 	static LatchDebug*	s_instance;
426 
427 public:
428 	/** For checking whether this module has been initialised or not. */
429 	static bool		s_initialized;
430 };
431 
/** The latch order checking infra-structure */
LatchDebug* LatchDebug::s_instance = NULL;

/* Whether this module has been initialised (see LatchDebug). */
bool LatchDebug::s_initialized = false;
435 
/* Insert level T into m_levels, using the enum identifier itself (#T)
as the human-readable name; asserts each level is inserted only once. */
#define LEVEL_MAP_INSERT(T)						\
do {									\
	std::pair<Levels::iterator, bool>	result =		\
		m_levels.insert(Levels::value_type(T, #T));		\
	ut_ad(result.second);						\
} while(0)
442 
/** Setup the mapping from level ID to level name mapping */
LatchDebug::LatchDebug()
{
	m_mutex.init();

	/* One entry per latch_level_t enumerator; the final assert below
	verifies the list stays complete when the enum changes. */
	LEVEL_MAP_INSERT(SYNC_UNKNOWN);
	LEVEL_MAP_INSERT(SYNC_MUTEX);
	LEVEL_MAP_INSERT(RW_LOCK_SX);
	LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
	LEVEL_MAP_INSERT(RW_LOCK_S);
	LEVEL_MAP_INSERT(RW_LOCK_X);
	LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
	LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
	LEVEL_MAP_INSERT(SYNC_POOL);
	LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
	LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
	LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
	LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
	LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
	LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
	LEVEL_MAP_INSERT(SYNC_RECV);
	LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
	LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
	LEVEL_MAP_INSERT(SYNC_TRX);
	LEVEL_MAP_INSERT(SYNC_RW_TRX_HASH_ELEMENT);
	LEVEL_MAP_INSERT(SYNC_READ_VIEW);
	LEVEL_MAP_INSERT(SYNC_TRX_SYS);
	LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
	LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
	LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
	LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
	LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
	LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
	LEVEL_MAP_INSERT(SYNC_FSP);
	LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
	LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
	LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
	LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
	LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
	LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
	LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
	LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
	LEVEL_MAP_INSERT(SYNC_DICT);
	LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
	LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
	LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
	LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
	LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);

	/* Enum count starts from 0 */
	ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
}
506 
507 /** Print the latches acquired by a thread
508 @param[in]	latches		Latches acquired by a thread */
509 void
print_latches(const Latches * latches) const510 LatchDebug::print_latches(const Latches* latches) const
511 	UNIV_NOTHROW
512 {
513 	ib::error() << "Latches already owned by this thread: ";
514 
515 	Latches::const_iterator	end = latches->end();
516 
517 	for (Latches::const_iterator it = latches->begin();
518 	     it != end;
519 	     ++it) {
520 
521 		ib::error()
522 			<< sync_latch_get_name(it->m_latch->get_id())
523 			<< " -> "
524 			<< it->m_level << " "
525 			<< "(" << get_level_name(it->m_level) << ")";
526 	}
527 }
528 
529 /** Report error and abort
530 @param[in]	latches		thread's existing latches
531 @param[in]	latched		The existing latch causing the invariant to fail
532 @param[in]	level		The new level request that breaks the order */
533 void
crash(const Latches * latches,const Latched * latched,latch_level_t level) const534 LatchDebug::crash(
535 	const Latches*	latches,
536 	const Latched*	latched,
537 	latch_level_t	level) const
538 	UNIV_NOTHROW
539 {
540 	const latch_t*		latch = latched->m_latch;
541 	const std::string&	in_level_name = get_level_name(level);
542 
543 	const std::string&	latch_level_name =
544 		get_level_name(latched->m_level);
545 
546 	ib::error()
547 		<< "Thread " << os_thread_get_curr_id()
548 		<< " already owns a latch "
549 		<< sync_latch_get_name(latch->m_id) << " at level"
550 		<< " " << latched->m_level << " (" << latch_level_name
551 		<< " ), which is at a lower/same level than the"
552 		<< " requested latch: "
553 		<< level << " (" << in_level_name << "). "
554 		<< latch->to_string();
555 
556 	print_latches(latches);
557 
558 	ut_error;
559 }
560 
561 /** Check that all the latches already owned by a thread have a lower
562 level than limit.
563 @param[in]	latches		the thread's existing (acquired) latches
564 @param[in]	limit		to check against
565 @return latched info if there is one with a level <= limit . */
566 const Latched*
less(const Latches * latches,latch_level_t limit) const567 LatchDebug::less(
568 	const Latches*	latches,
569 	latch_level_t	limit) const
570 	UNIV_NOTHROW
571 {
572 	Latches::const_iterator	end = latches->end();
573 
574 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
575 
576 		if (it->m_level <= limit) {
577 			return(&(*it));
578 		}
579 	}
580 
581 	return(NULL);
582 }
583 
584 /** Do a basic ordering check.
585 @param[in]	latches		thread's existing latches
586 @param[in]	requested_level	Level requested by latch
587 @param[in]	in_level	declared ulint so that we can do level - 1.
588 				The level of the latch that the thread is
589 				trying to acquire
590 @return true if passes, else crash with error message. */
591 inline bool
basic_check(const Latches * latches,latch_level_t requested_level,lint in_level) const592 LatchDebug::basic_check(
593 	const Latches*	latches,
594 	latch_level_t	requested_level,
595 	lint		in_level) const
596 	UNIV_NOTHROW
597 {
598 	latch_level_t	level = latch_level_t(in_level);
599 
600 	ut_ad(level < SYNC_LEVEL_MAX);
601 
602 	const Latched*	latched = less(latches, level);
603 
604 	if (latched != NULL) {
605 		crash(latches, latched, requested_level);
606 		return(false);
607 	}
608 
609 	return(true);
610 }
611 
/** Create a new instance if one doesn't exist else return the existing one.
@param[in]	add		add an empty entry if one is not found
				(default no)
@return	pointer to a thread's acquired latches, or NULL when the thread
has none recorded and add is false. */
Latches*
LatchDebug::thread_latches(bool add)
	UNIV_NOTHROW
{
	m_mutex.enter();

	os_thread_id_t		thread_id = os_thread_get_curr_id();

	/* lower_bound() serves double duty: it is the lookup, and on a
	miss it is the correct insertion hint for the new entry below. */
	ThreadMap::iterator	lb = m_threads.lower_bound(thread_id);

	if (lb != m_threads.end()
	    && !(m_threads.key_comp()(thread_id, lb->first))) {

		/* Exact match: this thread already has a latch list. */

		Latches*	latches = lb->second;

		m_mutex.exit();

		return(latches);

	} else if (!add) {

		m_mutex.exit();

		return(NULL);

	} else {
		typedef ThreadMap::value_type value_type;

		Latches*	latches = UT_NEW_NOKEY(Latches());

		ut_a(latches != NULL);

		/* Pre-size for the common case to avoid reallocations. */
		latches->reserve(32);

		/* Hinted insert at the position computed above. */
		m_threads.insert(lb, value_type(thread_id, latches));

		m_mutex.exit();

		return(latches);
	}
}
656 
657 /** Checks if the level value exists in the thread's acquired latches.
658 @param[in]	levels		the thread's existing (acquired) latches
659 @param[in]	level		to lookup
660 @return	latch if found or 0 */
661 const latch_t*
find(const Latches * latches,latch_level_t level) const662 LatchDebug::find(
663 	const Latches*	latches,
664 	latch_level_t	level) const UNIV_NOTHROW
665 {
666 	Latches::const_iterator	end = latches->end();
667 
668 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
669 
670 		if (it->m_level == level) {
671 
672 			return(it->m_latch);
673 		}
674 	}
675 
676 	return(0);
677 }
678 
679 /** Checks if the level value exists in the thread's acquired latches.
680 @param[in]	 level		The level to lookup
681 @return	latch if found or NULL */
682 const latch_t*
find(latch_level_t level)683 LatchDebug::find(latch_level_t level)
684 	UNIV_NOTHROW
685 {
686 	return(find(thread_latches(), level));
687 }
688 
/**
Adds a latch and its level in the thread level array. Allocates the memory
for the array if called first time for this OS thread. Makes the checks
against other latch levels stored in the array for this thread.
@param[in]	latch	pointer to a mutex or an rw-lock
@param[in]	level	level in the latching order
@return the thread's latches */
Latches*
LatchDebug::check_order(
	const latch_t*	latch,
	latch_level_t	level)
	UNIV_NOTHROW
{
	ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);

	Latches*	latches = thread_latches(true);

	/* NOTE that there is a problem with _NODE and _LEAF levels: if the
	B-tree height changes, then a leaf can change to an internal node
	or the other way around. We do not know at present if this can cause
	unnecessary assertion failures below. */

	switch (level) {
	case SYNC_NO_ORDER_CHECK:
	case SYNC_EXTERN_STORAGE:
	case SYNC_TREE_NODE_FROM_HASH:
		/* Do no order checking */
		break;

	case SYNC_TRX_SYS_HEADER:

		if (srv_is_being_started) {
			/* This is violated during trx_sys_create_rsegs()
			when creating additional rollback segments when
			upgrading in srv_start(). */
			break;
		}

		/* Fall through */

	case SYNC_RECV:
	case SYNC_WORK_QUEUE:
	case SYNC_FTS_TOKENIZE:
	case SYNC_FTS_OPTIMIZE:
	case SYNC_FTS_CACHE:
	case SYNC_FTS_CACHE_INIT:
	case SYNC_SEARCH_SYS:
	case SYNC_LOCK_SYS:
	case SYNC_LOCK_WAIT_SYS:
	case SYNC_RW_TRX_HASH_ELEMENT:
	case SYNC_READ_VIEW:
	case SYNC_TRX_SYS:
	case SYNC_IBUF_BITMAP_MUTEX:
	case SYNC_REDO_RSEG:
	case SYNC_NOREDO_RSEG:
	case SYNC_PURGE_LATCH:
	case SYNC_PURGE_QUEUE:
	case SYNC_DICT_OPERATION:
	case SYNC_DICT_HEADER:
	case SYNC_TRX_I_S_RWLOCK:
	case SYNC_IBUF_MUTEX:
	case SYNC_INDEX_ONLINE_LOG:
	case SYNC_STATS_AUTO_RECALC:
	case SYNC_POOL:
	case SYNC_POOL_MANAGER:
		/* Simple levels: all currently held latches must be at a
		strictly higher level than the requested one. */
		basic_check(latches, level, level);
		break;

	case SYNC_ANY_LATCH:

		/* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
		if (is_rtr_mutex(latch)) {

			const Latched*	latched = less(latches, level);

			/* RTR mutexes may be nested among themselves. */
			if (latched == NULL
			    || (latched != NULL
				&& is_rtr_mutex(latched->m_latch))) {

				/* No violation */
				break;

			}

			crash(latches, latched, level);

		} else {
			basic_check(latches, level, level);
		}

		break;

	case SYNC_TRX:

		/* Either the thread must own the lock_sys.mutex, or
		it is allowed to own only ONE trx_t::mutex. */

		if (less(latches, level) != NULL) {
			basic_check(latches, level, level - 1);
			ut_a(find(latches, SYNC_LOCK_SYS) != 0);
		}
		break;

	case SYNC_IBUF_BITMAP:

		/* Either the thread must own the master mutex to all
		the bitmap pages, or it is allowed to latch only ONE
		bitmap page. */

		if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {

			basic_check(latches, level, SYNC_IBUF_BITMAP - 1);

		} else if (!srv_is_being_started) {

			/* This is violated during trx_sys_create_rsegs()
			when creating additional rollback segments during
			upgrade. */

			basic_check(latches, level, SYNC_IBUF_BITMAP);
		}
		break;

	case SYNC_FSP_PAGE:
		/* A file space page may only be latched while holding
		the tablespace latch. */
		ut_a(find(latches, SYNC_FSP) != 0);
		break;

	case SYNC_FSP:

		/* The tablespace latch is recursive. */
		ut_a(find(latches, SYNC_FSP) != 0
		     || basic_check(latches, level, SYNC_FSP));
		break;

	case SYNC_TRX_UNDO_PAGE:

		/* Purge is allowed to read in as many UNDO pages as it likes.
		The purge thread can read the UNDO pages without any covering
		mutex. */

		ut_a(find(latches, SYNC_REDO_RSEG) != 0
		     || find(latches, SYNC_NOREDO_RSEG) != 0
		     || basic_check(latches, level, level - 1));
		break;

	case SYNC_RSEG_HEADER:

		/* Requires holding the corresponding rollback segment
		mutex (redo or no-redo). */
		ut_a(find(latches, SYNC_REDO_RSEG) != 0
		     || find(latches, SYNC_NOREDO_RSEG) != 0);
		break;

	case SYNC_RSEG_HEADER_NEW:

		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
		break;

	case SYNC_TREE_NODE:

		ut_a(find(latches, SYNC_FSP) == &fil_system.temp_space->latch
		     || find(latches, SYNC_INDEX_TREE)
		     || find(latches, SYNC_DICT_OPERATION)
		     || basic_check(latches, level, SYNC_TREE_NODE - 1));
		break;

	case SYNC_TREE_NODE_NEW:

		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
		break;

	case SYNC_INDEX_TREE:

		basic_check(latches, level, SYNC_TREE_NODE - 1);
		break;

	case SYNC_IBUF_TREE_NODE:

		ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
		     || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
		break;

	case SYNC_IBUF_TREE_NODE_NEW:

		/* ibuf_add_free_page() allocates new pages for the change
		buffer while only holding the tablespace x-latch. These
		pre-allocated new pages may only be used while holding
		ibuf_mutex, in btr_page_alloc_for_ibuf(). */

		ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
		     || find(latches, SYNC_FSP) != 0);
		break;

	case SYNC_IBUF_INDEX_TREE:

		if (find(latches, SYNC_FSP) != 0) {
			basic_check(latches, level, level - 1);
		} else {
			basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
		}
		break;

	case SYNC_IBUF_PESS_INSERT_MUTEX:

		basic_check(latches, level, SYNC_FSP - 1);
		ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
		break;

	case SYNC_IBUF_HEADER:

		basic_check(latches, level, SYNC_FSP - 1);
		ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
		ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
		break;

	case SYNC_DICT:
		basic_check(latches, level, SYNC_DICT);
		break;

	case SYNC_MUTEX:
	case SYNC_UNKNOWN:
	case SYNC_LEVEL_VARYING:
	case RW_LOCK_X:
	case RW_LOCK_X_WAIT:
	case RW_LOCK_S:
	case RW_LOCK_SX:
	case RW_LOCK_NOT_LOCKED:
		/* These levels should never be set for a latch. */
		ut_error;
		break;
	}

	return(latches);
}
920 
/** Removes a latch from the thread level array if it is found there.
@param[in]	latch		that was released/unlocked
@return true if found in the array; it is not an error if the latch is
not found, as we presently are not able to determine the level for
every latch reservation the program does */
void
LatchDebug::unlock(const latch_t* latch)
	UNIV_NOTHROW
{
	if (latch->get_level() == SYNC_LEVEL_VARYING) {
		// We don't have varying level mutexes
		ut_ad(latch->m_rw_lock);
	}

	Latches*	latches;

	if (*latch->get_name() == '.') {

		/* Ignore diagnostic latches, starting with '.' */

	} else if ((latches = thread_latches()) != NULL) {

		/* Search backwards so that for a recursively held latch
		the most recently recorded entry is removed first. */

		Latches::reverse_iterator	rend = latches->rend();

		for (Latches::reverse_iterator it = latches->rbegin();
		     it != rend;
		     ++it) {

			if (it->m_latch != latch) {

				continue;
			}

			/* Convert the reverse iterator to the forward
			iterator for the same element: base() points one
			past it, so step back before erasing. */

			Latches::iterator	i = it.base();

			latches->erase(--i);

			/* If this thread doesn't own any more
			latches remove from the map.

			FIXME: Perhaps use the master thread
			to do purge. Or, do it from close connection.
			This could be expensive. */

			if (latches->empty()) {

				m_mutex.enter();

				os_thread_id_t	thread_id;

				thread_id = os_thread_get_curr_id();

				m_threads.erase(thread_id);

				m_mutex.exit();

				UT_DELETE(latches);
			}

			return;
		}

		/* Not found: fatal unless this latch's level legitimately
		varies (e.g. buffer block locks). */
		if (latch->get_level() != SYNC_LEVEL_VARYING) {
			ib::error()
				<< "Couldn't find latch "
				<< sync_latch_get_name(latch->get_id());

			print_latches(latches);

			/** Must find the latch. */
			ut_error;
		}
	}
}
996 
997 /** Get the latch id from a latch name.
998 @param[in]	name	Latch name
999 @return latch id if found else LATCH_ID_NONE. */
1000 latch_id_t
sync_latch_get_id(const char * name)1001 sync_latch_get_id(const char* name)
1002 {
1003 	LatchMetaData::const_iterator	end = latch_meta.end();
1004 
1005 	/* Linear scan should be OK, this should be extremely rare. */
1006 
1007 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1008 	     it != end;
1009 	     ++it) {
1010 
1011 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1012 
1013 			continue;
1014 
1015 		} else if (strcmp((*it)->get_name(), name) == 0) {
1016 
1017 			return((*it)->get_id());
1018 		}
1019 	}
1020 
1021 	return(LATCH_ID_NONE);
1022 }
1023 
1024 /** Get the latch name from a sync level
1025 @param[in]	level		Latch level to lookup
1026 @return NULL if not found. */
1027 const char*
sync_latch_get_name(latch_level_t level)1028 sync_latch_get_name(latch_level_t level)
1029 {
1030 	LatchMetaData::const_iterator	end = latch_meta.end();
1031 
1032 	/* Linear scan should be OK, this should be extremely rare. */
1033 
1034 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1035 	     it != end;
1036 	     ++it) {
1037 
1038 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1039 
1040 			continue;
1041 
1042 		} else if ((*it)->get_level() == level) {
1043 
1044 			return((*it)->get_name());
1045 		}
1046 	}
1047 
1048 	return(0);
1049 }
1050 
1051 /** Check if it is OK to acquire the latch.
1052 @param[in]	latch	latch type */
1053 void
sync_check_lock_validate(const latch_t * latch)1054 sync_check_lock_validate(const latch_t* latch)
1055 {
1056 	if (LatchDebug::instance() != NULL) {
1057 		LatchDebug::instance()->lock_validate(
1058 			latch, latch->get_level());
1059 	}
1060 }
1061 
1062 /** Note that the lock has been granted
1063 @param[in]	latch	latch type */
1064 void
sync_check_lock_granted(const latch_t * latch)1065 sync_check_lock_granted(const latch_t* latch)
1066 {
1067 	if (LatchDebug::instance() != NULL) {
1068 		LatchDebug::instance()->lock_granted(latch, latch->get_level());
1069 	}
1070 }
1071 
1072 /** Check if it is OK to acquire the latch.
1073 @param[in]	latch	latch type
1074 @param[in]	level	Latch level */
1075 void
sync_check_lock(const latch_t * latch,latch_level_t level)1076 sync_check_lock(
1077 	const latch_t*	latch,
1078 	latch_level_t	level)
1079 {
1080 	if (LatchDebug::instance() != NULL) {
1081 
1082 		ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1083 		ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1084 
1085 		LatchDebug::instance()->lock_validate(latch, level);
1086 		LatchDebug::instance()->lock_granted(latch, level);
1087 	}
1088 }
1089 
1090 /** Check if it is OK to re-acquire the lock.
1091 @param[in]	latch		RW-LOCK to relock (recursive X locks) */
1092 void
sync_check_relock(const latch_t * latch)1093 sync_check_relock(const latch_t* latch)
1094 {
1095 	if (LatchDebug::instance() != NULL) {
1096 		LatchDebug::instance()->relock(latch);
1097 	}
1098 }
1099 
1100 /** Removes a latch from the thread level array if it is found there.
1101 @param[in]	latch		The latch to unlock */
1102 void
sync_check_unlock(const latch_t * latch)1103 sync_check_unlock(const latch_t* latch)
1104 {
1105 	if (LatchDebug::instance() != NULL) {
1106 		LatchDebug::instance()->unlock(latch);
1107 	}
1108 }
1109 
1110 /** Checks if the level array for the current thread contains a
1111 mutex or rw-latch at the specified level.
1112 @param[in]	level		to find
1113 @return	a matching latch, or NULL if not found */
1114 const latch_t*
sync_check_find(latch_level_t level)1115 sync_check_find(latch_level_t level)
1116 {
1117 	if (LatchDebug::instance() != NULL) {
1118 		return(LatchDebug::instance()->find(level));
1119 	}
1120 
1121 	return(NULL);
1122 }
1123 
1124 /** Iterate over the thread's latches.
1125 @param[in,out]	functor		called for each element.
1126 @return true if the functor returns true for any element */
1127 bool
sync_check_iterate(const sync_check_functor_t & functor)1128 sync_check_iterate(const sync_check_functor_t& functor)
1129 {
1130 	if (LatchDebug* debug = LatchDebug::instance()) {
1131 		return(debug->for_each(functor));
1132 	}
1133 
1134 	return(false);
1135 }
1136 
1137 /** Enable sync order checking.
1138 
1139 Note: We don't enforce any synchronisation checks. The caller must ensure
1140 that no races can occur */
sync_check_enable()1141 static void sync_check_enable()
1142 {
1143 	if (!srv_sync_debug) {
1144 
1145 		return;
1146 	}
1147 
1148 	/* We should always call this before we create threads. */
1149 
1150 	LatchDebug::create_instance();
1151 }
1152 
/** Initialise the debug data structures: create the mutex that
protects the debug info lists of all rw-locks (see the declaration
of rw_lock_debug_mutex at the top of this file). */
void
LatchDebug::init()
	UNIV_NOTHROW
{
	mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
}
1160 
/** Shutdown the latch debug checking: free the rw-lock debug mutex
and destroy the LatchDebug singleton.

Note: We don't enforce any synchronisation checks. The caller must ensure
that no races can occur */
void
LatchDebug::shutdown()
	UNIV_NOTHROW
{
	mutex_free(&rw_lock_debug_mutex);

	/* init()/sync_check_init() must have been called first. */
	ut_a(s_initialized);

	s_initialized = false;

	/* Delete the singleton created by create_instance(); it is
	NULL if sync order checking was never enabled. */
	UT_DELETE(s_instance);

	LatchDebug::s_instance = NULL;
}
1179 
/** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
because the debug mutex is also acquired in sync0arr while holding the OS
mutex protecting the sync array, and the ordinary mutex_enter might
recursively call routines in sync0arr, leading to a deadlock on the OS
mutex. */
void
rw_lock_debug_mutex_enter()
{
	mutex_enter(&rw_lock_debug_mutex);
}
1190 
/** Releases the debug mutex acquired via rw_lock_debug_mutex_enter(). */
void
rw_lock_debug_mutex_exit()
{
	mutex_exit(&rw_lock_debug_mutex);
}
1197 #endif /* UNIV_DEBUG */
1198 
/* Meta data for all the InnoDB latches. If a latch is not recorded
here then it will not be considered for deadlock checks. */
1201 LatchMetaData	latch_meta;
1202 
/** Load the latch meta data: populate latch_meta with one entry per
known mutex and rw-lock, keyed by latch_id_t. */
static
void
sync_latch_meta_init()
	UNIV_NOTHROW
{
	/* One slot per latch_id_t value; slots that are never
	assigned below remain blank (NULL). */
	latch_meta.resize(LATCH_ID_MAX + 1);

	/* The latches should be ordered on latch_id_t. So that we can
	index directly into the vector to update and fetch meta-data. */

	LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
			dict_foreign_err_mutex_key);

	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);

	LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);

	LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);

	LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);

	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
			fts_pll_tokenize_mutex_key);

	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
			ibuf_bitmap_mutex_key);

	LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);

	LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
			ibuf_pessimistic_insert_mutex_key);

	LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
			purge_sys_pq_mutex_key);

	LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
			recalc_pool_mutex_key);

	LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);

	LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);

	LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);

#ifdef UNIV_DEBUG
	/* Mutex names starting with '.' are not tracked. They are assumed
	to be diagnostic mutexes used in debugging. */
	latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
		LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
			SYNC_NO_ORDER_CHECK,
			rw_lock_debug_mutex_key);
#endif /* UNIV_DEBUG */

	LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
			rtr_active_mutex_key);

	LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);

	LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);

	LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
			rw_lock_list_mutex_key);

	LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
			srv_innodb_monitor_mutex_key);

	LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
			srv_misc_tmpfile_mutex_key);

	LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
			srv_monitor_file_mutex_key);

	LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);

	LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
			trx_pool_manager_mutex_key);

	LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);

	LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);

	LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
			lock_wait_mutex_key);

	LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);

	LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);

	LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
			page_zip_stat_per_index_mutex_key);

	LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
			sync_array_mutex_key);

	LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
			row_drop_list_mutex_key);

	LATCH_ADD_MUTEX(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
			index_online_log_key);

	LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);

	// Add the RW locks
	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);

	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
			 PFS_NOT_INSTRUMENTED);

#ifdef UNIV_DEBUG
	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_LEVEL_VARYING,
			 PFS_NOT_INSTRUMENTED);
#endif /* UNIV_DEBUG */

	LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT_OPERATION,
			 dict_operation_lock_key);

	LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);

	LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);

	LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
			 fts_cache_init_rw_lock_key);

	LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
			 trx_i_s_cache_lock_key);

	LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);

	/* NOTE: IBUF_INDEX_TREE and INDEX_TREE share the same PFS
	instrumentation key (index_tree_rw_lock_key) but have
	distinct sync levels. */
	LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
			 index_tree_rw_lock_key);

	LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);

	/* JAN: TODO: Add PFS instrumentation */
	LATCH_ADD_MUTEX(DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(BTR_DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_DATA_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(FIL_CRYPT_THREADS_MUTEX, SYNC_NO_ORDER_CHECK,
			PFS_NOT_INSTRUMENTED);
	LATCH_ADD_MUTEX(RW_TRX_HASH_ELEMENT, SYNC_RW_TRX_HASH_ELEMENT,
			rw_trx_hash_element_mutex_key);
	LATCH_ADD_MUTEX(READ_VIEW, SYNC_READ_VIEW, read_view_mutex_key);

	latch_id_t	id = LATCH_ID_NONE;

	/* The array should be ordered on latch ID.We need to
	index directly into it from the mutex policy to update
	the counters and access the meta-data. */

	/* Debug check: assert that the IDs of the non-blank entries
	are strictly increasing, i.e. each entry is stored at its own
	latch_id_t index. */
	for (LatchMetaData::iterator it = latch_meta.begin();
	     it != latch_meta.end();
	     ++it) {

		const latch_meta_t*	meta = *it;


		/* Skip blank entries */
		if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
			continue;
		}

		ut_a(id < meta->get_id());

		id = meta->get_id();
	}
}
1375 
1376 /** Destroy the latch meta data */
1377 static
1378 void
sync_latch_meta_destroy()1379 sync_latch_meta_destroy()
1380 {
1381 	for (LatchMetaData::iterator it = latch_meta.begin();
1382 	     it != latch_meta.end();
1383 	     ++it) {
1384 
1385 		UT_DELETE(*it);
1386 	}
1387 
1388 	latch_meta.clear();
1389 }
1390 
/** Initializes the synchronization data structures: the latch
meta data table, the rw-lock list mutex, the latch debug checker
(debug builds only) and the sync wait array. */
void
sync_check_init()
{
	/* Must not be initialised twice. */
	ut_ad(!LatchDebug::s_initialized);
	ut_d(LatchDebug::s_initialized = true);

	/* Populate latch_meta before any latch is created. */
	sync_latch_meta_init();

	/* create the mutex to protect rw_lock list. */

	mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);

	ut_d(LatchDebug::init());

	sync_array_init();

	/* Creates the LatchDebug instance iff srv_sync_debug is set. */
	ut_d(sync_check_enable());
}
1410 
/** Free the InnoDB synchronization data structures.
Tears down, in order: the latch debug checker (debug builds only),
the rw-lock list mutex, the sync wait array and the latch meta data.
This is the reverse of sync_check_init(). */
void
sync_check_close()
{
	ut_d(LatchDebug::shutdown());

	mutex_free(&rw_lock_list_mutex);

	sync_array_close();

	/* Must be last: the latches above have meta data entries. */
	sync_latch_meta_destroy();
}
1423 
1424