1 /*****************************************************************************
2 
3 Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2017, 2020, MariaDB Corporation.
5 
6 Portions of this file contain modifications contributed and copyrighted by
7 Google, Inc. Those modifications are gratefully acknowledged and are described
8 briefly in the InnoDB documentation. The contributions by Google are
9 incorporated with their permission, and subject to the conditions contained in
10 the file COPYING.Google.
11 
12 This program is free software; you can redistribute it and/or modify it under
13 the terms of the GNU General Public License as published by the Free Software
14 Foundation; version 2 of the License.
15 
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
19 
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc.,
22 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
23 
24 *****************************************************************************/
25 
26 /**************************************************//**
27 @file sync/sync0debug.cc
28 Debug checks for latches.
29 
30 Created 2012-08-21 Sunny Bains
31 *******************************************************/
32 
33 #include "sync0sync.h"
34 #include "sync0debug.h"
35 #include "srv0start.h"
36 #include "fil0fil.h"
37 
38 #include <vector>
39 #include <string>
40 #include <algorithm>
41 #include <iostream>
42 
43 #ifdef UNIV_DEBUG
44 
45 my_bool		srv_sync_debug;
46 
47 /** The global mutex which protects debug info lists of all rw-locks.
48 To modify the debug info list of an rw-lock, this mutex has to be
49 acquired in addition to the mutex protecting the lock. */
50 static SysMutex		rw_lock_debug_mutex;
51 
52 /** The latch held by a thread */
53 struct Latched {
54 
55 	/** Constructor */
56 	Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
57 
58 	/** Constructor
59 	@param[in]	latch		Latch instance
60 	@param[in]	level		Level of latch held */
61 	Latched(const latch_t*	latch,
62 		latch_level_t	level)
63 		:
64 		m_latch(latch),
65 		m_level(level)
66 	{
67 		/* No op */
68 	}
69 
70 	/** @return the latch level */
71 	latch_level_t get_level() const
72 	{
73 		return(m_level);
74 	}
75 
76 	/** Check if the rhs latch and level match
77 	@param[in]	rhs		instance to compare with
78 	@return true on match */
79 	bool operator==(const Latched& rhs) const
80 	{
81 		return(m_latch == rhs.m_latch && m_level == rhs.m_level);
82 	}
83 
84 	/** The latch instance */
85 	const latch_t*		m_latch;
86 
87 	/** The latch level. For buffer blocks we can pass a separate latch
88 	level to check against, see buf_block_dbg_add_level() */
89 	latch_level_t		m_level;
90 };
91 
92 /** Thread specific latches. This is ordered on level in descending order. */
93 typedef std::vector<Latched, ut_allocator<Latched> > Latches;
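/* Illustrative sketch (not part of the build): the vector keeps the highest,
earliest-acquired level at the front. For example, a thread that holds the
dict_sys mutex (SYNC_DICT) and then s-latches an index tree (SYNC_INDEX_TREE)
would have

	Latches	latches = { Latched(dict_mutex,  SYNC_DICT),
			    Latched(index_latch, SYNC_INDEX_TREE) };

where SYNC_DICT > SYNC_INDEX_TREE, so latches.back() always carries the lowest
level acquired so far. The identifiers dict_mutex and index_latch are
hypothetical latch_t pointers used only for this example. */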
94 
95 /** The deadlock detector. */
96 struct LatchDebug {
97 
98 	/** Debug mutex for control structures, should not be tracked
99 	by this module. */
100 	typedef OSMutex Mutex;
101 
102 	/** Comparator for the ThreadMap. */
103 	struct os_thread_id_less
104 		: public std::binary_function<
105 		  os_thread_id_t,
106 		  os_thread_id_t,
107 		  bool>
108 	{
109 		/** @return true if lhs < rhs */
110 		bool operator()(
111 			const os_thread_id_t& lhs,
112 			const os_thread_id_t& rhs) const
113 			UNIV_NOTHROW
114 		{
115 			return(os_thread_pf(lhs) < os_thread_pf(rhs));
116 		}
117 	};
118 
119 	/** For tracking a thread's latches. */
120 	typedef std::map<
121 		os_thread_id_t,
122 		Latches*,
123 		os_thread_id_less,
124 		ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
125 		ThreadMap;
126 
127 	/** Constructor */
128 	LatchDebug()
129 		UNIV_NOTHROW;
130 
131 	/** Destructor */
132 	~LatchDebug()
133 		UNIV_NOTHROW
134 	{
135 		m_mutex.destroy();
136 	}
137 
138 	/** Create a new instance if one doesn't exist, else return
139 	the existing one.
140 	@param[in]	add		add an empty entry if one is not
141 					found (default no)
142 	@return	pointer to a thread's acquired latches. */
143 	Latches* thread_latches(bool add = false)
144 		UNIV_NOTHROW;
145 
146 	/** Check that all the latches already owned by a thread have a lower
147 	level than limit.
148 	@param[in]	latches		the thread's existing (acquired) latches
149 	@param[in]	limit		to check against
150 	@return latched info if there is one with a level <= limit. */
151 	const Latched* less(
152 		const Latches*	latches,
153 		latch_level_t	limit) const
154 		UNIV_NOTHROW;
155 
156 	/** Checks if the level value exists in the thread's acquired latches.
157 	@param[in]	latches		the thread's existing (acquired) latches
158 	@param[in]	level		to lookup
159 	@return	latch if found or 0 */
160 	const latch_t* find(
161 		const Latches*	latches,
162 		latch_level_t	level) const
163 		UNIV_NOTHROW;
164 
165 	/**
166 	Checks if the level value exists in the thread's acquired latches.
167 	@param[in]	level		to lookup
168 	@return	latch if found or 0 */
169 	const latch_t* find(latch_level_t level)
170 		UNIV_NOTHROW;
171 
172 	/** Report error and abort.
173 	@param[in]	latches		thread's existing latches
174 	@param[in]	latched		The existing latch causing the
175 					invariant to fail
176 	@param[in]	level		The new level request that breaks
177 					the order */
178 	void crash(
179 		const Latches*	latches,
180 		const Latched*	latched,
181 		latch_level_t	level) const
182 		UNIV_NOTHROW;
183 
184 	/** Do a basic ordering check.
185 	@param[in]	latches		thread's existing latches
186 	@param[in]	requested_level	Level requested by latch
187 	@param[in]	level		declared lint so that we can
188 					do level - 1. The level of the
189 					latch that the thread is trying
190 					to acquire
191 	@return true if passes, else crash with error message. */
192 	inline bool basic_check(
193 		const Latches*	latches,
194 		latch_level_t	requested_level,
195 		lint		level) const
196 		UNIV_NOTHROW;
197 
198 	/** Adds a latch and its level in the thread level array. Allocates
199 	the memory for the array if called for the first time for this
200 	OS thread.  Makes the checks against other latch levels stored
201 	in the array for this thread.
202 
203 	@param[in]	latch	latch that the thread wants to acquire.
204 	@param[in]	level	latch level to check against */
205 	void lock_validate(
206 		const latch_t*	latch,
207 		latch_level_t	level)
208 		UNIV_NOTHROW
209 	{
210 		/* Ignore diagnostic latches, starting with '.' */
211 
212 		if (*latch->get_name() != '.'
213 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
214 
215 			ut_ad(level != SYNC_LEVEL_VARYING);
216 
217 			Latches*	latches = check_order(latch, level);
218 
219 			ut_a(latches->empty()
220 			     || level == SYNC_LEVEL_VARYING
221 			     || level == SYNC_NO_ORDER_CHECK
222 			     || latches->back().get_level()
223 			     == SYNC_NO_ORDER_CHECK
224 			     || latches->back().m_latch->get_level()
225 			     == SYNC_LEVEL_VARYING
226 			     || latches->back().get_level() >= level);
227 		}
228 	}
229 
230 	/** Adds a latch and its level in the thread level array. Allocates
231 	the memory for the array if called for the first time for this
232 	OS thread.  Makes the checks against other latch levels stored
233 	in the array for this thread.
234 
235 	@param[in]	latch	latch that the thread wants to acquire.
236 	@param[in]	level	latch level to check against */
237 	void lock_granted(
238 		const latch_t*	latch,
239 		latch_level_t	level)
240 		UNIV_NOTHROW
241 	{
242 		/* Ignore diagnostic latches, starting with '.' */
243 
244 		if (*latch->get_name() != '.'
245 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
246 
247 			Latches*	latches = thread_latches(true);
248 
249 			latches->push_back(Latched(latch, level));
250 		}
251 	}
252 
253 	/** For recursive X rw-locks.
254 	@param[in]	latch		The RW-Lock to relock  */
255 	void relock(const latch_t* latch)
256 		UNIV_NOTHROW
257 	{
258 		ut_a(latch->m_rw_lock);
259 
260 		latch_level_t	level = latch->get_level();
261 
262 		/* Ignore diagnostic latches, starting with '.' */
263 
264 		if (*latch->get_name() != '.'
265 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
266 
267 			Latches*	latches = thread_latches(true);
268 
269 			Latches::iterator	it = std::find(
270 				latches->begin(), latches->end(),
271 				Latched(latch, level));
272 
273 			ut_a(latches->empty()
274 			     || level == SYNC_LEVEL_VARYING
275 			     || level == SYNC_NO_ORDER_CHECK
276 			     || latches->back().m_latch->get_level()
277 			     == SYNC_LEVEL_VARYING
278 			     || latches->back().m_latch->get_level()
279 			     == SYNC_NO_ORDER_CHECK
280 			     || latches->back().get_level() >= level
281 			     || it != latches->end());
282 
283 			if (it == latches->end()) {
284 				latches->push_back(Latched(latch, level));
285 			} else {
286 				latches->insert(it, Latched(latch, level));
287 			}
288 		}
289 	}
290 
291 	/** Iterate over a thread's latches.
292 	@param[in]	functor		The callback
293 	@return true if the functor returns true. */
294 	bool for_each(const sync_check_functor_t& functor)
295 		UNIV_NOTHROW
296 	{
297 		if (const Latches* latches = thread_latches()) {
298 			Latches::const_iterator	end = latches->end();
299 			for (Latches::const_iterator it = latches->begin();
300 			     it != end; ++it) {
301 
302 				if (functor(it->m_level)) {
303 					return(true);
304 				}
305 			}
306 		}
307 
308 		return(false);
309 	}
310 
311 	/** Removes a latch from the thread level array if it is found there.
312 	@param[in]	latch		The latch that was released
313 	It is not an error if the latch is not found, as we presently are not
314 	able to determine the level for every latch reservation the program
315 	does. */
316 	void unlock(const latch_t* latch) UNIV_NOTHROW;
317 
318 	/** Get the level name
319 	@param[in]	level		The level ID to lookup
320 	@return level name */
321 	const std::string& get_level_name(latch_level_t level) const
322 		UNIV_NOTHROW
323 	{
324 		Levels::const_iterator	it = m_levels.find(level);
325 
326 		ut_ad(it != m_levels.end());
327 
328 		return(it->second);
329 	}
330 
331 	/** Initialise the debug data structures */
332 	static void init()
333 		UNIV_NOTHROW;
334 
335 	/** Shutdown the latch debug checking */
336 	static void shutdown()
337 		UNIV_NOTHROW;
338 
339 	/** @return the singleton instance */
340 	static LatchDebug* instance()
341 		UNIV_NOTHROW
342 	{
343 		return(s_instance);
344 	}
345 
346 	/** Create the singleton instance */
347 	static void create_instance()
348 		UNIV_NOTHROW
349 	{
350 		ut_ad(s_instance == NULL);
351 
352 		s_instance = UT_NEW_NOKEY(LatchDebug());
353 	}
354 
355 private:
356 	/** Disable copying */
357 	LatchDebug(const LatchDebug&);
358 	LatchDebug& operator=(const LatchDebug&);
359 
360 	/** Adds a latch and its level in the thread level array. Allocates
361 	the memory for the array if called first time for this OS thread.
362 	Makes the checks against other latch levels stored in the array
363 	for this thread.
364 
365 	@param[in]	latch	 pointer to a mutex or an rw-lock
366 	@param[in]	level	level in the latching order
367 	@return the thread's latches */
368 	Latches* check_order(
369 		const latch_t*	latch,
370 		latch_level_t	level)
371 		UNIV_NOTHROW;
372 
373 	/** Print the latches acquired by a thread
374 	@param[in]	latches		Latches acquired by a thread */
375 	void print_latches(const Latches* latches) const
376 		UNIV_NOTHROW;
377 
378 	/** Special handling for the RTR mutexes. We need to add proper
379 	levels for them if possible.
380 	@param[in]	latch		Latch to check
381 	@return true if it is an _RTR_ mutex */
382 	bool is_rtr_mutex(const latch_t* latch) const
383 		UNIV_NOTHROW
384 	{
385 		return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
386 		       || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
387 		       || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX);
388 	}
389 
390 private:
391 	/** Comparator for the Levels. */
392 	struct latch_level_less
393 		: public std::binary_function<
394 		  latch_level_t,
395 		  latch_level_t,
396 		  bool>
397 	{
398 		/** @return true if lhs < rhs */
399 		bool operator()(
400 			const latch_level_t& lhs,
401 			const latch_level_t& rhs) const
402 			UNIV_NOTHROW
403 		{
404 			return(lhs < rhs);
405 		}
406 	};
407 
408 	typedef std::map<
409 		latch_level_t,
410 		std::string,
411 		latch_level_less,
412 		ut_allocator<std::pair<const latch_level_t, std::string> > >
413 		Levels;
414 
415 	/** Mutex protecting the deadlock detector data structures. */
416 	Mutex			m_mutex;
417 
418 	/** Thread specific data. Protected by m_mutex. */
419 	ThreadMap		m_threads;
420 
421 	/** Mapping from latch level to its string representation. */
422 	Levels			m_levels;
423 
424 	/** The singleton instance. Must be created in single threaded mode. */
425 	static LatchDebug*	s_instance;
426 
427 public:
428 	/** For checking whether this module has been initialised or not. */
429 	static bool		s_initialized;
430 };
431 
432 /** The latch order checking infrastructure */
433 LatchDebug* LatchDebug::s_instance = NULL;
434 bool LatchDebug::s_initialized = false;
435 
436 #define LEVEL_MAP_INSERT(T)						\
437 do {									\
438 	std::pair<Levels::iterator, bool>	result =		\
439 		m_levels.insert(Levels::value_type(T, #T));		\
440 	ut_ad(result.second);						\
441 } while(0)
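/* For illustration: LEVEL_MAP_INSERT(SYNC_DICT) expands to an insert of the
pair (SYNC_DICT, "SYNC_DICT") into m_levels; the #T stringification is what
get_level_name() later returns when reporting latching order violations.
The ut_ad() guards against registering the same level twice. */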
442 
443 /** Setup the mapping from level ID to level name mapping */
444 LatchDebug::LatchDebug()
445 {
446 	m_mutex.init();
447 
448 	LEVEL_MAP_INSERT(SYNC_UNKNOWN);
449 	LEVEL_MAP_INSERT(SYNC_MUTEX);
450 	LEVEL_MAP_INSERT(RW_LOCK_SX);
451 	LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
452 	LEVEL_MAP_INSERT(RW_LOCK_S);
453 	LEVEL_MAP_INSERT(RW_LOCK_X);
454 	LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
455 	LEVEL_MAP_INSERT(SYNC_MONITOR_MUTEX);
456 	LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
457 	LEVEL_MAP_INSERT(SYNC_DOUBLEWRITE);
458 	LEVEL_MAP_INSERT(SYNC_BUF_FLUSH_LIST);
459 	LEVEL_MAP_INSERT(SYNC_BUF_BLOCK);
460 	LEVEL_MAP_INSERT(SYNC_BUF_PAGE_HASH);
461 	LEVEL_MAP_INSERT(SYNC_BUF_POOL);
462 	LEVEL_MAP_INSERT(SYNC_POOL);
463 	LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
464 	LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
465 	LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
466 	LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
467 	LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
468 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
469 	LEVEL_MAP_INSERT(SYNC_RECV);
470 	LEVEL_MAP_INSERT(SYNC_LOG_FLUSH_ORDER);
471 	LEVEL_MAP_INSERT(SYNC_LOG);
472 	LEVEL_MAP_INSERT(SYNC_LOG_WRITE);
473 	LEVEL_MAP_INSERT(SYNC_PAGE_CLEANER);
474 	LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
475 	LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
476 	LEVEL_MAP_INSERT(SYNC_THREADS);
477 	LEVEL_MAP_INSERT(SYNC_TRX);
478 	LEVEL_MAP_INSERT(SYNC_RW_TRX_HASH_ELEMENT);
479 	LEVEL_MAP_INSERT(SYNC_TRX_SYS);
480 	LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
481 	LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
482 	LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
483 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
484 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
485 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
486 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
487 	LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
488 	LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
489 	LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
490 	LEVEL_MAP_INSERT(SYNC_FSP);
491 	LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
492 	LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
493 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
494 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
495 	LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
496 	LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
497 	LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
498 	LEVEL_MAP_INSERT(SYNC_TREE_NODE);
499 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
500 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
501 	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
502 	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
503 	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
504 	LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
505 	LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
506 	LEVEL_MAP_INSERT(SYNC_DICT);
507 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
508 	LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
509 	LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
510 	LEVEL_MAP_INSERT(SYNC_RECV_WRITER);
511 	LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
512 	LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);
513 
514 	/* Enum count starts from 0 */
515 	ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
516 }
517 
518 /** Print the latches acquired by a thread
519 @param[in]	latches		Latches acquired by a thread */
520 void
521 LatchDebug::print_latches(const Latches* latches) const
522 	UNIV_NOTHROW
523 {
524 	ib::error() << "Latches already owned by this thread: ";
525 
526 	Latches::const_iterator	end = latches->end();
527 
528 	for (Latches::const_iterator it = latches->begin();
529 	     it != end;
530 	     ++it) {
531 
532 		ib::error()
533 			<< sync_latch_get_name(it->m_latch->get_id())
534 			<< " -> "
535 			<< it->m_level << " "
536 			<< "(" << get_level_name(it->m_level) << ")";
537 	}
538 }
539 
540 /** Report error and abort
541 @param[in]	latches		thread's existing latches
542 @param[in]	latched		The existing latch causing the invariant to fail
543 @param[in]	level		The new level request that breaks the order */
544 void
545 LatchDebug::crash(
546 	const Latches*	latches,
547 	const Latched*	latched,
548 	latch_level_t	level) const
549 	UNIV_NOTHROW
550 {
551 	const latch_t*		latch = latched->m_latch;
552 	const std::string&	in_level_name = get_level_name(level);
553 
554 	const std::string&	latch_level_name =
555 		get_level_name(latched->m_level);
556 
557 	ib::error()
558 		<< "Thread " << os_thread_pf(os_thread_get_curr_id())
559 		<< " already owns a latch "
560 		<< sync_latch_get_name(latch->m_id) << " at level"
561 		<< " " << latched->m_level << " (" << latch_level_name
562 		<< " ), which is at a lower/same level than the"
563 		<< " requested latch: "
564 		<< level << " (" << in_level_name << "). "
565 		<< latch->to_string();
566 
567 	print_latches(latches);
568 
569 	ut_error;
570 }
571 
572 /** Check that all the latches already owned by a thread have a lower
573 level than limit.
574 @param[in]	latches		the thread's existing (acquired) latches
575 @param[in]	limit		to check against
576 @return latched info if there is one with a level <= limit . */
577 const Latched*
578 LatchDebug::less(
579 	const Latches*	latches,
580 	latch_level_t	limit) const
581 	UNIV_NOTHROW
582 {
583 	Latches::const_iterator	end = latches->end();
584 
585 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
586 
587 		if (it->m_level <= limit) {
588 			return(&(*it));
589 		}
590 	}
591 
592 	return(NULL);
593 }
594 
595 /** Do a basic ordering check.
596 @param[in]	latches		thread's existing latches
597 @param[in]	requested_level	Level requested by latch
598 @param[in]	in_level	declared lint so that we can do level - 1.
599 				The level of the latch that the thread is
600 				trying to acquire
601 @return true if passes, else crash with error message. */
602 inline bool
603 LatchDebug::basic_check(
604 	const Latches*	latches,
605 	latch_level_t	requested_level,
606 	lint		in_level) const
607 	UNIV_NOTHROW
608 {
609 	latch_level_t	level = latch_level_t(in_level);
610 
611 	ut_ad(level < SYNC_LEVEL_MAX);
612 
613 	const Latched*	latched = less(latches, level);
614 
615 	if (latched != NULL) {
616 		crash(latches, latched, requested_level);
617 		return(false);
618 	}
619 
620 	return(true);
621 }
622 
623 /** Create a new instance if one doesn't exist, else return the existing one.
624 @param[in]	add		add an empty entry if one is not found
625 				(default no)
626 @return	pointer to a thread's acquired latches. */
627 Latches*
628 LatchDebug::thread_latches(bool add)
629 	UNIV_NOTHROW
630 {
631 	m_mutex.enter();
632 
633 	os_thread_id_t		thread_id = os_thread_get_curr_id();
634 	ThreadMap::iterator	lb = m_threads.lower_bound(thread_id);
635 
636 	if (lb != m_threads.end()
637 	    && !(m_threads.key_comp()(thread_id, lb->first))) {
638 
639 		Latches*	latches = lb->second;
640 
641 		m_mutex.exit();
642 
643 		return(latches);
644 
645 	} else if (!add) {
646 
647 		m_mutex.exit();
648 
649 		return(NULL);
650 
651 	} else {
652 		typedef ThreadMap::value_type value_type;
653 
654 		Latches*	latches = UT_NEW_NOKEY(Latches());
655 
656 		ut_a(latches != NULL);
657 
658 		latches->reserve(32);
659 
660 		m_threads.insert(lb, value_type(thread_id, latches));
661 
662 		m_mutex.exit();
663 
664 		return(latches);
665 	}
666 }
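/* Note for readers: the lower_bound()/key_comp() pair above is the usual
find-or-insert idiom; the hint iterator lets insert() reuse the position
already found by the lookup. A minimal usage sketch, assuming a debug build:

	Latches*	mine = LatchDebug::instance()->thread_latches(true);
	ut_ad(mine != NULL);	// allocated lazily on first use by this thread

Passing add=false instead returns NULL for a thread that has never
registered a latch. */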
667 
668 /** Checks if the level value exists in the thread's acquired latches.
669 @param[in]	latches		the thread's existing (acquired) latches
670 @param[in]	level		to lookup
671 @return	latch if found or 0 */
672 const latch_t*
673 LatchDebug::find(
674 	const Latches*	latches,
675 	latch_level_t	level) const UNIV_NOTHROW
676 {
677 	Latches::const_iterator	end = latches->end();
678 
679 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
680 
681 		if (it->m_level == level) {
682 
683 			return(it->m_latch);
684 		}
685 	}
686 
687 	return(0);
688 }
689 
690 /** Checks if the level value exists in the thread's acquired latches.
691 @param[in]	 level		The level to lookup
692 @return	latch if found or NULL */
693 const latch_t*
694 LatchDebug::find(latch_level_t level)
695 	UNIV_NOTHROW
696 {
697 	return(find(thread_latches(), level));
698 }
699 
700 /**
701 Adds a latch and its level in the thread level array. Allocates the memory
702 for the array if called first time for this OS thread. Makes the checks
703 against other latch levels stored in the array for this thread.
704 @param[in]	latch	pointer to a mutex or an rw-lock
705 @param[in]	level	level in the latching order
706 @return the thread's latches */
707 Latches*
708 LatchDebug::check_order(
709 	const latch_t*	latch,
710 	latch_level_t	level)
711 	UNIV_NOTHROW
712 {
713 	ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);
714 
715 	Latches*	latches = thread_latches(true);
716 
717 	/* NOTE that there is a problem with _NODE and _LEAF levels: if the
718 	B-tree height changes, then a leaf can change to an internal node
719 	or the other way around. We do not know at present if this can cause
720 	unnecessary assertion failures below. */
721 
722 	switch (level) {
723 	case SYNC_NO_ORDER_CHECK:
724 	case SYNC_EXTERN_STORAGE:
725 	case SYNC_TREE_NODE_FROM_HASH:
726 		/* Do no order checking */
727 		break;
728 
729 	case SYNC_TRX_SYS_HEADER:
730 
731 		if (srv_is_being_started) {
732 			/* This is violated during trx_sys_create_rsegs()
733 			when creating additional rollback segments when
734 			upgrading in srv_start(). */
735 			break;
736 		}
737 
738 		/* Fall through */
739 
740 	case SYNC_MONITOR_MUTEX:
741 	case SYNC_RECV:
742 	case SYNC_WORK_QUEUE:
743 	case SYNC_FTS_TOKENIZE:
744 	case SYNC_FTS_OPTIMIZE:
745 	case SYNC_FTS_CACHE:
746 	case SYNC_FTS_CACHE_INIT:
747 	case SYNC_PAGE_CLEANER:
748 	case SYNC_LOG:
749 	case SYNC_LOG_WRITE:
750 	case SYNC_LOG_FLUSH_ORDER:
751 	case SYNC_DOUBLEWRITE:
752 	case SYNC_SEARCH_SYS:
753 	case SYNC_THREADS:
754 	case SYNC_LOCK_SYS:
755 	case SYNC_LOCK_WAIT_SYS:
756 	case SYNC_RW_TRX_HASH_ELEMENT:
757 	case SYNC_TRX_SYS:
758 	case SYNC_IBUF_BITMAP_MUTEX:
759 	case SYNC_REDO_RSEG:
760 	case SYNC_NOREDO_RSEG:
761 	case SYNC_PURGE_LATCH:
762 	case SYNC_PURGE_QUEUE:
763 	case SYNC_DICT_OPERATION:
764 	case SYNC_DICT_HEADER:
765 	case SYNC_TRX_I_S_RWLOCK:
766 	case SYNC_IBUF_MUTEX:
767 	case SYNC_INDEX_ONLINE_LOG:
768 	case SYNC_STATS_AUTO_RECALC:
769 	case SYNC_POOL:
770 	case SYNC_POOL_MANAGER:
771 	case SYNC_RECV_WRITER:
772 
773 		basic_check(latches, level, level);
774 		break;
775 
776 	case SYNC_ANY_LATCH:
777 
778 		/* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
779 		if (is_rtr_mutex(latch)) {
780 
781 			const Latched*	latched = less(latches, level);
782 
783 			if (latched == NULL
784 			    || (latched != NULL
785 				&& is_rtr_mutex(latched->m_latch))) {
786 
787 				/* No violation */
788 				break;
789 
790 			}
791 
792 			crash(latches, latched, level);
793 
794 		} else {
795 			basic_check(latches, level, level);
796 		}
797 
798 		break;
799 
800 	case SYNC_TRX:
801 
802 		/* Either the thread must own the lock_sys.mutex, or
803 		it is allowed to own only ONE trx_t::mutex. */
804 
805 		if (less(latches, level) != NULL) {
806 			basic_check(latches, level, level - 1);
807 			ut_a(find(latches, SYNC_LOCK_SYS) != 0);
808 		}
809 		break;
810 
811 	case SYNC_BUF_FLUSH_LIST:
812 	case SYNC_BUF_POOL:
813 
814 		/* We can have multiple mutexes of this type therefore we
815 		can only check whether the greater than condition holds. */
816 
817 		basic_check(latches, level, level - 1);
818 		break;
819 
820 	case SYNC_BUF_PAGE_HASH:
821 
822 		/* Multiple page_hash locks are only allowed during
823 		buf_validate and that is where buf_pool mutex is already
824 		held. */
825 
826 		/* Fall through */
827 
828 	case SYNC_BUF_BLOCK:
829 
830 		/* Either the thread must own the (buffer pool) buf_pool->mutex
831 		or it is allowed to latch only ONE of (buffer block)
832 		block->mutex or buf_pool->zip_mutex. */
833 
834 		if (less(latches, level) != NULL) {
835 			basic_check(latches, level, level - 1);
836 			ut_a(find(latches, SYNC_BUF_POOL) != 0);
837 		}
838 		break;
839 
840 	case SYNC_IBUF_BITMAP:
841 
842 		/* Either the thread must own the master mutex to all
843 		the bitmap pages, or it is allowed to latch only ONE
844 		bitmap page. */
845 
846 		if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {
847 
848 			basic_check(latches, level, SYNC_IBUF_BITMAP - 1);
849 
850 		} else if (!srv_is_being_started) {
851 
852 			/* This is violated during trx_sys_create_rsegs()
853 			when creating additional rollback segments during
854 			upgrade. */
855 
856 			basic_check(latches, level, SYNC_IBUF_BITMAP);
857 		}
858 		break;
859 
860 	case SYNC_FSP_PAGE:
861 		ut_a(find(latches, SYNC_FSP) != 0);
862 		break;
863 
864 	case SYNC_FSP:
865 
866 		ut_a(find(latches, SYNC_FSP) != 0
867 		     || basic_check(latches, level, SYNC_FSP));
868 		break;
869 
870 	case SYNC_TRX_UNDO_PAGE:
871 
872 		/* Purge is allowed to read in as many UNDO pages as it likes.
873 		The purge thread can read the UNDO pages without any covering
874 		mutex. */
875 
876 		ut_a(find(latches, SYNC_REDO_RSEG) != 0
877 		     || find(latches, SYNC_NOREDO_RSEG) != 0
878 		     || basic_check(latches, level, level - 1));
879 		break;
880 
881 	case SYNC_RSEG_HEADER:
882 
883 		ut_a(find(latches, SYNC_REDO_RSEG) != 0
884 		     || find(latches, SYNC_NOREDO_RSEG) != 0);
885 		break;
886 
887 	case SYNC_RSEG_HEADER_NEW:
888 
889 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
890 		break;
891 
892 	case SYNC_TREE_NODE:
893 
894 		ut_a(find(latches, SYNC_FSP) == &fil_system.temp_space->latch
895 		     || find(latches, SYNC_INDEX_TREE)
896 		     || find(latches, SYNC_DICT_OPERATION)
897 		     || basic_check(latches, level, SYNC_TREE_NODE - 1));
898 		break;
899 
900 	case SYNC_TREE_NODE_NEW:
901 
902 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
903 		break;
904 
905 	case SYNC_INDEX_TREE:
906 
907 		basic_check(latches, level, SYNC_TREE_NODE - 1);
908 		break;
909 
910 	case SYNC_IBUF_TREE_NODE:
911 
912 		ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
913 		     || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
914 		break;
915 
916 	case SYNC_IBUF_TREE_NODE_NEW:
917 
918 		/* ibuf_add_free_page() allocates new pages for the change
919 		buffer while only holding the tablespace x-latch. These
920 		pre-allocated new pages may only be used while holding
921 		ibuf_mutex, in btr_page_alloc_for_ibuf(). */
922 
923 		ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
924 		     || find(latches, SYNC_FSP) != 0);
925 		break;
926 
927 	case SYNC_IBUF_INDEX_TREE:
928 
929 		if (find(latches, SYNC_FSP) != 0) {
930 			basic_check(latches, level, level - 1);
931 		} else {
932 			basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
933 		}
934 		break;
935 
936 	case SYNC_IBUF_PESS_INSERT_MUTEX:
937 
938 		basic_check(latches, level, SYNC_FSP - 1);
939 		ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
940 		break;
941 
942 	case SYNC_IBUF_HEADER:
943 
944 		basic_check(latches, level, SYNC_FSP - 1);
945 		ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
946 		ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
947 		break;
948 
949 	case SYNC_DICT:
950 		basic_check(latches, level, SYNC_DICT);
951 		break;
952 
953 	case SYNC_MUTEX:
954 	case SYNC_UNKNOWN:
955 	case SYNC_LEVEL_VARYING:
956 	case RW_LOCK_X:
957 	case RW_LOCK_X_WAIT:
958 	case RW_LOCK_S:
959 	case RW_LOCK_SX:
960 	case RW_LOCK_NOT_LOCKED:
961 		/* These levels should never be set for a latch. */
962 		ut_error;
963 		break;
964 	}
965 
966 	return(latches);
967 }
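/* Worked example (hypothetical call sequence): a thread that already holds an
index page latch registered at SYNC_TREE_NODE and then requests the dict_sys
mutex (SYNC_DICT) reaches the SYNC_DICT case above. basic_check() calls
less(latches, SYNC_DICT), finds the SYNC_TREE_NODE entry (its level is
<= SYNC_DICT), and crash() aborts after printing both levels via
get_level_name(). Acquiring in the opposite order passes, because each new
level is lower than everything already held. */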
968 
969 /** Removes a latch from the thread level array if it is found there.
970 @param[in]	latch		that was released/unlocked
971 It is not an error if the latch is not found, as we presently are not
972 able to determine the level for every latch reservation the program
973 does. */
975 void
976 LatchDebug::unlock(const latch_t* latch)
977 	UNIV_NOTHROW
978 {
979 	if (latch->get_level() == SYNC_LEVEL_VARYING) {
980 		// We don't have varying level mutexes
981 		ut_ad(latch->m_rw_lock);
982 	}
983 
984 	Latches*	latches;
985 
986 	if (*latch->get_name() == '.') {
987 
988 		/* Ignore diagnostic latches, starting with '.' */
989 
990 	} else if ((latches = thread_latches()) != NULL) {
991 
992 		Latches::reverse_iterator	rend = latches->rend();
993 
994 		for (Latches::reverse_iterator it = latches->rbegin();
995 		     it != rend;
996 		     ++it) {
997 
998 			if (it->m_latch != latch) {
999 
1000 				continue;
1001 			}
1002 
1003 			Latches::iterator	i = it.base();
1004 
1005 			latches->erase(--i);
1006 
1007 			/* If this thread doesn't own any more
1008 			latches remove from the map.
1009 
1010 			FIXME: Perhaps use the master thread
1011 			to do purge. Or, do it from close connection.
1012 			This could be expensive. */
1013 
1014 			if (latches->empty()) {
1015 
1016 				m_mutex.enter();
1017 
1018 				os_thread_id_t	thread_id;
1019 
1020 				thread_id = os_thread_get_curr_id();
1021 
1022 				m_threads.erase(thread_id);
1023 
1024 				m_mutex.exit();
1025 
1026 				UT_DELETE(latches);
1027 			}
1028 
1029 			return;
1030 		}
1031 
1032 		if (latch->get_level() != SYNC_LEVEL_VARYING) {
1033 			ib::error()
1034 				<< "Couldn't find latch "
1035 				<< sync_latch_get_name(latch->get_id());
1036 
1037 			print_latches(latches);
1038 
1039 			/** Must find the latch. */
1040 			ut_error;
1041 		}
1042 	}
1043 }
1044 
1045 /** Get the latch id from a latch name.
1046 @param[in]	name	Latch name
1047 @return latch id if found else LATCH_ID_NONE. */
1048 latch_id_t
1049 sync_latch_get_id(const char* name)
1050 {
1051 	LatchMetaData::const_iterator	end = latch_meta.end();
1052 
1053 	/* Linear scan should be OK, this should be extremely rare. */
1054 
1055 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1056 	     it != end;
1057 	     ++it) {
1058 
1059 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1060 
1061 			continue;
1062 
1063 		} else if (strcmp((*it)->get_name(), name) == 0) {
1064 
1065 			return((*it)->get_id());
1066 		}
1067 	}
1068 
1069 	return(LATCH_ID_NONE);
1070 }
1071 
1072 /** Get the latch name from a sync level
1073 @param[in]	level		Latch level to lookup
1074 @return NULL if not found. */
1075 const char*
1076 sync_latch_get_name(latch_level_t level)
1077 {
1078 	LatchMetaData::const_iterator	end = latch_meta.end();
1079 
1080 	/* Linear scan should be OK, this should be extremely rare. */
1081 
1082 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1083 	     it != end;
1084 	     ++it) {
1085 
1086 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1087 
1088 			continue;
1089 
1090 		} else if ((*it)->get_level() == level) {
1091 
1092 			return((*it)->get_name());
1093 		}
1094 	}
1095 
1096 	return(0);
1097 }
1098 
1099 /** Check if it is OK to acquire the latch.
1100 @param[in]	latch	latch type */
1101 void
1102 sync_check_lock_validate(const latch_t* latch)
1103 {
1104 	if (LatchDebug::instance() != NULL) {
1105 		LatchDebug::instance()->lock_validate(
1106 			latch, latch->get_level());
1107 	}
1108 }
1109 
1110 /** Note that the lock has been granted
1111 @param[in]	latch	latch type */
1112 void
1113 sync_check_lock_granted(const latch_t* latch)
1114 {
1115 	if (LatchDebug::instance() != NULL) {
1116 		LatchDebug::instance()->lock_granted(latch, latch->get_level());
1117 	}
1118 }
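/* Usage sketch (illustrative, not prescriptive): callers that acquire a latch
in debug builds report it in two steps, for example

	sync_check_lock_validate(latch);	// before waiting: order check
	// ... acquire the mutex or rw-lock ...
	sync_check_lock_granted(latch);		// after acquiring: record it

so that a latching order violation is detected before the thread can block
and possibly deadlock. */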
1119 
1120 /** Check if it is OK to acquire the latch.
1121 @param[in]	latch	latch type
1122 @param[in]	level	Latch level */
1123 void
1124 sync_check_lock(
1125 	const latch_t*	latch,
1126 	latch_level_t	level)
1127 {
1128 	if (LatchDebug::instance() != NULL) {
1129 
1130 		ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1131 		ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1132 
1133 		LatchDebug::instance()->lock_validate(latch, level);
1134 		LatchDebug::instance()->lock_granted(latch, level);
1135 	}
1136 }
1137 
1138 /** Check if it is OK to re-acquire the lock.
1139 @param[in]	latch		RW-LOCK to relock (recursive X locks) */
1140 void
1141 sync_check_relock(const latch_t* latch)
1142 {
1143 	if (LatchDebug::instance() != NULL) {
1144 		LatchDebug::instance()->relock(latch);
1145 	}
1146 }
1147 
1148 /** Removes a latch from the thread level array if it is found there.
1149 @param[in]	latch		The latch to unlock */
1150 void
1151 sync_check_unlock(const latch_t* latch)
1152 {
1153 	if (LatchDebug::instance() != NULL) {
1154 		LatchDebug::instance()->unlock(latch);
1155 	}
1156 }
1157 
1158 /** Checks if the level array for the current thread contains a
1159 mutex or rw-latch at the specified level.
1160 @param[in]	level		to find
1161 @return	a matching latch, or NULL if not found */
1162 const latch_t*
1163 sync_check_find(latch_level_t level)
1164 {
1165 	if (LatchDebug::instance() != NULL) {
1166 		return(LatchDebug::instance()->find(level));
1167 	}
1168 
1169 	return(NULL);
1170 }
1171 
1172 /** Iterate over the thread's latches.
1173 @param[in,out]	functor		called for each element.
1174 @return true if the functor returns true for any element */
1175 bool
1176 sync_check_iterate(const sync_check_functor_t& functor)
1177 {
1178 	if (LatchDebug* debug = LatchDebug::instance()) {
1179 		return(debug->for_each(functor));
1180 	}
1181 
1182 	return(false);
1183 }
1184 
1185 /** Enable sync order checking.
1186 
1187 Note: We don't enforce any synchronisation checks. The caller must ensure
1188 that no races can occur */
1189 void
1190 sync_check_enable()
1191 {
1192 	if (!srv_sync_debug) {
1193 
1194 		return;
1195 	}
1196 
1197 	/* We should always call this before we create threads. */
1198 
1199 	LatchDebug::create_instance();
1200 }
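/* Illustrative note (an assumption about the option name, based on the
srv_sync_debug variable above): srv_sync_debug is expected to be bound to the
debug-build-only startup option innodb_sync_debug (OFF by default), so the
checks in this file would only be active when the server is started with, e.g.

	mysqld --innodb-sync-debug=1

The instance must be created before any worker threads start, hence the
"before we create threads" caveat above. */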
1201 
1202 /** Initialise the debug data structures */
1203 void
1204 LatchDebug::init()
1205 	UNIV_NOTHROW
1206 {
1207 	mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
1208 }
1209 
1210 /** Shutdown the latch debug checking
1211 
1212 Note: We don't enforce any synchronisation checks. The caller must ensure
1213 that no races can occur */
1214 void
1215 LatchDebug::shutdown()
1216 	UNIV_NOTHROW
1217 {
1218 	mutex_free(&rw_lock_debug_mutex);
1219 
1220 	ut_a(s_initialized);
1221 
1222 	s_initialized = false;
1223 
1224 	UT_DELETE(s_instance);
1225 
1226 	LatchDebug::s_instance = NULL;
1227 }
1228 
1229 /** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
1230 because the debug mutex is also acquired in sync0arr while holding the OS
1231 mutex protecting the sync array, and the ordinary mutex_enter might
1232 recursively call routines in sync0arr, leading to a deadlock on the OS
1233 mutex. */
1234 void
1235 rw_lock_debug_mutex_enter()
1236 {
1237 	mutex_enter(&rw_lock_debug_mutex);
1238 }
1239 
1240 /** Releases the debug mutex. */
1241 void
1242 rw_lock_debug_mutex_exit()
1243 {
1244 	mutex_exit(&rw_lock_debug_mutex);
1245 }
1246 #endif /* UNIV_DEBUG */
1247 
1248 /* Meta data for all the InnoDB latches. If the latch is not recorded
1249 here then it will not be considered for deadlock checks. */
1250 LatchMetaData	latch_meta;
1251 
1252 /** Load the latch meta data. */
1253 static
1254 void
1255 sync_latch_meta_init()
1256 	UNIV_NOTHROW
1257 {
1258 	latch_meta.resize(LATCH_ID_MAX);
1259 
1260 	/* The latches should be ordered on latch_id_t, so that we can
1261 	index directly into the vector to update and fetch meta-data. */
1262 
1263 #if defined PFS_SKIP_BUFFER_MUTEX_RWLOCK || defined PFS_GROUP_BUFFER_SYNC
1264 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK, PFS_NOT_INSTRUMENTED);
1265 #else
1266 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK,
1267 			buffer_block_mutex_key);
1268 #endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK || PFS_GROUP_BUFFER_SYNC */
1269 
1270 	LATCH_ADD_MUTEX(BUF_POOL, SYNC_BUF_POOL, buf_pool_mutex_key);
1271 
1272 	LATCH_ADD_MUTEX(BUF_POOL_ZIP, SYNC_BUF_BLOCK, buf_pool_zip_mutex_key);
1273 
1274 	LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
1275 			dict_foreign_err_mutex_key);
1276 
1277 	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);
1278 
1279 	LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);
1280 
1281 	LATCH_ADD_MUTEX(FLUSH_LIST, SYNC_BUF_FLUSH_LIST, flush_list_mutex_key);
1282 
1283 	LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);
1284 
1285 	LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);
1286 
1287 	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
1288 			fts_pll_tokenize_mutex_key);
1289 
1290 	LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
1291 			hash_table_mutex_key);
1292 
1293 	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
1294 			ibuf_bitmap_mutex_key);
1295 
1296 	LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);
1297 
1298 	LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
1299 			ibuf_pessimistic_insert_mutex_key);
1300 
1301 	LATCH_ADD_MUTEX(LOG_SYS, SYNC_LOG, log_sys_mutex_key);
1302 
1303 	LATCH_ADD_MUTEX(LOG_WRITE, SYNC_LOG_WRITE, log_sys_write_mutex_key);
1304 
1305 	LATCH_ADD_MUTEX(LOG_FLUSH_ORDER, SYNC_LOG_FLUSH_ORDER,
1306 			log_flush_order_mutex_key);
1307 
1308 	LATCH_ADD_MUTEX(MUTEX_LIST, SYNC_NO_ORDER_CHECK, mutex_list_mutex_key);
1309 
1310 	LATCH_ADD_MUTEX(PAGE_CLEANER, SYNC_PAGE_CLEANER,
1311 			page_cleaner_mutex_key);
1312 
1313 	LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
1314 			purge_sys_pq_mutex_key);
1315 
1316 	LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
1317 			recalc_pool_mutex_key);
1318 
1319 	LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);
1320 
1321 	LATCH_ADD_MUTEX(RECV_WRITER, SYNC_RECV_WRITER, recv_writer_mutex_key);
1322 
1323 	LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);
1324 
1325 	LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);
1326 
1327 #ifdef UNIV_DEBUG
1328 	/* Mutex names starting with '.' are not tracked. They are assumed
1329 	to be diagnostic mutexes used in debugging. */
1330 	latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
1331 		LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
1332 			SYNC_NO_ORDER_CHECK,
1333 			rw_lock_debug_mutex_key);
1334 #endif /* UNIV_DEBUG */
1335 
1336 	LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
1337 			rtr_active_mutex_key);
1338 
1339 	LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);
1340 
1341 	LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);
1342 
1343 	LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
1344 			rw_lock_list_mutex_key);
1345 
1346 	LATCH_ADD_MUTEX(RW_LOCK_MUTEX, SYNC_NO_ORDER_CHECK, rw_lock_mutex_key);
1347 
1348 	LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
1349 			srv_innodb_monitor_mutex_key);
1350 
1351 	LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
1352 			srv_misc_tmpfile_mutex_key);
1353 
1354 	LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
1355 			srv_monitor_file_mutex_key);
1356 
1357 	LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);
1358 
1359 	LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);
1360 
1361 	LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
1362 			trx_pool_manager_mutex_key);
1363 
1364 	LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);
1365 
1366 	LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);
1367 
1368 	LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
1369 			lock_wait_mutex_key);
1370 
1371 	LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);
1372 
1373 	LATCH_ADD_MUTEX(SRV_SYS, SYNC_THREADS, srv_sys_mutex_key);
1374 
1375 	LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);
1376 
1377 	LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
1378 			page_zip_stat_per_index_mutex_key);
1379 
1380 #ifndef PFS_SKIP_EVENT_MUTEX
1381 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1382 			event_manager_mutex_key);
1383 #else
1384 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1385 			PFS_NOT_INSTRUMENTED);
1386 #endif /* !PFS_SKIP_EVENT_MUTEX */
1387 
1388 	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK, event_mutex_key);
1389 
1390 	LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
1391 			sync_array_mutex_key);
1392 
1393 	LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
1394 			PFS_NOT_INSTRUMENTED);
1395 
1396 	LATCH_ADD_MUTEX(OS_AIO_WRITE_MUTEX, SYNC_NO_ORDER_CHECK,
1397 			PFS_NOT_INSTRUMENTED);
1398 
1399 	LATCH_ADD_MUTEX(OS_AIO_LOG_MUTEX, SYNC_NO_ORDER_CHECK,
1400 			PFS_NOT_INSTRUMENTED);
1401 
1402 	LATCH_ADD_MUTEX(OS_AIO_IBUF_MUTEX, SYNC_NO_ORDER_CHECK,
1403 			PFS_NOT_INSTRUMENTED);
1404 
1405 	LATCH_ADD_MUTEX(OS_AIO_SYNC_MUTEX, SYNC_NO_ORDER_CHECK,
1406 			PFS_NOT_INSTRUMENTED);
1407 
1408 	LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
1409 			row_drop_list_mutex_key);
1410 
1411 	LATCH_ADD_MUTEX(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
1412 			index_online_log_key);
1413 
1414 	LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);
1415 
1416 	// Add the RW locks
1417 	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);
1418 
1419 	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1420 			 buf_block_lock_key);
1421 
1422 #ifdef UNIV_DEBUG
1423 	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_LEVEL_VARYING,
1424 			 buf_block_debug_latch_key);
1425 #endif /* UNIV_DEBUG */
1426 
1427 	LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT_OPERATION,
1428 			 dict_operation_lock_key);
1429 
1430 	LATCH_ADD_RWLOCK(CHECKPOINT, SYNC_NO_ORDER_CHECK, checkpoint_lock_key);
1431 
1432 	LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);
1433 
1434 	LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);
1435 
1436 	LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
1437 			 fts_cache_init_rw_lock_key);
1438 
1439 	LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
1440 			 trx_i_s_cache_lock_key);
1441 
1442 	LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);
1443 
1444 	LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
1445 			 index_tree_rw_lock_key);
1446 
1447 	LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);
1448 
1449 	LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
1450 		  hash_table_locks_key);
1451 
1452 	LATCH_ADD_MUTEX(SYNC_DEBUG_MUTEX, SYNC_NO_ORDER_CHECK,
1453 			PFS_NOT_INSTRUMENTED);
1454 
1455 	/* JAN: TODO: Add PFS instrumentation */
1456 	LATCH_ADD_MUTEX(SCRUB_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
1457 			PFS_NOT_INSTRUMENTED);
1458 	LATCH_ADD_MUTEX(DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
1459 			PFS_NOT_INSTRUMENTED);
1460 	LATCH_ADD_MUTEX(BTR_DEFRAGMENT_MUTEX, SYNC_NO_ORDER_CHECK,
1461 			PFS_NOT_INSTRUMENTED);
1462 	LATCH_ADD_MUTEX(FIL_CRYPT_STAT_MUTEX, SYNC_NO_ORDER_CHECK,
1463 			PFS_NOT_INSTRUMENTED);
1464 	LATCH_ADD_MUTEX(FIL_CRYPT_DATA_MUTEX, SYNC_NO_ORDER_CHECK,
1465 			PFS_NOT_INSTRUMENTED);
1466 	LATCH_ADD_MUTEX(FIL_CRYPT_THREADS_MUTEX, SYNC_NO_ORDER_CHECK,
1467 			PFS_NOT_INSTRUMENTED);
1468 	LATCH_ADD_MUTEX(RW_TRX_HASH_ELEMENT, SYNC_RW_TRX_HASH_ELEMENT,
1469 			rw_trx_hash_element_mutex_key);
1470 
1471 	latch_id_t	id = LATCH_ID_NONE;
1472 
1473 	/* The array should be ordered on latch ID. We need to
1474 	index directly into it from the mutex policy to update
1475 	the counters and access the meta-data. */
1476 
1477 	for (LatchMetaData::iterator it = latch_meta.begin();
1478 	     it != latch_meta.end();
1479 	     ++it) {
1480 
1481 		const latch_meta_t*	meta = *it;
1482 
1483 
1484 		/* Skip blank entries */
1485 		if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
1486 			continue;
1487 		}
1488 
1489 		ut_a(id < meta->get_id());
1490 
1491 		id = meta->get_id();
1492 	}
1493 }
1494 
1495 /** Destroy the latch meta data */
1496 static
1497 void
1498 sync_latch_meta_destroy()
1499 {
1500 	for (LatchMetaData::iterator it = latch_meta.begin();
1501 	     it != latch_meta.end();
1502 	     ++it) {
1503 
1504 		UT_DELETE(*it);
1505 	}
1506 
1507 	latch_meta.clear();
1508 }
1509 
1510 /** Track mutex file creation name and line number. This is to avoid storing
1511 { const char* name; uint16_t line; } in every instance, which would push
1512 sizeof(Mutex) beyond 64 bytes. We use a lookup table to store it separately.
1513 Fetching the values is very rare, only required for diagnostic purposes. And,
1514 we don't create/destroy mutexes that frequently. */
1515 struct CreateTracker {
1516 
1517 	/** Constructor */
1518 	CreateTracker()
1519 		UNIV_NOTHROW
1520 	{
1521 		m_mutex.init();
1522 	}
1523 
1524 	/** Destructor */
1525 	~CreateTracker()
1526 		UNIV_NOTHROW
1527 	{
1528 		ut_ad(m_files.empty());
1529 
1530 		m_mutex.destroy();
1531 	}
1532 
1533 	/** Register where the latch was created
1534 	@param[in]	ptr		Latch instance
1535 	@param[in]	filename	Where created
1536 	@param[in]	line		Line number in filename */
1537 	void register_latch(
1538 		const void*	ptr,
1539 		const char*	filename,
1540 		uint16_t	line)
1541 		UNIV_NOTHROW
1542 	{
1543 		m_mutex.enter();
1544 
1545 		Files::iterator	lb = m_files.lower_bound(ptr);
1546 
1547 		ut_ad(lb == m_files.end()
1548 		      || m_files.key_comp()(ptr, lb->first));
1549 
1550 		typedef Files::value_type value_type;
1551 
1552 		m_files.insert(lb, value_type(ptr, File(filename, line)));
1553 
1554 		m_mutex.exit();
1555 	}
1556 
1557 	/** Deregister a latch - when it is destroyed
1558 	@param[in]	ptr		Latch instance being destroyed */
1559 	void deregister_latch(const void* ptr)
1560 		UNIV_NOTHROW
1561 	{
1562 		m_mutex.enter();
1563 
1564 		Files::iterator	lb = m_files.lower_bound(ptr);
1565 
1566 		ut_ad(lb != m_files.end()
1567 		      && !(m_files.key_comp()(ptr, lb->first)));
1568 
1569 		m_files.erase(lb);
1570 
1571 		m_mutex.exit();
1572 	}
1573 
1574 	/** Get the create string, format is "name:line"
1575 	@param[in]	ptr		Latch instance
1576 	@return the create string or "" if not found */
1577 	std::string get(const void* ptr)
1578 		UNIV_NOTHROW
1579 	{
1580 		m_mutex.enter();
1581 
1582 		std::string	created;
1583 
1584 		Files::iterator	lb = m_files.lower_bound(ptr);
1585 
1586 		if (lb != m_files.end()
1587 		    && !(m_files.key_comp()(ptr, lb->first))) {
1588 
1589 			std::ostringstream	msg;
1590 
1591 			msg << lb->second.m_name << ":" << lb->second.m_line;
1592 
1593 			created = msg.str();
1594 		}
1595 
1596 		m_mutex.exit();
1597 
1598 		return(created);
1599 	}
1600 
1601 private:
1602 	/** For tracking the filename and line number */
1603 	struct File {
1604 
1605 		/** Constructor */
1606 		File() UNIV_NOTHROW : m_name(), m_line() { }
1607 
1608 		/** Constructor
1609 		@param[in]	name		Filename where created
1610 		@param[in]	line		Line number where created */
1611 		File(const char*  name, uint16_t line)
1612 			UNIV_NOTHROW
1613 			:
1614 			m_name(sync_basename(name)),
1615 			m_line(line)
1616 		{
1617 			/* No op */
1618 		}
1619 
1620 		/** Filename where created */
1621 		std::string		m_name;
1622 
1623 		/** Line number where created */
1624 		uint16_t		m_line;
1625 	};
1626 
1627 	/** Map the mutex instance to where it was created */
1628 	typedef std::map<
1629 		const void*,
1630 		File,
1631 		std::less<const void*>,
1632 		ut_allocator<std::pair<const void* const, File> > >
1633 		Files;
1634 
1635 	typedef OSMutex	Mutex;
1636 
1637 	/** Mutex protecting m_files */
1638 	Mutex			m_mutex;
1639 
1640 	/** Track the latch creation */
1641 	Files			m_files;
1642 };
1643 
1644 /** Track latch creation location. For reducing the size of the latches */
1645 static CreateTracker	create_tracker;
1646 
1647 /** Register a latch, called when it is created
1648 @param[in]	ptr		Latch instance that was created
1649 @param[in]	filename	Filename where it was created
1650 @param[in]	line		Line number in filename */
1651 void
1652 sync_file_created_register(
1653 	const void*	ptr,
1654 	const char*	filename,
1655 	uint16_t	line)
1656 {
1657 	create_tracker.register_latch(ptr, filename, line);
1658 }
1659 
1660 /** Deregister a latch, called when it is destroyed
1661 @param[in]	ptr		Latch to be destroyed */
1662 void
1663 sync_file_created_deregister(const void* ptr)
1664 {
1665 	create_tracker.deregister_latch(ptr);
1666 }
1667 
1668 /** Get the string where the file was created. Its format is "name:line"
1669 @param[in]	ptr		Latch instance
1670 @return created information or "" if can't be found */
1671 std::string
1672 sync_file_created_get(const void* ptr)
1673 {
1674 	return(create_tracker.get(ptr));
1675 }
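/* Example of the returned string (hypothetical values): a mutex registered
from line 1234 of buf0buf.cc would yield "buf0buf.cc:1234", while an
unregistered pointer yields "". Only the basename is kept, via
sync_basename() in CreateTracker::File. */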
1676 
1677 /** Initializes the synchronization data structures. */
1678 void
1679 sync_check_init()
1680 {
1681 	ut_ad(!LatchDebug::s_initialized);
1682 	ut_d(LatchDebug::s_initialized = true);
1683 
1684 	sync_latch_meta_init();
1685 
1686 	/* Init the rw-lock & mutex list and create the mutex to protect it. */
1687 
1688 	UT_LIST_INIT(rw_lock_list, &rw_lock_t::list);
1689 
1690 	mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);
1691 
1692 	ut_d(LatchDebug::init());
1693 
1694 	sync_array_init();
1695 }
1696 
1697 /** Free the InnoDB synchronization data structures. */
1698 void
1699 sync_check_close()
1700 {
1701 	ut_d(LatchDebug::shutdown());
1702 
1703 	mutex_free(&rw_lock_list_mutex);
1704 
1705 	sync_array_close();
1706 
1707 	sync_latch_meta_destroy();
1708 }
1709 
1710