1 /*****************************************************************************
2 
3 Copyright (c) 2014, 2021, Oracle and/or its affiliates.
4 
5 Portions of this file contain modifications contributed and copyrighted by
6 Google, Inc. Those modifications are gratefully acknowledged and are described
7 briefly in the InnoDB documentation. The contributions by Google are
8 incorporated with their permission, and subject to the conditions contained in
9 the file COPYING.Google.
10 
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License, version 2.0,
13 as published by the Free Software Foundation.
14 
15 This program is also distributed with certain software (including
16 but not limited to OpenSSL) that is licensed under separate terms,
17 as designated in a particular file or component or in included license
18 documentation.  The authors of MySQL hereby grant you an additional
19 permission to link the program and your derivative works with the
20 separately licensed software that they have included with MySQL.
21 
22 This program is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
25 GNU General Public License, version 2.0, for more details.
26 
27 You should have received a copy of the GNU General Public License along with
28 this program; if not, write to the Free Software Foundation, Inc.,
29 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
30 
31 *****************************************************************************/
32 
33 /**************************************************//**
34 @file sync/sync0debug.cc
35 Debug checks for latches.
36 
37 Created 2012-08-21 Sunny Bains
38 *******************************************************/
39 
40 #include "sync0sync.h"
41 #include "sync0debug.h"
42 
43 #include "ut0new.h"
44 #include "srv0start.h"
45 
46 #include <map>
47 #include <vector>
48 #include <string>
49 #include <algorithm>
50 #include <iostream>
51 
52 #ifdef UNIV_DEBUG
53 
54 my_bool		srv_sync_debug;
55 
56 /** The global mutex which protects debug info lists of all rw-locks.
57 To modify the debug info list of an rw-lock, this mutex has to be
58 acquired in addition to the mutex protecting the lock. */
59 static ib_mutex_t		rw_lock_debug_mutex;
60 
61 /** If deadlock detection does not get the mutex immediately,
62 it may wait for this event */
63 static os_event_t		rw_lock_debug_event;
64 
65 /** This is set to true if there may be waiters for the event */
66 static bool			rw_lock_debug_waiters;
67 
68 /** The latch held by a thread */
69 struct Latched {
70 
71 	/** Constructor */
72 	Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
73 
74 	/** Constructor
75 	@param[in]	latch		Latch instance
76 	@param[in]	level		Level of latch held */
77 	Latched(const latch_t*	latch,
78 		latch_level_t	level)
79 		:
80 		m_latch(latch),
81 		m_level(level)
82 	{
83 		/* No op */
84 	}
85 
86 	/** @return the latch level */
87 	latch_level_t get_level() const
88 	{
89 		return(m_level);
90 	}
91 
92 	/** Check if the rhs latch and level match
93 	@param[in]	rhs		instance to compare with
94 	@return true on match */
95 	bool operator==(const Latched& rhs) const
96 	{
97 		return(m_latch == rhs.m_latch && m_level == rhs.m_level);
98 	}
99 
100 	/** The latch instance */
101 	const latch_t*		m_latch;
102 
103 	/** The latch level. For buffer blocks we can pass a separate latch
104 	level to check against, see buf_block_dbg_add_level() */
105 	latch_level_t		m_level;
106 };
107 
108 /** Thread specific latches. This is ordered on level in descending order. */
109 typedef std::vector<Latched, ut_allocator<Latched> > Latches;
110 
111 /** The deadlock detector. */
112 struct LatchDebug {
113 
114 	/** Debug mutex for control structures, should not be tracked
115 	by this module. */
116 	typedef OSMutex Mutex;
117 
118 	/** Comparator for the ThreadMap. */
119 	struct os_thread_id_less
120 		: public std::binary_function<
121 		  os_thread_id_t,
122 		  os_thread_id_t,
123 		  bool>
124 	{
125 		/** @return true if lhs < rhs */
126 		bool operator()(
127 			const os_thread_id_t& lhs,
128 			const os_thread_id_t& rhs) const
129 			UNIV_NOTHROW
130 		{
131 			return(os_thread_pf(lhs) < os_thread_pf(rhs));
132 		}
133 	};
134 
135 	/** For tracking a thread's latches. */
136 	typedef std::map<
137 		os_thread_id_t,
138 		Latches*,
139 		os_thread_id_less,
140 		ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
141 		ThreadMap;
142 
143 	/** Constructor */
144 	LatchDebug()
145 		UNIV_NOTHROW;
146 
147 	/** Destructor */
148 	~LatchDebug()
149 		UNIV_NOTHROW
150 	{
151 		m_mutex.destroy();
152 	}
153 
154 	/** Create a new instance if one doesn't exist, else return
155 	the existing one.
156 	@param[in]	add		add an empty entry if one is not
157 					found (default no)
158 	@return	pointer to a thread's acquired latches. */
159 	Latches* thread_latches(bool add = false)
160 		UNIV_NOTHROW;
161 
162 	/** Check that all the latches already owned by a thread have a lower
163 	level than limit.
164 	@param[in]	latches		the thread's existing (acquired) latches
165 	@param[in]	limit		to check against
166 	@return the Latched instance if there is one with a level <= limit, else NULL. */
167 	const Latched* less(
168 		const Latches*	latches,
169 		latch_level_t	limit) const
170 		UNIV_NOTHROW;
171 
172 	/** Checks if the level value exists in the thread's acquired latches.
173 	@param[in]	latches		the thread's existing (acquired) latches
174 	@param[in]	level		to lookup
175 	@return	latch if found or 0 */
176 	const latch_t* find(
177 		const Latches*	latches,
178 		latch_level_t	level) const
179 		UNIV_NOTHROW;
180 
181 	/**
182 	Checks if the level value exists in the thread's acquired latches.
183 	@param[in]	level		to lookup
184 	@return	latch if found or 0 */
185 	const latch_t* find(latch_level_t level)
186 		UNIV_NOTHROW;
187 
188 	/** Report error and abort.
189 	@param[in]	latches		thread's existing latches
190 	@param[in]	latched		The existing latch causing the
191 					invariant to fail
192 	@param[in]	level		The new level request that breaks
193 					the order */
194 	void crash(
195 		const Latches*	latches,
196 		const Latched*	latched,
197 		latch_level_t	level) const
198 		UNIV_NOTHROW;
199 
200 	/** Do a basic ordering check.
201 	@param[in]	latches		thread's existing latches
202 	@param[in]	requested_level	Level requested by latch
203 	@param[in]	level		declared ulint so that we can
204 					do level - 1. The level of the
205 					latch that the thread is trying
206 					to acquire
207 	@return true if passes, else crash with error message. */
208 	bool basic_check(
209 		const Latches*	latches,
210 		latch_level_t	requested_level,
211 		ulint		level) const
212 		UNIV_NOTHROW;
213 
214 	/** Adds a latch and its level in the thread level array. Allocates
215 	the memory for the array if called for the first time for this
216 	OS thread.  Makes the checks against other latch levels stored
217 	in the array for this thread.
218 
219 	@param[in]	latch	latch that the thread wants to acquire.
220 	@param[in]	level	latch level to check against */
221 	void lock_validate(
222 		const latch_t*	latch,
223 		latch_level_t	level)
224 		UNIV_NOTHROW
225 	{
226 		/* Ignore diagnostic latches, starting with '.' */
227 
228 		if (*latch->get_name() != '.'
229 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
230 
231 			ut_ad(level != SYNC_LEVEL_VARYING);
232 
233 			Latches*	latches = check_order(latch, level);
234 
235 			ut_a(latches->empty()
236 			     || level == SYNC_LEVEL_VARYING
237 			     || level == SYNC_NO_ORDER_CHECK
238 			     || latches->back().get_level()
239 			     == SYNC_NO_ORDER_CHECK
240 			     || latches->back().m_latch->get_level()
241 			     == SYNC_LEVEL_VARYING
242 			     || latches->back().get_level() >= level);
243 		}
244 	}
245 
246 	/** Adds a latch and its level in the thread level array. Allocates
247 	the memory for the array if called for the first time for this
248 	OS thread.  Makes the checks against other latch levels stored
249 	in the array for this thread.
250 
251 	@param[in]	latch	latch that the thread wants to acquire.
252 	@param[in]	level	latch level to check against */
253 	void lock_granted(
254 		const latch_t*	latch,
255 		latch_level_t	level)
256 		UNIV_NOTHROW
257 	{
258 		/* Ignore diagnostic latches, starting with '.' */
259 
260 		if (*latch->get_name() != '.'
261 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
262 
263 			Latches*	latches = thread_latches(true);
264 
265 			latches->push_back(Latched(latch, level));
266 		}
267 	}
268 
269 	/** For recursive X rw-locks.
270 	@param[in]	latch		The RW-Lock to relock  */
271 	void relock(const latch_t* latch)
272 		UNIV_NOTHROW
273 	{
274 		ut_a(latch->m_rw_lock);
275 
276 		latch_level_t	level = latch->get_level();
277 
278 		/* Ignore diagnostic latches, starting with '.' */
279 
280 		if (*latch->get_name() != '.'
281 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
282 
283 			Latches*	latches = thread_latches(true);
284 
285 			Latches::iterator	it = std::find(
286 				latches->begin(), latches->end(),
287 				Latched(latch, level));
288 
289 			ut_a(latches->empty()
290 			     || level == SYNC_LEVEL_VARYING
291 			     || level == SYNC_NO_ORDER_CHECK
292 			     || latches->back().m_latch->get_level()
293 			     == SYNC_LEVEL_VARYING
294 			     || latches->back().m_latch->get_level()
295 			     == SYNC_NO_ORDER_CHECK
296 			     || latches->back().get_level() >= level
297 			     || it != latches->end());
298 
299 			if (it == latches->end()) {
300 				latches->push_back(Latched(latch, level));
301 			} else {
302 				latches->insert(it, Latched(latch, level));
303 			}
304 		}
305 	}
306 
307 	/** Iterate over a thread's latches.
308 	@param[in,out]	functor		The callback
309 	@return true if the functor returns true. */
310 	bool for_each(sync_check_functor_t& functor)
311 		UNIV_NOTHROW
312 	{
313 		const Latches*	latches = thread_latches();
314 
315 		if (latches == 0) {
316 			return(functor.result());
317 		}
318 
319 		Latches::const_iterator	end = latches->end();
320 
321 		for (Latches::const_iterator it = latches->begin();
322 		     it != end;
323 		     ++it) {
324 
325 			if (functor(it->m_level)) {
326 				break;
327 			}
328 		}
329 
330 		return(functor.result());
331 	}
332 
333 	/** Removes a latch from the thread level array if it is found there.
334 	@param[in]	latch		The latch that was released
335 	It is not an error if the latch is not found, as we presently are not
336 	able to determine the level for every latch reservation the program
337 	does. */
338 	void unlock(const latch_t* latch) UNIV_NOTHROW;
339 
340 	/** Get the level name
341 	@param[in]	level		The level ID to lookup
342 	@return level name */
343 	const std::string& get_level_name(latch_level_t level) const
344 		UNIV_NOTHROW
345 	{
346 		Levels::const_iterator	it = m_levels.find(level);
347 
348 		ut_ad(it != m_levels.end());
349 
350 		return(it->second);
351 	}
352 
353 	/** Initialise the debug data structures */
354 	static void init()
355 		UNIV_NOTHROW;
356 
357 	/** Shutdown the latch debug checking */
358 	static void shutdown()
359 		UNIV_NOTHROW;
360 
361 	/** @return the singleton instance */
362 	static LatchDebug* instance()
363 		UNIV_NOTHROW
364 	{
365 		return(s_instance);
366 	}
367 
368 	/** Create the singleton instance */
369 	static void create_instance()
370 		UNIV_NOTHROW
371 	{
372 		ut_ad(s_instance == NULL);
373 
374 		s_instance = UT_NEW_NOKEY(LatchDebug());
375 	}
376 
377 private:
378 	/** Disable copying */
379 	LatchDebug(const LatchDebug&);
380 	LatchDebug& operator=(const LatchDebug&);
381 
382 	/** Adds a latch and its level in the thread level array. Allocates
383 	the memory for the array if called for the first time for this OS thread.
384 	Makes the checks against other latch levels stored in the array
385 	for this thread.
386 
387 	@param[in]	latch	 pointer to a mutex or an rw-lock
388 	@param[in]	level	level in the latching order
389 	@return the thread's latches */
390 	Latches* check_order(
391 		const latch_t*	latch,
392 		latch_level_t	level)
393 		UNIV_NOTHROW;
394 
395 	/** Print the latches acquired by a thread
396 	@param[in]	latches		Latches acquired by a thread */
397 	void print_latches(const Latches* latches) const
398 		UNIV_NOTHROW;
399 
400 	/** Special handling for the RTR mutexes. We need to add proper
401 	levels for them if possible.
402 	@param[in]	latch		Latch to check
403 	@return true if it is an _RTR_ mutex */
404 	bool is_rtr_mutex(const latch_t* latch) const
405 		UNIV_NOTHROW
406 	{
407 		return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
408 		       || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
409 		       || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX
410 		       || latch->get_id() == LATCH_ID_RTR_SSN_MUTEX);
411 	}
412 
413 private:
414 	/** Comparator for the Levels. */
415 	struct latch_level_less
416 		: public std::binary_function<
417 		  latch_level_t,
418 		  latch_level_t,
419 		  bool>
420 	{
421 		/** @return true if lhs < rhs */
422 		bool operator()(
423 			const latch_level_t& lhs,
424 			const latch_level_t& rhs) const
425 			UNIV_NOTHROW
426 		{
427 			return(lhs < rhs);
428 		}
429 	};
430 
431 	typedef std::map<
432 		latch_level_t,
433 		std::string,
434 		latch_level_less,
435 		ut_allocator<std::pair<const latch_level_t, std::string> > >
436 		Levels;
437 
438 	/** Mutex protecting the deadlock detector data structures. */
439 	Mutex			m_mutex;
440 
441 	/** Thread specific data. Protected by m_mutex. */
442 	ThreadMap		m_threads;
443 
444 	/** Mapping from latch level to its string representation. */
445 	Levels			m_levels;
446 
447 	/** The singleton instance. Must be created in single threaded mode. */
448 	static LatchDebug*	s_instance;
449 
450 public:
451 	/** For checking whether this module has been initialised or not. */
452 	static bool		s_initialized;
453 };
454 
455 /** The latch order checking infrastructure */
456 LatchDebug* LatchDebug::s_instance = NULL;
457 bool LatchDebug::s_initialized = false;
458 
459 #define LEVEL_MAP_INSERT(T)						\
460 do {									\
461 	std::pair<Levels::iterator, bool>	result =		\
462 		m_levels.insert(Levels::value_type(T, #T));		\
463 	ut_ad(result.second);						\
464 } while(0)
465 
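/* Illustrative aside (not part of the original source): a call such as
LEVEL_MAP_INSERT(SYNC_FSP) expands, per the macro above, to roughly

	std::pair<Levels::iterator, bool>	result =
		m_levels.insert(Levels::value_type(SYNC_FSP, "SYNC_FSP"));
	ut_ad(result.second);

i.e. the enum value is keyed by itself, #T supplies the human-readable name,
and the debug assertion guards against registering the same level twice. */
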
466 /** Setup the mapping from level ID to level name mapping */
467 LatchDebug::LatchDebug()
468 {
469 	m_mutex.init();
470 
471 	LEVEL_MAP_INSERT(SYNC_UNKNOWN);
472 	LEVEL_MAP_INSERT(SYNC_MUTEX);
473 	LEVEL_MAP_INSERT(RW_LOCK_SX);
474 	LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
475 	LEVEL_MAP_INSERT(RW_LOCK_S);
476 	LEVEL_MAP_INSERT(RW_LOCK_X);
477 	LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
478 	LEVEL_MAP_INSERT(SYNC_MONITOR_MUTEX);
479 	LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
480 	LEVEL_MAP_INSERT(SYNC_DOUBLEWRITE);
481 	LEVEL_MAP_INSERT(SYNC_BUF_FLUSH_LIST);
482 	LEVEL_MAP_INSERT(SYNC_BUF_BLOCK);
483 	LEVEL_MAP_INSERT(SYNC_BUF_PAGE_HASH);
484 	LEVEL_MAP_INSERT(SYNC_BUF_POOL);
485 	LEVEL_MAP_INSERT(SYNC_POOL);
486 	LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
487 	LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
488 	LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
489 	LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
490 	LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
491 	LEVEL_MAP_INSERT(SYNC_FTS_BG_THREADS);
492 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
493 	LEVEL_MAP_INSERT(SYNC_RECV);
494 	LEVEL_MAP_INSERT(SYNC_LOG_FLUSH_ORDER);
495 	LEVEL_MAP_INSERT(SYNC_LOG);
496 	LEVEL_MAP_INSERT(SYNC_LOG_WRITE);
497 	LEVEL_MAP_INSERT(SYNC_PAGE_CLEANER);
498 	LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
499 	LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
500 	LEVEL_MAP_INSERT(SYNC_REC_LOCK);
501 	LEVEL_MAP_INSERT(SYNC_THREADS);
502 	LEVEL_MAP_INSERT(SYNC_TRX);
503 	LEVEL_MAP_INSERT(SYNC_TRX_SYS);
504 	LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
505 	LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
506 	LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
507 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
508 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
509 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
510 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
511 	LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
512 	LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
513 	LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
514 	LEVEL_MAP_INSERT(SYNC_FSP);
515 	LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
516 	LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
517 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
518 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
519 	LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
520 	LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
521 	LEVEL_MAP_INSERT(SYNC_TRX_UNDO);
522 	LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
523 	LEVEL_MAP_INSERT(SYNC_TREE_NODE);
524 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
525 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
526 	LEVEL_MAP_INSERT(SYNC_ANALYZE_INDEX);
527 	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
528 	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
529 	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
530 	LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
531 	LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
532 	LEVEL_MAP_INSERT(SYNC_DICT_AUTOINC_MUTEX);
533 	LEVEL_MAP_INSERT(SYNC_DICT);
534 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
535 	LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
536 	LEVEL_MAP_INSERT(SYNC_FILE_FORMAT_TAG);
537 	LEVEL_MAP_INSERT(SYNC_TRX_I_S_LAST_READ);
538 	LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
539 	LEVEL_MAP_INSERT(SYNC_RECV_WRITER);
540 	LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
541 	LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);
542 
543 	/* Enum count starts from 0 */
544 	ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
545 }
546 
547 /** Print the latches acquired by a thread
548 @param[in]	latches		Latches acquired by a thread */
549 void
550 LatchDebug::print_latches(const Latches* latches) const
551 	UNIV_NOTHROW
552 {
553 	ib::error() << "Latches already owned by this thread: ";
554 
555 	Latches::const_iterator	end = latches->end();
556 
557 	for (Latches::const_iterator it = latches->begin();
558 	     it != end;
559 	     ++it) {
560 
561 		ib::error()
562 			<< sync_latch_get_name(it->m_latch->get_id())
563 			<< " -> "
564 			<< it->m_level << " "
565 			<< "(" << get_level_name(it->m_level) << ")";
566 	}
567 }
568 
569 /** Report error and abort
570 @param[in]	latches		thread's existing latches
571 @param[in]	latched		The existing latch causing the invariant to fail
572 @param[in]	level		The new level request that breaks the order */
573 void
574 LatchDebug::crash(
575 	const Latches*	latches,
576 	const Latched*	latched,
577 	latch_level_t	level) const
578 	UNIV_NOTHROW
579 {
580 	const latch_t*		latch = latched->m_latch;
581 	const std::string&	in_level_name = get_level_name(level);
582 
583 	const std::string&	latch_level_name =
584 		get_level_name(latched->m_level);
585 
586 	ib::error()
587 		<< "Thread " << os_thread_pf(os_thread_get_curr_id())
588 		<< " already owns a latch "
589 		<< sync_latch_get_name(latch->m_id) << " at level"
590 		<< " " << latched->m_level << " (" << latch_level_name
591 		<< " ), which is at a lower/same level than the"
592 		<< " requested latch: "
593 		<< level << " (" << in_level_name << "). "
594 		<< latch->to_string();
595 
596 	print_latches(latches);
597 
598 	ut_error;
599 }
600 
601 /** Check that all the latches already owned by a thread have a lower
602 level than limit.
603 @param[in]	latches		the thread's existing (acquired) latches
604 @param[in]	limit		to check against
605 @return the Latched instance if there is one with a level <= limit, else NULL. */
606 const Latched*
607 LatchDebug::less(
608 	const Latches*	latches,
609 	latch_level_t	limit) const
610 	UNIV_NOTHROW
611 {
612 	Latches::const_iterator	end = latches->end();
613 
614 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
615 
616 		if (it->m_level <= limit) {
617 			return(&(*it));
618 		}
619 	}
620 
621 	return(NULL);
622 }
623 
624 /** Do a basic ordering check.
625 @param[in]	latches		thread's existing latches
626 @param[in]	requested_level	Level requested by latch
627 @param[in]	in_level	declared ulint so that we can do level - 1.
628 				The level of the latch that the thread is
629 				trying to acquire
630 @return true if passes, else crash with error message. */
631 bool
632 LatchDebug::basic_check(
633 	const Latches*	latches,
634 	latch_level_t	requested_level,
635 	ulint		in_level) const
636 	UNIV_NOTHROW
637 {
638 	latch_level_t	level = latch_level_t(in_level);
639 
640 	ut_ad(level < SYNC_LEVEL_MAX);
641 
642 	const Latched*	latched = less(latches, level);
643 
644 	if (latched != NULL) {
645 		crash(latches, latched, requested_level);
646 		return(false);
647 	}
648 
649 	return(true);
650 }
651 
652 /** Create a new instance if one doesn't exist, else return the existing one.
653 @param[in]	add		add an empty entry if one is not found
654 				(default no)
655 @return	pointer to a thread's acquired latches. */
656 Latches*
657 LatchDebug::thread_latches(bool add)
658 	UNIV_NOTHROW
659 {
660 	m_mutex.enter();
661 
662 	os_thread_id_t		thread_id = os_thread_get_curr_id();
663 	ThreadMap::iterator	lb = m_threads.lower_bound(thread_id);
664 
665 	if (lb != m_threads.end()
666 	    && !(m_threads.key_comp()(thread_id, lb->first))) {
667 
668 		Latches*	latches = lb->second;
669 
670 		m_mutex.exit();
671 
672 		return(latches);
673 
674 	} else if (!add) {
675 
676 		m_mutex.exit();
677 
678 		return(NULL);
679 
680 	} else {
681 		typedef ThreadMap::value_type value_type;
682 
683 		Latches*	latches = UT_NEW_NOKEY(Latches());
684 
685 		ut_a(latches != NULL);
686 
687 		latches->reserve(32);
688 
689 		m_threads.insert(lb, value_type(thread_id, latches));
690 
691 		m_mutex.exit();
692 
693 		return(latches);
694 	}
695 }
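
/* Note (an illustrative aside): the lower_bound()/key_comp() sequence above
is the usual "find, else insert with hint" idiom for std::map; it avoids a
second tree walk when the entry has to be created. Conceptually it behaves
like the simpler, but slower

	ThreadMap::iterator	it = m_threads.find(thread_id);

	if (it == m_threads.end()) {
		it = m_threads.insert(
			ThreadMap::value_type(thread_id, latches)).first;
	}

except that insert() is given lb as a position hint. */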
696 
697 /** Checks if the level value exists in the thread's acquired latches.
698 @param[in]	latches		the thread's existing (acquired) latches
699 @param[in]	level		to lookup
700 @return	latch if found or 0 */
701 const latch_t*
702 LatchDebug::find(
703 	const Latches*	latches,
704 	latch_level_t	level) const UNIV_NOTHROW
705 {
706 	Latches::const_iterator	end = latches->end();
707 
708 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
709 
710 		if (it->m_level == level) {
711 
712 			return(it->m_latch);
713 		}
714 	}
715 
716 	return(0);
717 }
718 
719 /** Checks if the level value exists in the thread's acquired latches.
720 @param[in]	 level		The level to lookup
721 @return	latch if found or NULL */
722 const latch_t*
723 LatchDebug::find(latch_level_t level)
724 	UNIV_NOTHROW
725 {
726 	return(find(thread_latches(), level));
727 }
728 
729 /**
730 Adds a latch and its level in the thread level array. Allocates the memory
731 for the array if called for the first time for this OS thread. Makes the checks
732 against other latch levels stored in the array for this thread.
733 @param[in]	latch	pointer to a mutex or an rw-lock
734 @param[in]	level	level in the latching order
735 @return the thread's latches */
736 Latches*
737 LatchDebug::check_order(
738 	const latch_t*	latch,
739 	latch_level_t	level)
740 	UNIV_NOTHROW
741 {
742 	ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);
743 
744 	Latches*	latches = thread_latches(true);
745 
746 	/* NOTE that there is a problem with _NODE and _LEAF levels: if the
747 	B-tree height changes, then a leaf can change to an internal node
748 	or the other way around. We do not know at present if this can cause
749 	unnecessary assertion failures below. */
750 
751 	switch (level) {
752 	case SYNC_NO_ORDER_CHECK:
753 	case SYNC_EXTERN_STORAGE:
754 	case SYNC_TREE_NODE_FROM_HASH:
755 		/* Do no order checking */
756 		break;
757 
758 	case SYNC_TRX_SYS_HEADER:
759 
760 		if (srv_is_being_started) {
761 			/* This is violated during trx_sys_create_rsegs()
762 			when creating additional rollback segments when
763 			upgrading in innobase_start_or_create_for_mysql(). */
764 			break;
765 		}
766 
767 		/* Fall through */
768 
769 	case SYNC_MONITOR_MUTEX:
770 	case SYNC_RECV:
771 	case SYNC_FTS_BG_THREADS:
772 	case SYNC_WORK_QUEUE:
773 	case SYNC_FTS_TOKENIZE:
774 	case SYNC_FTS_OPTIMIZE:
775 	case SYNC_FTS_CACHE:
776 	case SYNC_FTS_CACHE_INIT:
777 	case SYNC_PAGE_CLEANER:
778 	case SYNC_LOG:
779 	case SYNC_LOG_WRITE:
780 	case SYNC_LOG_FLUSH_ORDER:
781 	case SYNC_FILE_FORMAT_TAG:
782 	case SYNC_DOUBLEWRITE:
783 	case SYNC_SEARCH_SYS:
784 	case SYNC_THREADS:
785 	case SYNC_LOCK_SYS:
786 	case SYNC_LOCK_WAIT_SYS:
787 	case SYNC_TRX_SYS:
788 	case SYNC_IBUF_BITMAP_MUTEX:
789 	case SYNC_REDO_RSEG:
790 	case SYNC_NOREDO_RSEG:
791 	case SYNC_TRX_UNDO:
792 	case SYNC_PURGE_LATCH:
793 	case SYNC_PURGE_QUEUE:
794 	case SYNC_DICT_AUTOINC_MUTEX:
795 	case SYNC_DICT_OPERATION:
796 	case SYNC_DICT_HEADER:
797 	case SYNC_TRX_I_S_RWLOCK:
798 	case SYNC_TRX_I_S_LAST_READ:
799 	case SYNC_IBUF_MUTEX:
800 	case SYNC_INDEX_ONLINE_LOG:
801 	case SYNC_STATS_AUTO_RECALC:
802 	case SYNC_POOL:
803 	case SYNC_POOL_MANAGER:
804 	case SYNC_RECV_WRITER:
805 
806 		basic_check(latches, level, level);
807 		break;
808 
809 	case SYNC_ANY_LATCH:
810 
811 		/* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
812 		if (is_rtr_mutex(latch)) {
813 
814 			const Latched*	latched = less(latches, level);
815 
816 			if (latched == NULL
817 			    || (latched != NULL
818 				&& is_rtr_mutex(latched->m_latch))) {
819 
820 				/* No violation */
821 				break;
822 
823 			}
824 
825 			crash(latches, latched, level);
826 
827 		} else {
828 			basic_check(latches, level, level);
829 		}
830 
831 		break;
832 
833 	case SYNC_TRX:
834 
835 		/* Either the thread must own the lock_sys->mutex, or
836 		it is allowed to own only ONE trx_t::mutex. */
837 
838 		if (less(latches, level) != NULL) {
839 			basic_check(latches, level, level - 1);
840 			ut_a(find(latches, SYNC_LOCK_SYS) != 0);
841 		}
842 		break;
843 
844 	case SYNC_BUF_FLUSH_LIST:
845 	case SYNC_BUF_POOL:
846 
847 		/* We can have multiple mutexes of this type therefore we
848 		can only check whether the greater than condition holds. */
849 
850 		basic_check(latches, level, level - 1);
851 		break;
852 
853 	case SYNC_BUF_PAGE_HASH:
854 
855 		/* Multiple page_hash locks are only allowed during
856 		buf_validate and that is where buf_pool mutex is already
857 		held. */
858 
859 		/* Fall through */
860 
861 	case SYNC_BUF_BLOCK:
862 
863 		/* Either the thread must own the (buffer pool) buf_pool->mutex
864 		or it is allowed to latch only ONE of (buffer block)
865 		block->mutex or buf_pool->zip_mutex. */
866 
867 		if (less(latches, level) != NULL) {
868 			basic_check(latches, level, level - 1);
869 			ut_a(find(latches, SYNC_BUF_POOL) != 0);
870 		}
871 		break;
872 
873 	case SYNC_REC_LOCK:
874 
875 		if (find(latches, SYNC_LOCK_SYS) != 0) {
876 			basic_check(latches, level, SYNC_REC_LOCK - 1);
877 		} else {
878 			basic_check(latches, level, SYNC_REC_LOCK);
879 		}
880 		break;
881 
882 	case SYNC_IBUF_BITMAP:
883 
884 		/* Either the thread must own the master mutex to all
885 		the bitmap pages, or it is allowed to latch only ONE
886 		bitmap page. */
887 
888 		if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {
889 
890 			basic_check(latches, level, SYNC_IBUF_BITMAP - 1);
891 
892 		} else if (!srv_is_being_started) {
893 
894 			/* This is violated during trx_sys_create_rsegs()
895 			when creating additional rollback segments during
896 			upgrade. */
897 
898 			basic_check(latches, level, SYNC_IBUF_BITMAP);
899 		}
900 		break;
901 
902 	case SYNC_FSP_PAGE:
903 		ut_a(find(latches, SYNC_FSP) != 0);
904 		break;
905 
906 	case SYNC_FSP:
907 
908 		ut_a(find(latches, SYNC_FSP) != 0
909 		     || basic_check(latches, level, SYNC_FSP));
910 		break;
911 
912 	case SYNC_TRX_UNDO_PAGE:
913 
914 		/* Purge is allowed to read in as many UNDO pages as it likes.
915 		The purge thread can read the UNDO pages without any covering
916 		mutex. */
917 
918 		ut_a(find(latches, SYNC_TRX_UNDO) != 0
919 		     || find(latches, SYNC_REDO_RSEG) != 0
920 		     || find(latches, SYNC_NOREDO_RSEG) != 0
921 		     || basic_check(latches, level, level - 1));
922 		break;
923 
924 	case SYNC_RSEG_HEADER:
925 
926 		ut_a(find(latches, SYNC_REDO_RSEG) != 0
927 		     || find(latches, SYNC_NOREDO_RSEG) != 0);
928 		break;
929 
930 	case SYNC_RSEG_HEADER_NEW:
931 
932 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
933 		break;
934 
935 	case SYNC_TREE_NODE:
936 
937 		{
938 			const latch_t*	fsp_latch;
939 
940 			fsp_latch = find(latches, SYNC_FSP);
941 
942 			ut_a((fsp_latch != NULL
943 			      && fsp_latch->is_temp_fsp())
944 			     || find(latches, SYNC_INDEX_TREE) != 0
945 			     || find(latches, SYNC_DICT_OPERATION)
946 			     || basic_check(latches,
947 					    level, SYNC_TREE_NODE - 1));
948 		}
949 
950 		break;
951 
952 	case SYNC_TREE_NODE_NEW:
953 
954 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
955 		break;
956 
957 	case SYNC_INDEX_TREE:
958 
959 		basic_check(latches, level, SYNC_TREE_NODE - 1);
960 		break;
961 
962 	case SYNC_ANALYZE_INDEX:
963 
964 		basic_check(latches, level, SYNC_ANALYZE_INDEX - 1);
965 		break;
966 
967 	case SYNC_IBUF_TREE_NODE:
968 
969 		ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
970 		     || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
971 		break;
972 
973 	case SYNC_IBUF_TREE_NODE_NEW:
974 
975 		/* ibuf_add_free_page() allocates new pages for the change
976 		buffer while only holding the tablespace x-latch. These
977 		pre-allocated new pages may only be used while holding
978 		ibuf_mutex, in btr_page_alloc_for_ibuf(). */
979 
980 		ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
981 		     || find(latches, SYNC_FSP) != 0);
982 		break;
983 
984 	case SYNC_IBUF_INDEX_TREE:
985 
986 		if (find(latches, SYNC_FSP) != 0) {
987 			basic_check(latches, level, level - 1);
988 		} else {
989 			basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
990 		}
991 		break;
992 
993 	case SYNC_IBUF_PESS_INSERT_MUTEX:
994 
995 		basic_check(latches, level, SYNC_FSP - 1);
996 		ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
997 		break;
998 
999 	case SYNC_IBUF_HEADER:
1000 
1001 		basic_check(latches, level, SYNC_FSP - 1);
1002 		ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
1003 		ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
1004 		break;
1005 
1006 	case SYNC_DICT:
1007 		basic_check(latches, level, SYNC_DICT);
1008 		break;
1009 
1010 	case SYNC_MUTEX:
1011 	case SYNC_UNKNOWN:
1012 	case SYNC_LEVEL_VARYING:
1013 	case RW_LOCK_X:
1014 	case RW_LOCK_X_WAIT:
1015 	case RW_LOCK_S:
1016 	case RW_LOCK_SX:
1017 	case RW_LOCK_NOT_LOCKED:
1018 		/* These levels should never be set for a latch. */
1019 		ut_error;
1020 		break;
1021 	}
1022 
1023 	return(latches);
1024 }
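
/* Worked example (illustrative; fsp_latch and page_latch are hypothetical
const latch_t pointers at levels SYNC_FSP and SYNC_FSP_PAGE): a thread that
latches the tablespace first and then an FSP page passes the checks above,
because the SYNC_FSP_PAGE case only requires SYNC_FSP to already be held:

	sync_check_lock_validate(fsp_latch);	OK, nothing held yet
	sync_check_lock_granted(fsp_latch);
	sync_check_lock_validate(page_latch);	OK, SYNC_FSP is held

Acquiring them in the opposite order fails immediately: with nothing held,
the SYNC_FSP_PAGE case asserts that SYNC_FSP is already owned, so the first
validate call trips ut_a() and aborts with the latch order report. */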
1025 
1026 /** Removes a latch from the thread level array if it is found there.
1027 @param[in]	latch		that was released/unlocked
1028 It is not an error if the latch is not found, as we presently are not
1029 able to determine the level for every latch reservation the program
1030 does, and unlock requests are matched by latch instance rather than
1031 by level. */
1032 void
1033 LatchDebug::unlock(const latch_t* latch)
1034 	UNIV_NOTHROW
1035 {
1036 	if (latch->get_level() == SYNC_LEVEL_VARYING) {
1037 		// We don't have varying level mutexes
1038 		ut_ad(latch->m_rw_lock);
1039 	}
1040 
1041 	Latches*	latches;
1042 
1043 	if (*latch->get_name() == '.') {
1044 
1045 		/* Ignore diagnostic latches, starting with '.' */
1046 
1047 	} else if ((latches = thread_latches()) != NULL) {
1048 
1049 		Latches::reverse_iterator	rend = latches->rend();
1050 
1051 		for (Latches::reverse_iterator it = latches->rbegin();
1052 		     it != rend;
1053 		     ++it) {
1054 
1055 			if (it->m_latch != latch) {
1056 
1057 				continue;
1058 			}
1059 
1060 			Latches::iterator	i = it.base();
1061 
1062 			latches->erase(--i);
1063 
1064 			/* If this thread doesn't own any more
1065 			latches remove from the map.
1066 
1067 			FIXME: Perhaps use the master thread
1068 			to do purge. Or, do it from close connection.
1069 			This could be expensive. */
1070 
1071 			if (latches->empty()) {
1072 
1073 				m_mutex.enter();
1074 
1075 				os_thread_id_t	thread_id;
1076 
1077 				thread_id = os_thread_get_curr_id();
1078 
1079 				m_threads.erase(thread_id);
1080 
1081 				m_mutex.exit();
1082 
1083 				UT_DELETE(latches);
1084 			}
1085 
1086 			return;
1087 		}
1088 
1089 		if (latch->get_level() != SYNC_LEVEL_VARYING) {
1090 			ib::error()
1091 				<< "Couldn't find latch "
1092 				<< sync_latch_get_name(latch->get_id());
1093 
1094 			print_latches(latches);
1095 
1096 			/** Must find the latch. */
1097 			ut_error;
1098 		}
1099 	}
1100 }
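
/* Note (illustrative): the erase above goes through reverse_iterator::base(),
which points one element past the one the reverse iterator refers to; with
latches = [A, B, C] and it referring to C, it.base() is end(), so erase(--i)
removes C. This is the standard idiom for erasing via a reverse iterator. */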
1101 
1102 /** Get the latch id from a latch name.
1103 @param[in]	name	Latch name
1104 @return latch id if found else LATCH_ID_NONE. */
1105 latch_id_t
1106 sync_latch_get_id(const char* name)
1107 {
1108 	LatchMetaData::const_iterator	end = latch_meta.end();
1109 
1110 	/* Linear scan should be OK; this should be extremely rare. */
1111 
1112 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1113 	     it != end;
1114 	     ++it) {
1115 
1116 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1117 
1118 			continue;
1119 
1120 		} else if (strcmp((*it)->get_name(), name) == 0) {
1121 
1122 			return((*it)->get_id());
1123 		}
1124 	}
1125 
1126 	return(LATCH_ID_NONE);
1127 }
1128 
1129 /** Get the latch name from a sync level
1130 @param[in]	level		Latch level to lookup
1131 @return NULL if not found. */
1132 const char*
1133 sync_latch_get_name(latch_level_t level)
1134 {
1135 	LatchMetaData::const_iterator	end = latch_meta.end();
1136 
1137 	/* Linear scan should be OK; this should be extremely rare. */
1138 
1139 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1140 	     it != end;
1141 	     ++it) {
1142 
1143 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1144 
1145 			continue;
1146 
1147 		} else if ((*it)->get_level() == level) {
1148 
1149 			return((*it)->get_name());
1150 		}
1151 	}
1152 
1153 	return(0);
1154 }
1155 
1156 /** Check if it is OK to acquire the latch.
1157 @param[in]	latch	latch type */
1158 void
1159 sync_check_lock_validate(const latch_t* latch)
1160 {
1161 	if (LatchDebug::instance() != NULL) {
1162 		LatchDebug::instance()->lock_validate(
1163 			latch, latch->get_level());
1164 	}
1165 }
1166 
1167 /** Note that the lock has been granted
1168 @param[in]	latch	latch type */
1169 void
1170 sync_check_lock_granted(const latch_t* latch)
1171 {
1172 	if (LatchDebug::instance() != NULL) {
1173 		LatchDebug::instance()->lock_granted(latch, latch->get_level());
1174 	}
1175 }
1176 
1177 /** Check if it is OK to acquire the latch.
1178 @param[in]	latch	latch type
1179 @param[in]	level	Latch level */
1180 void
1181 sync_check_lock(
1182 	const latch_t*	latch,
1183 	latch_level_t	level)
1184 {
1185 	if (LatchDebug::instance() != NULL) {
1186 
1187 		ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1188 		ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1189 
1190 		LatchDebug::instance()->lock_validate(latch, level);
1191 		LatchDebug::instance()->lock_granted(latch, level);
1192 	}
1193 }
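
/* Usage sketch (illustrative; debug_acquire is a hypothetical wrapper, not
an InnoDB function): the two hooks above are meant to bracket an acquisition,
the validate call running the order check before blocking and the granted
call recording the latch once it is actually held:

	static void debug_acquire(rw_lock_t* lock, const latch_t* latch)
	{
		sync_check_lock_validate(latch);
		rw_lock_x_lock(lock);
		sync_check_lock_granted(latch);
	}

sync_check_lock() combines both steps for the buffer block lock, whose level
varies and therefore has to be supplied by the caller. */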
1194 
1195 /** Check if it is OK to re-acquire the lock.
1196 @param[in]	latch		RW-LOCK to relock (recursive X locks) */
1197 void
1198 sync_check_relock(const latch_t* latch)
1199 {
1200 	if (LatchDebug::instance() != NULL) {
1201 		LatchDebug::instance()->relock(latch);
1202 	}
1203 }
1204 
1205 /** Removes a latch from the thread level array if it is found there.
1206 @param[in]	latch		The latch to unlock */
1207 void
1208 sync_check_unlock(const latch_t* latch)
1209 {
1210 	if (LatchDebug::instance() != NULL) {
1211 		LatchDebug::instance()->unlock(latch);
1212 	}
1213 }
1214 
1215 /** Checks if the level array for the current thread contains a
1216 mutex or rw-latch at the specified level.
1217 @param[in]	level		to find
1218 @return	a matching latch, or NULL if not found */
1219 const latch_t*
1220 sync_check_find(latch_level_t level)
1221 {
1222 	if (LatchDebug::instance() != NULL) {
1223 		return(LatchDebug::instance()->find(level));
1224 	}
1225 
1226 	return(NULL);
1227 }
1228 
1229 /** Iterate over the thread's latches.
1230 @param[in,out]	functor		called for each element.
1231 @return false if the sync debug hasn't been initialised
1232 @return the value returned by the functor */
1233 bool
1234 sync_check_iterate(sync_check_functor_t& functor)
1235 {
1236 	if (LatchDebug::instance() != NULL) {
1237 		return(LatchDebug::instance()->for_each(functor));
1238 	}
1239 
1240 	return(false);
1241 }
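
/* Example functor (a minimal sketch, assuming sync_check_functor_t is the
abstract interface whose operator()(latch_level_t) and result() members are
what LatchDebug::for_each() invokes): check whether the caller holds any
latch at all.

	struct Any_latch_held : public sync_check_functor_t {
		Any_latch_held() : m_found(false) { }
		virtual bool operator()(const latch_level_t)
		{
			m_found = true;
			return(true);
		}
		virtual bool result() const { return(m_found); }
		bool	m_found;
	};

	Any_latch_held	checker;
	ut_ad(!sync_check_iterate(checker));

operator() returning true stops the iteration early; result() is what
sync_check_iterate() hands back to the caller. */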
1242 
1243 /** Enable sync order checking.
1244 
1245 Note: We don't enforce any synchronisation checks. The caller must ensure
1246 that no races can occur */
1247 void
1248 sync_check_enable()
1249 {
1250 	if (!srv_sync_debug) {
1251 
1252 		return;
1253 	}
1254 
1255 	/* We should always call this before we create threads. */
1256 
1257 	LatchDebug::create_instance();
1258 }
1259 
1260 /** Initialise the debug data structures */
1261 void
1262 LatchDebug::init()
1263 	UNIV_NOTHROW
1264 {
1265 	ut_a(rw_lock_debug_event == NULL);
1266 
1267 	mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
1268 
1269 	rw_lock_debug_event = os_event_create("rw_lock_debug_event");
1270 
1271 	rw_lock_debug_waiters = FALSE;
1272 }
1273 
1274 /** Shutdown the latch debug checking
1275 
1276 Note: We don't enforce any synchronisation checks. The caller must ensure
1277 that no races can occur */
1278 void
1279 LatchDebug::shutdown()
1280 	UNIV_NOTHROW
1281 {
1282 	ut_a(rw_lock_debug_event != NULL);
1283 
1284 	os_event_destroy(rw_lock_debug_event);
1285 
1286 	rw_lock_debug_event = NULL;
1287 
1288 	mutex_free(&rw_lock_debug_mutex);
1289 
1290 	if (instance() == NULL) {
1291 
1292 		return;
1293 	}
1294 
1295 	ut_a(s_initialized);
1296 
1297 	s_initialized = false;
1298 
1299 	UT_DELETE(s_instance);
1300 
1301 	LatchDebug::s_instance = NULL;
1302 }
1303 
1304 /** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
1305 because the debug mutex is also acquired in sync0arr while holding the OS
1306 mutex protecting the sync array, and the ordinary mutex_enter might
1307 recursively call routines in sync0arr, leading to a deadlock on the OS
1308 mutex. */
1309 void
1310 rw_lock_debug_mutex_enter()
1311 {
1312 	for (;;) {
1313 
1314 		if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
1315 			return;
1316 		}
1317 
1318 		os_event_reset(rw_lock_debug_event);
1319 
1320 		rw_lock_debug_waiters = TRUE;
1321 
1322 		if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
1323 			return;
1324 		}
1325 
1326 		os_event_wait(rw_lock_debug_event);
1327 	}
1328 }
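
/* Note (illustrative): the second mutex_enter_nowait() attempt, issued after
os_event_reset() and after raising rw_lock_debug_waiters, closes the race in
which the holder releases the mutex and checks the waiters flag before this
thread has set it; without that retry the final os_event_wait() could sleep
on an event that nobody will ever set. */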
1329 
1330 /** Releases the debug mutex. */
1331 void
1332 rw_lock_debug_mutex_exit()
1333 {
1334 	mutex_exit(&rw_lock_debug_mutex);
1335 
1336 	if (rw_lock_debug_waiters) {
1337 		rw_lock_debug_waiters = FALSE;
1338 		os_event_set(rw_lock_debug_event);
1339 	}
1340 }
1341 #endif /* UNIV_DEBUG */
1342 
1343 /* Meta data for all the InnoDB latches. If the latch is not recorded
1344 here then it will not be considered for deadlock checks. */
1345 LatchMetaData	latch_meta;
1346 
1347 /** Load the latch meta data. */
1348 static
1349 void
1350 sync_latch_meta_init()
1351 	UNIV_NOTHROW
1352 {
1353 	latch_meta.resize(LATCH_ID_MAX);
1354 
1355 	/* The latches should be ordered on latch_id_t, so that we can
1356 	index directly into the vector to update and fetch meta-data. */
1357 
1358 	LATCH_ADD_MUTEX(AUTOINC, SYNC_DICT_AUTOINC_MUTEX, autoinc_mutex_key);
1359 
1360 #ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
1361 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK,
1362 			buffer_block_mutex_key);
1363 #else
1364 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK, PFS_NOT_INSTRUMENTED);
1365 #endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
1366 
1367 	LATCH_ADD_MUTEX(BUF_POOL, SYNC_BUF_POOL, buf_pool_mutex_key);
1368 
1369 	LATCH_ADD_MUTEX(BUF_POOL_ZIP, SYNC_BUF_BLOCK, buf_pool_zip_mutex_key);
1370 
1371 	LATCH_ADD_MUTEX(CACHE_LAST_READ, SYNC_TRX_I_S_LAST_READ,
1372 			cache_last_read_mutex_key);
1373 
1374 	LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
1375 			dict_foreign_err_mutex_key);
1376 
1377 	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);
1378 
1379 	LATCH_ADD_MUTEX(FILE_FORMAT_MAX, SYNC_FILE_FORMAT_TAG,
1380 			file_format_max_mutex_key);
1381 
1382 	LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);
1383 
1384 	LATCH_ADD_MUTEX(FLUSH_LIST, SYNC_BUF_FLUSH_LIST, flush_list_mutex_key);
1385 
1386 	LATCH_ADD_MUTEX(FTS_BG_THREADS, SYNC_FTS_BG_THREADS,
1387 			fts_bg_threads_mutex_key);
1388 
1389 	LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);
1390 
1391 	LATCH_ADD_MUTEX(FTS_OPTIMIZE, SYNC_FTS_OPTIMIZE,
1392 			fts_optimize_mutex_key);
1393 
1394 	LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);
1395 
1396 	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
1397 			fts_pll_tokenize_mutex_key);
1398 
1399 	LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
1400 			hash_table_mutex_key);
1401 
1402 	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
1403 			ibuf_bitmap_mutex_key);
1404 
1405 	LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);
1406 
1407 	LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
1408 			ibuf_pessimistic_insert_mutex_key);
1409 
1410 	LATCH_ADD_MUTEX(LOG_SYS, SYNC_LOG, log_sys_mutex_key);
1411 
1412 	LATCH_ADD_MUTEX(LOG_WRITE, SYNC_LOG_WRITE, log_sys_write_mutex_key);
1413 
1414 	LATCH_ADD_MUTEX(LOG_FLUSH_ORDER, SYNC_LOG_FLUSH_ORDER,
1415 			log_flush_order_mutex_key);
1416 
1417 	LATCH_ADD_MUTEX(MUTEX_LIST, SYNC_NO_ORDER_CHECK, mutex_list_mutex_key);
1418 
1419 	LATCH_ADD_MUTEX(PAGE_CLEANER, SYNC_PAGE_CLEANER,
1420 			page_cleaner_mutex_key);
1421 
1422 	LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
1423 			purge_sys_pq_mutex_key);
1424 
1425 	LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
1426 			recalc_pool_mutex_key);
1427 
1428 	LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);
1429 
1430 	LATCH_ADD_MUTEX(RECV_WRITER, SYNC_RECV_WRITER, recv_writer_mutex_key);
1431 
1432 	LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);
1433 
1434 	LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);
1435 
1436 #ifdef UNIV_DEBUG
1437 	/* Mutex names starting with '.' are not tracked. They are assumed
1438 	to be diagnostic mutexes used in debugging. */
1439 	latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
1440 		LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
1441 			SYNC_NO_ORDER_CHECK,
1442 			rw_lock_debug_mutex_key);
1443 #endif /* UNIV_DEBUG */
1444 
1445 	LATCH_ADD_MUTEX(RTR_SSN_MUTEX, SYNC_ANY_LATCH, rtr_ssn_mutex_key);
1446 
1447 	LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
1448 			rtr_active_mutex_key);
1449 
1450 	LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);
1451 
1452 	LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);
1453 
1454 	LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
1455 			rw_lock_list_mutex_key);
1456 
1457 	LATCH_ADD_MUTEX(RW_LOCK_MUTEX, SYNC_NO_ORDER_CHECK, rw_lock_mutex_key);
1458 
1459 	LATCH_ADD_MUTEX(SRV_DICT_TMPFILE, SYNC_DICT_OPERATION,
1460 			srv_dict_tmpfile_mutex_key);
1461 
1462 	LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
1463 			srv_innodb_monitor_mutex_key);
1464 
1465 	LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
1466 			srv_misc_tmpfile_mutex_key);
1467 
1468 	LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
1469 			srv_monitor_file_mutex_key);
1470 
1471 #ifdef UNIV_DEBUG
1472 	LATCH_ADD_MUTEX(SYNC_THREAD, SYNC_NO_ORDER_CHECK,
1473 			sync_thread_mutex_key);
1474 #else
1475 	LATCH_ADD_MUTEX(SYNC_THREAD, SYNC_NO_ORDER_CHECK, PFS_NOT_INSTRUMENTED);
1476 #endif /* UNIV_DEBUG */
1477 
1478 	LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);
1479 
1480 	LATCH_ADD_MUTEX(TRX_UNDO, SYNC_TRX_UNDO, trx_undo_mutex_key);
1481 
1482 	LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);
1483 
1484 	LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
1485 			trx_pool_manager_mutex_key);
1486 
1487 	LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);
1488 
1489 	LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);
1490 
1491 	LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
1492 			lock_wait_mutex_key);
1493 
1494 	LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);
1495 
1496 	LATCH_ADD_MUTEX(SRV_SYS, SYNC_THREADS, srv_sys_mutex_key);
1497 
1498 	LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);
1499 
1500 	LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
1501 			page_zip_stat_per_index_mutex_key);
1502 
1503 #ifndef PFS_SKIP_EVENT_MUTEX
1504 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1505 			event_manager_mutex_key);
1506 	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK, event_mutex_key);
1507 #else
1508 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1509 			PFS_NOT_INSTRUMENTED);
1510 	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK,
1511 			PFS_NOT_INSTRUMENTED);
1512 #endif /* !PFS_SKIP_EVENT_MUTEX */
1513 
1514 	LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
1515 			sync_array_mutex_key);
1516 
1517 	LATCH_ADD_MUTEX(THREAD_MUTEX, SYNC_NO_ORDER_CHECK, thread_mutex_key);
1518 
1519 	LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key);
1520 
1521 	LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
1522 			PFS_NOT_INSTRUMENTED);
1523 
1524 	LATCH_ADD_MUTEX(OS_AIO_WRITE_MUTEX, SYNC_NO_ORDER_CHECK,
1525 			PFS_NOT_INSTRUMENTED);
1526 
1527 	LATCH_ADD_MUTEX(OS_AIO_LOG_MUTEX, SYNC_NO_ORDER_CHECK,
1528 			PFS_NOT_INSTRUMENTED);
1529 
1530 	LATCH_ADD_MUTEX(OS_AIO_IBUF_MUTEX, SYNC_NO_ORDER_CHECK,
1531 			PFS_NOT_INSTRUMENTED);
1532 
1533 	LATCH_ADD_MUTEX(OS_AIO_SYNC_MUTEX, SYNC_NO_ORDER_CHECK,
1534 			PFS_NOT_INSTRUMENTED);
1535 
1536 	LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
1537 			row_drop_list_mutex_key);
1538 
1539 	LATCH_ADD_RWLOCK(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
1540 			index_online_log_key);
1541 
1542 	LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);
1543 
1544 	// Add the RW locks
1545 	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);
1546 
1547 #ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
1548 	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1549 			 buf_block_lock_key);
1550 #else
1551 	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1552 			 PFS_NOT_INSTRUMENTED);
1553 #endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK */
1554 
1555 #ifdef UNIV_DEBUG
1556 	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_NO_ORDER_CHECK,
1557 			 buf_block_debug_latch_key);
1558 #else
1559 	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_NO_ORDER_CHECK,
1560 			 PFS_NOT_INSTRUMENTED);
1561 #endif /* UNIV_DEBUG */
1562 
1563 	LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT, dict_operation_lock_key);
1564 
1565 	LATCH_ADD_RWLOCK(CHECKPOINT, SYNC_NO_ORDER_CHECK, checkpoint_lock_key);
1566 
1567 	LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);
1568 
1569 	LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);
1570 
1571 	LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
1572 			 fts_cache_init_rw_lock_key);
1573 
1574 	LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
1575 			 trx_i_s_cache_lock_key);
1576 
1577 	LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);
1578 
1579 	LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
1580 			 index_tree_rw_lock_key);
1581 
1582 	LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);
1583 
1584 	LATCH_ADD_RWLOCK(DICT_TABLE_STATS, SYNC_INDEX_TREE,
1585 			 dict_table_stats_key);
1586 
1587 	LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
1588 			 hash_table_locks_key);
1589 
1590 	LATCH_ADD_RWLOCK(SYNC_DEBUG_MUTEX, SYNC_NO_ORDER_CHECK,
1591 			 PFS_NOT_INSTRUMENTED);
1592 
1593 	LATCH_ADD_MUTEX(MASTER_KEY_ID_MUTEX, SYNC_NO_ORDER_CHECK,
1594 			master_key_id_mutex_key);
1595 
1596 	LATCH_ADD_MUTEX(ANALYZE_INDEX_MUTEX, SYNC_ANALYZE_INDEX,
1597 			analyze_index_mutex_key);
1598 
1599 	latch_id_t	id = LATCH_ID_NONE;
1600 
1601 	/* The array should be ordered on latch ID. We need to
1602 	index directly into it from the mutex policy to update
1603 	the counters and access the meta-data. */
1604 
1605 	for (LatchMetaData::iterator it = latch_meta.begin();
1606 	     it != latch_meta.end();
1607 	     ++it) {
1608 
1609 		const latch_meta_t*	meta = *it;
1610 
1611 		/* Skip blank entries */
1612 		if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
1613 			continue;
1614 		}
1615 
1616 		ut_a(id < meta->get_id());
1617 
1618 		id = meta->get_id();
1619 	}
1620 }
1621 
1622 /** Destroy the latch meta data */
1623 static
1624 void
1625 sync_latch_meta_destroy()
1626 {
1627 	for (LatchMetaData::iterator it = latch_meta.begin();
1628 	     it != latch_meta.end();
1629 	     ++it) {
1630 
1631 		UT_DELETE(*it);
1632 	}
1633 
1634 	latch_meta.clear();
1635 }
1636 
1637 /** Track the mutex creation file name and line number. This is to avoid
1638 storing { const char* name; uint16_t line; } in every instance, which would
1639 push sizeof(Mutex) above 64 bytes. We use a lookup table to store them
1640 separately. Fetching the values is very rare, only required for diagnostic
1641 purposes, and we don't create/destroy mutexes that frequently. */
1642 struct CreateTracker {
1643 
1644 	/** Constructor */
1645 	CreateTracker()
1646 		UNIV_NOTHROW
1647 	{
1648 		m_mutex.init();
1649 	}
1650 
1651 	/** Destructor */
1652 	~CreateTracker()
1653 		UNIV_NOTHROW
1654 	{
1655 		ut_ad(m_files.empty());
1656 
1657 		m_mutex.destroy();
1658 	}
1659 
1660 	/** Register where the latch was created
1661 	@param[in]	ptr		Latch instance
1662 	@param[in]	filename	Where created
1663 	@param[in]	line		Line number in filename */
1664 	void register_latch(
1665 		const void*	ptr,
1666 		const char*	filename,
1667 		uint16_t	line)
1668 		UNIV_NOTHROW
1669 	{
1670 		m_mutex.enter();
1671 
1672 		Files::iterator	lb = m_files.lower_bound(ptr);
1673 
1674 		ut_ad(lb == m_files.end()
1675 		      || m_files.key_comp()(ptr, lb->first));
1676 
1677 		typedef Files::value_type value_type;
1678 
1679 		m_files.insert(lb, value_type(ptr, File(filename, line)));
1680 
1681 		m_mutex.exit();
1682 	}
1683 
1684 	/** Deregister a latch - when it is destroyed
1685 	@param[in]	ptr		Latch instance being destroyed */
1686 	void deregister_latch(const void* ptr)
1687 		UNIV_NOTHROW
1688 	{
1689 		m_mutex.enter();
1690 
1691 		Files::iterator	lb = m_files.lower_bound(ptr);
1692 
1693 		ut_ad(lb != m_files.end()
1694 		      && !(m_files.key_comp()(ptr, lb->first)));
1695 
1696 		m_files.erase(lb);
1697 
1698 		m_mutex.exit();
1699 	}
1700 
1701 	/** Get the create string, format is "name:line"
1702 	@param[in]	ptr		Latch instance
1703 	@return the create string or "" if not found */
1704 	std::string get(const void* ptr)
1705 		UNIV_NOTHROW
1706 	{
1707 		m_mutex.enter();
1708 
1709 		std::string	created;
1710 
1711 		Files::iterator	lb = m_files.lower_bound(ptr);
1712 
1713 		if (lb != m_files.end()
1714 		    && !(m_files.key_comp()(ptr, lb->first))) {
1715 
1716 			std::ostringstream	msg;
1717 
1718 			msg << lb->second.m_name << ":" << lb->second.m_line;
1719 
1720 			created = msg.str();
1721 		}
1722 
1723 		m_mutex.exit();
1724 
1725 		return(created);
1726 	}
1727 
1728 private:
1729 	/** For tracking the filename and line number */
1730 	struct File {
1731 
1732 		/** Constructor */
1733 		File() UNIV_NOTHROW : m_name(), m_line() { }
1734 
1735 		/** Constructor
1736 		@param[in]	name		Filename where created
1737 		@param[in]	line		Line number where created */
1738 		File(const char*  name, uint16_t line)
1739 			UNIV_NOTHROW
1740 			:
1741 			m_name(sync_basename(name)),
1742 			m_line(line)
1743 		{
1744 			/* No op */
1745 		}
1746 
1747 		/** Filename where created */
1748 		std::string		m_name;
1749 
1750 		/** Line number where created */
1751 		uint16_t		m_line;
1752 	};
1753 
1754 	/** Map the mutex instance to where it was created */
1755 	typedef std::map<
1756 		const void*,
1757 		File,
1758 		std::less<const void*>,
1759 		ut_allocator<std::pair<const void* const, File> > >
1760 		Files;
1761 
1762 	typedef OSMutex	Mutex;
1763 
1764 	/** Mutex protecting m_files */
1765 	Mutex			m_mutex;
1766 
1767 	/** Track the latch creation */
1768 	Files			m_files;
1769 };
1770 
1771 /** Track latch creation location. For reducing the size of the latches */
1772 static CreateTracker*	create_tracker;
1773 
1774 /** Register a latch, called when it is created
1775 @param[in]	ptr		Latch instance that was created
1776 @param[in]	filename	Filename where it was created
1777 @param[in]	line		Line number in filename */
1778 void
1779 sync_file_created_register(
1780 	const void*	ptr,
1781 	const char*	filename,
1782 	uint16_t	line)
1783 {
1784 	create_tracker->register_latch(ptr, filename, line);
1785 }
1786 
1787 /** Deregister a latch, called when it is destroyed
1788 @param[in]	ptr		Latch to be destroyed */
1789 void
1790 sync_file_created_deregister(const void* ptr)
1791 {
1792 	create_tracker->deregister_latch(ptr);
1793 }
1794 
1795 /** Get the string where the file was created. Its format is "name:line"
1796 @param[in]	ptr		Latch instance
1797 @return created information or "" if can't be found */
1798 std::string
1799 sync_file_created_get(const void* ptr)
1800 {
1801 	return(create_tracker->get(ptr));
1802 }
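
/* Usage sketch (illustrative; my_latch stands for any latch instance): the
three functions above form the lifecycle of a create_tracker entry.

	sync_file_created_register(&my_latch, __FILE__, uint16_t(__LINE__));
	...
	ib::info() << "created at " << sync_file_created_get(&my_latch);
	...
	sync_file_created_deregister(&my_latch);

The string returned by sync_file_created_get() has the form "name:line",
where name is the basename produced by sync_basename(). */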
1803 
1804 /** Initializes the synchronization data structures. */
1805 void
1806 sync_check_init()
1807 {
1808 	ut_ad(!LatchDebug::s_initialized);
1809 	ut_d(LatchDebug::s_initialized = true);
1810 
1811 	/** For collecting latch statistics - SHOW ... MUTEX */
1812 	mutex_monitor = UT_NEW_NOKEY(MutexMonitor());
1813 
1814 	/** For tracking mutex creation location */
1815 	create_tracker = UT_NEW_NOKEY(CreateTracker());
1816 
1817 	sync_latch_meta_init();
1818 
1819 	/* Init the rw-lock & mutex list and create the mutex to protect it. */
1820 
1821 	UT_LIST_INIT(rw_lock_list, &rw_lock_t::list);
1822 
1823 	mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);
1824 
1825 	ut_d(LatchDebug::init());
1826 
1827 	sync_array_init(OS_THREAD_MAX_N);
1828 }
1829 
1830 /** Frees the resources in InnoDB's own synchronization data structures. Use
1831 os_sync_free() after calling this. */
1832 void
1833 sync_check_close()
1834 {
1835 	ut_d(LatchDebug::shutdown());
1836 
1837 	mutex_free(&rw_lock_list_mutex);
1838 
1839 	sync_array_close();
1840 
1841 	UT_DELETE(mutex_monitor);
1842 
1843 	mutex_monitor = NULL;
1844 
1845 	UT_DELETE(create_tracker);
1846 
1847 	create_tracker = NULL;
1848 
1849 	sync_latch_meta_destroy();
1850 }
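
/* Ordering sketch (illustrative): at a high level the module is expected to
be driven like this during startup and shutdown, with os_sync_free() (defined
elsewhere) called last, as noted above:

	sync_check_init();	latch meta data, create tracker, sync array
	sync_check_enable();	no-op unless srv_sync_debug is set; call
				before any threads are created
	... server runs ...
	sync_check_close();
	os_sync_free();
*/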
1851 
1852