1 /*****************************************************************************
2 
3 Copyright (c) 2014, 2020, Oracle and/or its affiliates. All Rights Reserved.
4 
5 Portions of this file contain modifications contributed and copyrighted by
6 Google, Inc. Those modifications are gratefully acknowledged and are described
7 briefly in the InnoDB documentation. The contributions by Google are
8 incorporated with their permission, and subject to the conditions contained in
9 the file COPYING.Google.
10 
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License, version 2.0,
13 as published by the Free Software Foundation.
14 
15 This program is also distributed with certain software (including
16 but not limited to OpenSSL) that is licensed under separate terms,
17 as designated in a particular file or component or in included license
18 documentation.  The authors of MySQL hereby grant you an additional
19 permission to link the program and your derivative works with the
20 separately licensed software that they have included with MySQL.
21 
22 This program is distributed in the hope that it will be useful,
23 but WITHOUT ANY WARRANTY; without even the implied warranty of
24 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
25 GNU General Public License, version 2.0, for more details.
26 
27 You should have received a copy of the GNU General Public License along with
28 this program; if not, write to the Free Software Foundation, Inc.,
29 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
30 
31 *****************************************************************************/
32 
33 /**************************************************//**
34 @file sync/sync0debug.cc
35 Debug checks for latches.
36 
37 Created 2012-08-21 Sunny Bains
38 *******************************************************/
39 
40 #include "sync0sync.h"
41 #include "sync0debug.h"
42 
43 #include "ut0new.h"
44 #include "srv0start.h"
45 
46 #include <map>
47 #include <vector>
48 #include <string>
49 #include <algorithm>
50 #include <iostream>
51 
52 #ifdef UNIV_DEBUG
53 
54 my_bool		srv_sync_debug;
55 
56 /** The global mutex which protects debug info lists of all rw-locks.
57 To modify the debug info list of an rw-lock, this mutex has to be
58 acquired in addition to the mutex protecting the lock. */
59 static ib_mutex_t		rw_lock_debug_mutex;
60 
61 /** If deadlock detection does not get the mutex immediately,
62 it may wait for this event */
63 static os_event_t		rw_lock_debug_event;
64 
65 /** This is set to true, if there may be waiters for the event */
66 static bool			rw_lock_debug_waiters;
67 
68 /** The latch held by a thread */
69 struct Latched {
70 
71 	/** Constructor */
72 	Latched() : m_latch(), m_level(SYNC_UNKNOWN) { }
73 
74 	/** Constructor
75 	@param[in]	latch		Latch instance
76 	@param[in]	level		Level of latch held */
77 	Latched(const latch_t*	latch,
78 		latch_level_t	level)
79 		:
80 		m_latch(latch),
81 		m_level(level)
82 	{
83 		/* No op */
84 	}
85 
86 	/** @return the latch level */
87 	latch_level_t get_level() const
88 	{
89 		return(m_level);
90 	}
91 
92 	/** Check if the rhs latch and level match
93 	@param[in]	rhs		instance to compare with
94 	@return true on match */
95 	bool operator==(const Latched& rhs) const
96 	{
97 		return(m_latch == rhs.m_latch && m_level == rhs.m_level);
98 	}
99 
100 	/** The latch instance */
101 	const latch_t*		m_latch;
102 
103 	/** The latch level. For buffer blocks we can pass a separate latch
104 	level to check against, see buf_block_dbg_add_level() */
105 	latch_level_t		m_level;
106 };
107 
108 /** Thread specific latches. This is ordered on level in descending order. */
109 typedef std::vector<Latched, ut_allocator<Latched> > Latches;
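/* Illustrative sketch (not built): what a thread's Latches vector might look
like while descending the latching order. The latch pointers below are
hypothetical; the point is that the vector stays sorted on level in
descending order, so the most recently pushed entry has the lowest level. */
#if 0
Latches	stack;

stack.push_back(Latched(dict_mutex_latch, SYNC_DICT));		// highest level
stack.push_back(Latched(fil_space_latch, SYNC_FSP));		// lower level
stack.push_back(Latched(buf_pool_mutex_latch, SYNC_BUF_POOL));	// lowest so far

// Requesting a level greater than stack.back().get_level() would be
// reported by LatchDebug::check_order() below.
#endif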
110 
111 /** The deadlock detector. */
112 struct LatchDebug {
113 
114 	/** Debug mutex for control structures, should not be tracked
115 	by this module. */
116 	typedef OSMutex Mutex;
117 
118 	/** Comparator for the ThreadMap. */
119 	struct os_thread_id_less
120 		: public std::binary_function<
121 		  os_thread_id_t,
122 		  os_thread_id_t,
123 		  bool>
124 	{
125 		/** @return true if lhs < rhs */
126 		bool operator()(
127 			const os_thread_id_t& lhs,
128 			const os_thread_id_t& rhs) const
129 			UNIV_NOTHROW
130 		{
131 			return(os_thread_pf(lhs) < os_thread_pf(rhs));
132 		}
133 	};
134 
135 	/** For tracking a thread's latches. */
136 	typedef std::map<
137 		os_thread_id_t,
138 		Latches*,
139 		os_thread_id_less,
140 		ut_allocator<std::pair<const os_thread_id_t, Latches*> > >
141 		ThreadMap;
142 
143 	/** Constructor */
144 	LatchDebug()
145 		UNIV_NOTHROW;
146 
147 	/** Destructor */
148 	~LatchDebug()
149 		UNIV_NOTHROW
150 	{
151 		m_mutex.destroy();
152 	}
153 
154 	/** Create a new instance if one doesn't exist else return
155 	the existing one.
156 	@param[in]	add		add an empty entry if one is not
157 					found (default no)
158 	@return	pointer to a thread's acquired latches. */
159 	Latches* thread_latches(bool add = false)
160 		UNIV_NOTHROW;
161 
162 	/** Check that all the latches already owned by a thread have a lower
163 	level than limit.
164 	@param[in]	latches		the thread's existing (acquired) latches
165 	@param[in]	limit		to check against
166 	@return latched if there is one with a level <= limit. */
167 	const Latched* less(
168 		const Latches*	latches,
169 		latch_level_t	limit) const
170 		UNIV_NOTHROW;
171 
172 	/** Checks if the level value exists in the thread's acquired latches.
173 	@param[in]	latches		the thread's existing (acquired) latches
174 	@param[in]	level		to lookup
175 	@return	latch if found or 0 */
176 	const latch_t* find(
177 		const Latches*	latches,
178 		latch_level_t	level) const
179 		UNIV_NOTHROW;
180 
181 	/**
182 	Checks if the level value exists in the thread's acquired latches.
183 	@param[in]	level		to lookup
184 	@return	latch if found or 0 */
185 	const latch_t* find(latch_level_t level)
186 		UNIV_NOTHROW;
187 
188 	/** Report error and abort.
189 	@param[in]	latches		thread's existing latches
190 	@param[in]	latched		The existing latch causing the
191 					invariant to fail
192 	@param[in]	level		The new level request that breaks
193 					the order */
194 	void crash(
195 		const Latches*	latches,
196 		const Latched*	latched,
197 		latch_level_t	level) const
198 		UNIV_NOTHROW;
199 
200 	/** Do a basic ordering check.
201 	@param[in]	latches		thread's existing latches
202 	@param[in]	requested_level	Level requested by latch
203 	@param[in]	level		declared ulint so that we can
204 					do level - 1. The level of the
205 					latch that the thread is trying
206 					to acquire
207 	@return true if passes, else crash with error message. */
208 	bool basic_check(
209 		const Latches*	latches,
210 		latch_level_t	requested_level,
211 		ulint		level) const
212 		UNIV_NOTHROW;
213 
214 	/** Adds a latch and its level in the thread level array. Allocates
215 	the memory for the array if called for the first time for this
216 	OS thread.  Makes the checks against other latch levels stored
217 	in the array for this thread.
218 
219 	@param[in]	latch	latch that the thread wants to acquire.
220 	@param[in]	level	latch level to check against */
221 	void lock_validate(
222 		const latch_t*	latch,
223 		latch_level_t	level)
224 		UNIV_NOTHROW
225 	{
226 		/* Ignore diagnostic latches, starting with '.' */
227 
228 		if (*latch->get_name() != '.'
229 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
230 
231 			ut_ad(level != SYNC_LEVEL_VARYING);
232 
233 			Latches*	latches = check_order(latch, level);
234 
235 			ut_a(latches->empty()
236 			     || level == SYNC_LEVEL_VARYING
237 			     || level == SYNC_NO_ORDER_CHECK
238 			     || latches->back().get_level()
239 			     == SYNC_NO_ORDER_CHECK
240 			     || latches->back().m_latch->get_level()
241 			     == SYNC_LEVEL_VARYING
242 			     || latches->back().get_level() >= level);
243 		}
244 	}
245 
246 	/** Adds a latch and its level in the thread level array. Allocates
247 	the memory for the array if called for the first time for this
248 	OS thread.  Makes the checks against other latch levels stored
249 	in the array for this thread.
250 
251 	@param[in]	latch	latch that the thread wants to acquire.
252 	@param[in]	level	latch level to check against */
253 	void lock_granted(
254 		const latch_t*	latch,
255 		latch_level_t	level)
256 		UNIV_NOTHROW
257 	{
258 		/* Ignore diagnostic latches, starting with '.' */
259 
260 		if (*latch->get_name() != '.'
261 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
262 
263 			Latches*	latches = thread_latches(true);
264 
265 			latches->push_back(Latched(latch, level));
266 		}
267 	}
268 
269 	/** For recursive X rw-locks.
270 	@param[in]	latch		The RW-Lock to relock  */
271 	void relock(const latch_t* latch)
272 		UNIV_NOTHROW
273 	{
274 		ut_a(latch->m_rw_lock);
275 
276 		latch_level_t	level = latch->get_level();
277 
278 		/* Ignore diagnostic latches, starting with '.' */
279 
280 		if (*latch->get_name() != '.'
281 		    && latch->get_level() != SYNC_LEVEL_VARYING) {
282 
283 			Latches*	latches = thread_latches(true);
284 
285 			Latches::iterator	it = std::find(
286 				latches->begin(), latches->end(),
287 				Latched(latch, level));
288 
289 			ut_a(latches->empty()
290 			     || level == SYNC_LEVEL_VARYING
291 			     || level == SYNC_NO_ORDER_CHECK
292 			     || latches->back().m_latch->get_level()
293 			     == SYNC_LEVEL_VARYING
294 			     || latches->back().m_latch->get_level()
295 			     == SYNC_NO_ORDER_CHECK
296 			     || latches->back().get_level() >= level
297 			     || it != latches->end());
298 
299 			if (it == latches->end()) {
300 				latches->push_back(Latched(latch, level));
301 			} else {
302 				latches->insert(it, Latched(latch, level));
303 			}
304 		}
305 	}
306 
307 	/** Iterate over a thread's latches.
308 	@param[in,out]	functor		The callback
309 	@return true if the functor returns true. */
310 	bool for_each(sync_check_functor_t& functor)
311 		UNIV_NOTHROW
312 	{
313 		const Latches*	latches = thread_latches();
314 
315 		if (latches == 0) {
316 			return(functor.result());
317 		}
318 
319 		Latches::const_iterator	end = latches->end();
320 
321 		for (Latches::const_iterator it = latches->begin();
322 		     it != end;
323 		     ++it) {
324 
325 			if (functor(it->m_level)) {
326 				break;
327 			}
328 		}
329 
330 		return(functor.result());
331 	}
332 
333 	/** Removes a latch from the thread level array if it is found there.
334 	@param[in]	latch		The latch that was released
335 	It is not an error if the latch is not found, as we presently are
336 	not able to determine the level for every latch reservation the
337 	program does */
338 	void unlock(const latch_t* latch) UNIV_NOTHROW;
339 
340 	/** Get the level name
341 	@param[in]	level		The level ID to lookup
342 	@return level name */
343 	const std::string& get_level_name(latch_level_t level) const
344 		UNIV_NOTHROW
345 	{
346 		Levels::const_iterator	it = m_levels.find(level);
347 
348 		ut_ad(it != m_levels.end());
349 
350 		return(it->second);
351 	}
352 
353 	/** Initialise the debug data structures */
354 	static void init()
355 		UNIV_NOTHROW;
356 
357 	/** Shutdown the latch debug checking */
358 	static void shutdown()
359 		UNIV_NOTHROW;
360 
361 	/** @return the singleton instance */
362 	static LatchDebug* instance()
363 		UNIV_NOTHROW
364 	{
365 		return(s_instance);
366 	}
367 
368 	/** Create the singleton instance */
369 	static void create_instance()
370 		UNIV_NOTHROW
371 	{
372 		ut_ad(s_instance == NULL);
373 
374 		s_instance = UT_NEW_NOKEY(LatchDebug());
375 	}
376 
377 private:
378 	/** Disable copying */
379 	LatchDebug(const LatchDebug&);
380 	LatchDebug& operator=(const LatchDebug&);
381 
382 	/** Adds a latch and its level in the thread level array. Allocates
383 	the memory for the array if called first time for this OS thread.
384 	Makes the checks against other latch levels stored in the array
385 	for this thread.
386 
387 	@param[in]	latch	 pointer to a mutex or an rw-lock
388 	@param[in]	level	level in the latching order
389 	@return the thread's latches */
390 	Latches* check_order(
391 		const latch_t*	latch,
392 		latch_level_t	level)
393 		UNIV_NOTHROW;
394 
395 	/** Print the latches acquired by a thread
396 	@param[in]	latches		Latches acquired by a thread */
397 	void print_latches(const Latches* latches) const
398 		UNIV_NOTHROW;
399 
400 	/** Special handling for the RTR mutexes. We need to add proper
401 	levels for them if possible.
402 	@param[in]	latch		Latch to check
403 	@return true if it is an _RTR_ mutex */
404 	bool is_rtr_mutex(const latch_t* latch) const
405 		UNIV_NOTHROW
406 	{
407 		return(latch->get_id() == LATCH_ID_RTR_ACTIVE_MUTEX
408 		       || latch->get_id() == LATCH_ID_RTR_PATH_MUTEX
409 		       || latch->get_id() == LATCH_ID_RTR_MATCH_MUTEX
410 		       || latch->get_id() == LATCH_ID_RTR_SSN_MUTEX);
411 	}
412 
413 private:
414 	/** Comparator for the Levels . */
415 	struct latch_level_less
416 		: public std::binary_function<
417 		  latch_level_t,
418 		  latch_level_t,
419 		  bool>
420 	{
421 		/** @return true if lhs < rhs */
422 		bool operator()(
423 			const latch_level_t& lhs,
424 			const latch_level_t& rhs) const
425 			UNIV_NOTHROW
426 		{
427 			return(lhs < rhs);
428 		}
429 	};
430 
431 	typedef std::map<
432 		latch_level_t,
433 		std::string,
434 		latch_level_less,
435 		ut_allocator<std::pair<const latch_level_t, std::string> > >
436 		Levels;
437 
438 	/** Mutex protecting the deadlock detector data structures. */
439 	Mutex			m_mutex;
440 
441 	/** Thread specific data. Protected by m_mutex. */
442 	ThreadMap		m_threads;
443 
444 	/** Mapping from latch level to its string representation. */
445 	Levels			m_levels;
446 
447 	/** The singleton instance. Must be created in single threaded mode. */
448 	static LatchDebug*	s_instance;
449 
450 public:
451 	/** For checking whether this module has been initialised or not. */
452 	static bool		s_initialized;
453 };
454 
455 /** The latch order checking infra-structure */
456 LatchDebug* LatchDebug::s_instance = NULL;
457 bool LatchDebug::s_initialized = false;
458 
459 #define LEVEL_MAP_INSERT(T)						\
460 do {									\
461 	std::pair<Levels::iterator, bool>	result =		\
462 		m_levels.insert(Levels::value_type(T, #T));		\
463 	ut_ad(result.second);						\
464 } while(0)
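/* For illustration, LEVEL_MAP_INSERT(SYNC_FSP) expands (modulo the do/while
wrapper) to roughly the following, mapping the enum value to its own name: */
#if 0
std::pair<Levels::iterator, bool>	result =
	m_levels.insert(Levels::value_type(SYNC_FSP, "SYNC_FSP"));

ut_ad(result.second);		// each level may only be inserted once
#endif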
465 
466 /** Setup the mapping from level ID to level name mapping */
467 LatchDebug::LatchDebug()
468 {
469 	m_mutex.init();
470 
471 	LEVEL_MAP_INSERT(SYNC_UNKNOWN);
472 	LEVEL_MAP_INSERT(SYNC_MUTEX);
473 	LEVEL_MAP_INSERT(RW_LOCK_SX);
474 	LEVEL_MAP_INSERT(RW_LOCK_X_WAIT);
475 	LEVEL_MAP_INSERT(RW_LOCK_S);
476 	LEVEL_MAP_INSERT(RW_LOCK_X);
477 	LEVEL_MAP_INSERT(RW_LOCK_NOT_LOCKED);
478 	LEVEL_MAP_INSERT(SYNC_MONITOR_MUTEX);
479 	LEVEL_MAP_INSERT(SYNC_ANY_LATCH);
480 	LEVEL_MAP_INSERT(SYNC_DOUBLEWRITE);
481 	LEVEL_MAP_INSERT(SYNC_BUF_FLUSH_LIST);
482 	LEVEL_MAP_INSERT(SYNC_BUF_BLOCK);
483 	LEVEL_MAP_INSERT(SYNC_BUF_PAGE_HASH);
484 	LEVEL_MAP_INSERT(SYNC_BUF_POOL);
485 	LEVEL_MAP_INSERT(SYNC_POOL);
486 	LEVEL_MAP_INSERT(SYNC_POOL_MANAGER);
487 	LEVEL_MAP_INSERT(SYNC_SEARCH_SYS);
488 	LEVEL_MAP_INSERT(SYNC_WORK_QUEUE);
489 	LEVEL_MAP_INSERT(SYNC_FTS_TOKENIZE);
490 	LEVEL_MAP_INSERT(SYNC_FTS_OPTIMIZE);
491 	LEVEL_MAP_INSERT(SYNC_FTS_BG_THREADS);
492 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE_INIT);
493 	LEVEL_MAP_INSERT(SYNC_RECV);
494 	LEVEL_MAP_INSERT(SYNC_LOG_FLUSH_ORDER);
495 	LEVEL_MAP_INSERT(SYNC_LOG);
496 	LEVEL_MAP_INSERT(SYNC_LOG_WRITE);
497 	LEVEL_MAP_INSERT(SYNC_PAGE_CLEANER);
498 	LEVEL_MAP_INSERT(SYNC_PURGE_QUEUE);
499 	LEVEL_MAP_INSERT(SYNC_TRX_SYS_HEADER);
500 	LEVEL_MAP_INSERT(SYNC_REC_LOCK);
501 	LEVEL_MAP_INSERT(SYNC_THREADS);
502 	LEVEL_MAP_INSERT(SYNC_TRX);
503 	LEVEL_MAP_INSERT(SYNC_TRX_SYS);
504 	LEVEL_MAP_INSERT(SYNC_LOCK_SYS);
505 	LEVEL_MAP_INSERT(SYNC_LOCK_WAIT_SYS);
506 	LEVEL_MAP_INSERT(SYNC_INDEX_ONLINE_LOG);
507 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP);
508 	LEVEL_MAP_INSERT(SYNC_IBUF_BITMAP_MUTEX);
509 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE);
510 	LEVEL_MAP_INSERT(SYNC_IBUF_TREE_NODE_NEW);
511 	LEVEL_MAP_INSERT(SYNC_IBUF_INDEX_TREE);
512 	LEVEL_MAP_INSERT(SYNC_IBUF_MUTEX);
513 	LEVEL_MAP_INSERT(SYNC_FSP_PAGE);
514 	LEVEL_MAP_INSERT(SYNC_FSP);
515 	LEVEL_MAP_INSERT(SYNC_EXTERN_STORAGE);
516 	LEVEL_MAP_INSERT(SYNC_TRX_UNDO_PAGE);
517 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER);
518 	LEVEL_MAP_INSERT(SYNC_RSEG_HEADER_NEW);
519 	LEVEL_MAP_INSERT(SYNC_NOREDO_RSEG);
520 	LEVEL_MAP_INSERT(SYNC_REDO_RSEG);
521 	LEVEL_MAP_INSERT(SYNC_TRX_UNDO);
522 	LEVEL_MAP_INSERT(SYNC_PURGE_LATCH);
523 	LEVEL_MAP_INSERT(SYNC_TREE_NODE);
524 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_FROM_HASH);
525 	LEVEL_MAP_INSERT(SYNC_TREE_NODE_NEW);
526 	LEVEL_MAP_INSERT(SYNC_ANALYZE_INDEX);
527 	LEVEL_MAP_INSERT(SYNC_INDEX_TREE);
528 	LEVEL_MAP_INSERT(SYNC_IBUF_PESS_INSERT_MUTEX);
529 	LEVEL_MAP_INSERT(SYNC_IBUF_HEADER);
530 	LEVEL_MAP_INSERT(SYNC_DICT_HEADER);
531 	LEVEL_MAP_INSERT(SYNC_STATS_AUTO_RECALC);
532 	LEVEL_MAP_INSERT(SYNC_DICT_AUTOINC_MUTEX);
533 	LEVEL_MAP_INSERT(SYNC_DICT);
534 	LEVEL_MAP_INSERT(SYNC_FTS_CACHE);
535 	LEVEL_MAP_INSERT(SYNC_DICT_OPERATION);
536 	LEVEL_MAP_INSERT(SYNC_FILE_FORMAT_TAG);
537 	LEVEL_MAP_INSERT(SYNC_TRX_I_S_LAST_READ);
538 	LEVEL_MAP_INSERT(SYNC_TRX_I_S_RWLOCK);
539 	LEVEL_MAP_INSERT(SYNC_RECV_WRITER);
540 	LEVEL_MAP_INSERT(SYNC_LEVEL_VARYING);
541 	LEVEL_MAP_INSERT(SYNC_NO_ORDER_CHECK);
542 
543 	/* Enum count starts from 0 */
544 	ut_ad(m_levels.size() == SYNC_LEVEL_MAX + 1);
545 }
546 
547 /** Print the latches acquired by a thread
548 @param[in]	latches		Latches acquired by a thread */
549 void
550 LatchDebug::print_latches(const Latches* latches) const
551 	UNIV_NOTHROW
552 {
553 	ib::error() << "Latches already owned by this thread: ";
554 
555 	Latches::const_iterator	end = latches->end();
556 
557 	for (Latches::const_iterator it = latches->begin();
558 	     it != end;
559 	     ++it) {
560 
561 		ib::error()
562 			<< sync_latch_get_name(it->m_latch->get_id())
563 			<< " -> "
564 			<< it->m_level << " "
565 			<< "(" << get_level_name(it->m_level) << ")";
566 	}
567 }
568 
569 /** Report error and abort
570 @param[in]	latches		thread's existing latches
571 @param[in]	latched		The existing latch causing the invariant to fail
572 @param[in]	level		The new level request that breaks the order */
573 void
574 LatchDebug::crash(
575 	const Latches*	latches,
576 	const Latched*	latched,
577 	latch_level_t	level) const
578 	UNIV_NOTHROW
579 {
580 	const latch_t*		latch = latched->m_latch;
581 	const std::string&	in_level_name = get_level_name(level);
582 
583 	const std::string&	latch_level_name =
584 		get_level_name(latched->m_level);
585 
586 	ib::error()
587 		<< "Thread " << os_thread_pf(os_thread_get_curr_id())
588 		<< " already owns a latch "
589 		<< sync_latch_get_name(latch->m_id) << " at level"
590 		<< " " << latched->m_level << " (" << latch_level_name
591 		<< " ), which is at a lower/same level than the"
592 		<< " requested latch: "
593 		<< level << " (" << in_level_name << "). "
594 		<< latch->to_string();
595 
596 	print_latches(latches);
597 
598 	ut_error;
599 }
600 
601 /** Check that all the latches already owned by a thread have a lower
602 level than limit.
603 @param[in]	latches		the thread's existing (acquired) latches
604 @param[in]	limit		to check against
605 @return latched info if there is one with a level <= limit. */
606 const Latched*
607 LatchDebug::less(
608 	const Latches*	latches,
609 	latch_level_t	limit) const
610 	UNIV_NOTHROW
611 {
612 	Latches::const_iterator	end = latches->end();
613 
614 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
615 
616 		if (it->m_level <= limit) {
617 			return(&(*it));
618 		}
619 	}
620 
621 	return(NULL);
622 }
623 
624 /** Do a basic ordering check.
625 @param[in]	latches		thread's existing latches
626 @param[in]	requested_level	Level requested by latch
627 @param[in]	in_level	declared ulint so that we can do level - 1.
628 				The level of the latch that the thread is
629 				trying to acquire
630 @return true if passes, else crash with error message. */
631 bool
632 LatchDebug::basic_check(
633 	const Latches*	latches,
634 	latch_level_t	requested_level,
635 	ulint		in_level) const
636 	UNIV_NOTHROW
637 {
638 	latch_level_t	level = latch_level_t(in_level);
639 
640 	ut_ad(level < SYNC_LEVEL_MAX);
641 
642 	const Latched*	latched = less(latches, level);
643 
644 	if (latched != NULL) {
645 		crash(latches, latched, requested_level);
646 		return(false);
647 	}
648 
649 	return(true);
650 }
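/* A worked example of how the check fires (the latch variable is
hypothetical, and the calls would run inside LatchDebug): suppose a thread
already holds buf_pool->mutex and then requests the fil_space latch, which
sits at a higher level in the latching order. */
#if 0
Latches*	latches = thread_latches(true);

latches->push_back(Latched(buf_pool_mutex_latch, SYNC_BUF_POOL));

// less() finds the SYNC_BUF_POOL entry because its level is <= SYNC_FSP,
// so crash() prints both latches and ut_error aborts the server.
basic_check(latches, SYNC_FSP, SYNC_FSP);
#endif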
651 
652 /** Create a new instance if one doesn't exist else return the existing one.
653 @param[in]	add		add an empty entry if one is not found
654 				(default no)
655 @return	pointer to a thread's acquired latches. */
656 Latches*
657 LatchDebug::thread_latches(bool add)
658 	UNIV_NOTHROW
659 {
660 	m_mutex.enter();
661 
662 	os_thread_id_t		thread_id = os_thread_get_curr_id();
663 	ThreadMap::iterator	lb = m_threads.lower_bound(thread_id);
664 
665 	if (lb != m_threads.end()
666 	    && !(m_threads.key_comp()(thread_id, lb->first))) {
667 
668 		Latches*	latches = lb->second;
669 
670 		m_mutex.exit();
671 
672 		return(latches);
673 
674 	} else if (!add) {
675 
676 		m_mutex.exit();
677 
678 		return(NULL);
679 
680 	} else {
681 		typedef ThreadMap::value_type value_type;
682 
683 		Latches*	latches = UT_NEW_NOKEY(Latches());
684 
685 		ut_a(latches != NULL);
686 
687 		latches->reserve(32);
688 
689 		m_threads.insert(lb, value_type(thread_id, latches));
690 
691 		m_mutex.exit();
692 
693 		return(latches);
694 	}
695 }
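/* The lookup above uses the "lower_bound + hinted insert" idiom to avoid
searching the map twice. A minimal, generic sketch of the same idiom with a
plain std::map (the container and values here are hypothetical): */
#if 0
std::map<int, int>		m;
const int			key = 42;

std::map<int, int>::iterator	lb = m.lower_bound(key);

if (lb != m.end() && !m.key_comp()(key, lb->first)) {
	// Key already present; lb points at it.
	++lb->second;
} else {
	// Not present; insert with lb as a hint, which is amortised O(1)
	// when the hint is correct.
	m.insert(lb, std::map<int, int>::value_type(key, 1));
}
#endif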
696 
697 /** Checks if the level value exists in the thread's acquired latches.
698 @param[in]	latches		the thread's existing (acquired) latches
699 @param[in]	level		to lookup
700 @return	latch if found or 0 */
701 const latch_t*
702 LatchDebug::find(
703 	const Latches*	latches,
704 	latch_level_t	level) const UNIV_NOTHROW
705 {
706 	Latches::const_iterator	end = latches->end();
707 
708 	for (Latches::const_iterator it = latches->begin(); it != end; ++it) {
709 
710 		if (it->m_level == level) {
711 
712 			return(it->m_latch);
713 		}
714 	}
715 
716 	return(0);
717 }
718 
719 /** Checks if the level value exists in the thread's acquired latches.
720 @param[in]	 level		The level to lookup
721 @return	latch if found or NULL */
722 const latch_t*
723 LatchDebug::find(latch_level_t level)
724 	UNIV_NOTHROW
725 {
726 	return(find(thread_latches(), level));
727 }
728 
729 /**
730 Adds a latch and its level in the thread level array. Allocates the memory
731 for the array if called first time for this OS thread. Makes the checks
732 against other latch levels stored in the array for this thread.
733 @param[in]	latch	pointer to a mutex or an rw-lock
734 @param[in]	level	level in the latching order
735 @return the thread's latches */
736 Latches*
737 LatchDebug::check_order(
738 	const latch_t*	latch,
739 	latch_level_t	level)
740 	UNIV_NOTHROW
741 {
742 	ut_ad(latch->get_level() != SYNC_LEVEL_VARYING);
743 
744 	Latches*	latches = thread_latches(true);
745 
746 	/* NOTE that there is a problem with _NODE and _LEAF levels: if the
747 	B-tree height changes, then a leaf can change to an internal node
748 	or the other way around. We do not know at present if this can cause
749 	unnecessary assertion failures below. */
750 
751 	switch (level) {
752 
753 	case SYNC_NO_ORDER_CHECK:
754 	case SYNC_EXTERN_STORAGE:
755 	case SYNC_TREE_NODE_FROM_HASH:
756 		/* Do no order checking */
757 		break;
758 
759 	case SYNC_TRX_SYS_HEADER:
760 
761 		if (srv_is_being_started) {
762 			/* This is violated during trx_sys_create_rsegs()
763 			when creating additional rollback segments when
764 			upgrading in innobase_start_or_create_for_mysql(). */
765 			break;
766 		}
767 
768 		/* Fall through */
769 
770 	case SYNC_MONITOR_MUTEX:
771 	case SYNC_RECV:
772 	case SYNC_FTS_BG_THREADS:
773 	case SYNC_WORK_QUEUE:
774 	case SYNC_FTS_TOKENIZE:
775 	case SYNC_FTS_OPTIMIZE:
776 	case SYNC_FTS_CACHE:
777 	case SYNC_FTS_CACHE_INIT:
778 	case SYNC_PAGE_CLEANER:
779 	case SYNC_LOG:
780 	case SYNC_LOG_WRITE:
781 	case SYNC_LOG_FLUSH_ORDER:
782 	case SYNC_FILE_FORMAT_TAG:
783 	case SYNC_DOUBLEWRITE:
784 	case SYNC_SEARCH_SYS:
785 	case SYNC_THREADS:
786 	case SYNC_LOCK_SYS:
787 	case SYNC_LOCK_WAIT_SYS:
788 	case SYNC_TRX_SYS:
789 	case SYNC_IBUF_BITMAP_MUTEX:
790 	case SYNC_REDO_RSEG:
791 	case SYNC_NOREDO_RSEG:
792 	case SYNC_TRX_UNDO:
793 	case SYNC_PURGE_LATCH:
794 	case SYNC_PURGE_QUEUE:
795 	case SYNC_DICT_AUTOINC_MUTEX:
796 	case SYNC_DICT_OPERATION:
797 	case SYNC_DICT_HEADER:
798 	case SYNC_TRX_I_S_RWLOCK:
799 	case SYNC_TRX_I_S_LAST_READ:
800 	case SYNC_IBUF_MUTEX:
801 	case SYNC_INDEX_ONLINE_LOG:
802 	case SYNC_STATS_AUTO_RECALC:
803 	case SYNC_POOL:
804 	case SYNC_POOL_MANAGER:
805 	case SYNC_RECV_WRITER:
806 
807 		basic_check(latches, level, level);
808 		break;
809 
810 	case SYNC_ANY_LATCH:
811 
812 		/* Temporary workaround for LATCH_ID_RTR_*_MUTEX */
813 		if (is_rtr_mutex(latch)) {
814 
815 			const Latched*	latched = less(latches, level);
816 
817 			if (latched == NULL
818 			    || (latched != NULL
819 				&& is_rtr_mutex(latched->m_latch))) {
820 
821 				/* No violation */
822 				break;
823 
824 			}
825 
826 			crash(latches, latched, level);
827 
828 		} else {
829 			basic_check(latches, level, level);
830 		}
831 
832 		break;
833 
834 	case SYNC_TRX:
835 
836 		/* Either the thread must own the lock_sys->mutex, or
837 		it is allowed to own only ONE trx_t::mutex. */
838 
839 		if (less(latches, level) != NULL) {
840 			basic_check(latches, level, level - 1);
841 			ut_a(find(latches, SYNC_LOCK_SYS) != 0);
842 		}
843 		break;
844 
845 	case SYNC_BUF_FLUSH_LIST:
846 	case SYNC_BUF_POOL:
847 
848 		/* We can have multiple mutexes of this type therefore we
849 		can only check whether the greater than condition holds. */
850 
851 		basic_check(latches, level, level - 1);
852 		break;
853 
854 	case SYNC_BUF_PAGE_HASH:
855 
856 		/* Multiple page_hash locks are only allowed during
857 		buf_validate and that is where buf_pool mutex is already
858 		held. */
859 
860 		/* Fall through */
861 
862 	case SYNC_BUF_BLOCK:
863 
864 		/* Either the thread must own the (buffer pool) buf_pool->mutex
865 		or it is allowed to latch only ONE of (buffer block)
866 		block->mutex or buf_pool->zip_mutex. */
867 
868 		if (less(latches, level) != NULL) {
869 			basic_check(latches, level, level - 1);
870 			ut_a(find(latches, SYNC_BUF_POOL) != 0);
871 		}
872 		break;
873 
874 	case SYNC_REC_LOCK:
875 
876 		if (find(latches, SYNC_LOCK_SYS) != 0) {
877 			basic_check(latches, level, SYNC_REC_LOCK - 1);
878 		} else {
879 			basic_check(latches, level, SYNC_REC_LOCK);
880 		}
881 		break;
882 
883 	case SYNC_IBUF_BITMAP:
884 
885 		/* Either the thread must own the master mutex to all
886 		the bitmap pages, or it is allowed to latch only ONE
887 		bitmap page. */
888 
889 		if (find(latches, SYNC_IBUF_BITMAP_MUTEX) != 0) {
890 
891 			basic_check(latches, level, SYNC_IBUF_BITMAP - 1);
892 
893 		} else if (!srv_is_being_started) {
894 
895 			/* This is violated during trx_sys_create_rsegs()
896 			when creating additional rollback segments during
897 			upgrade. */
898 
899 			basic_check(latches, level, SYNC_IBUF_BITMAP);
900 		}
901 		break;
902 
903 	case SYNC_FSP_PAGE:
904 		ut_a(find(latches, SYNC_FSP) != 0);
905 		break;
906 
907 	case SYNC_FSP:
908 
909 		ut_a(find(latches, SYNC_FSP) != 0
910 		     || basic_check(latches, level, SYNC_FSP));
911 		break;
912 
913 	case SYNC_TRX_UNDO_PAGE:
914 
915 		/* Purge is allowed to read in as many UNDO pages as it likes.
916 		The purge thread can read the UNDO pages without any covering
917 		mutex. */
918 
919 		ut_a(find(latches, SYNC_TRX_UNDO) != 0
920 		     || find(latches, SYNC_REDO_RSEG) != 0
921 		     || find(latches, SYNC_NOREDO_RSEG) != 0
922 		     || basic_check(latches, level, level - 1));
923 		break;
924 
925 	case SYNC_RSEG_HEADER:
926 
927 		ut_a(find(latches, SYNC_REDO_RSEG) != 0
928 		     || find(latches, SYNC_NOREDO_RSEG) != 0);
929 		break;
930 
931 	case SYNC_RSEG_HEADER_NEW:
932 
933 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
934 		break;
935 
936 	case SYNC_TREE_NODE:
937 
938 		{
939 			const latch_t*	fsp_latch;
940 
941 			fsp_latch = find(latches, SYNC_FSP);
942 
943 			ut_a((fsp_latch != NULL
944 			      && fsp_latch->is_temp_fsp())
945 			     || find(latches, SYNC_INDEX_TREE) != 0
946 			     || find(latches, SYNC_DICT_OPERATION)
947 			     || basic_check(latches,
948 					    level, SYNC_TREE_NODE - 1));
949 		}
950 
951 		break;
952 
953 	case SYNC_TREE_NODE_NEW:
954 
955 		ut_a(find(latches, SYNC_FSP_PAGE) != 0);
956 		break;
957 
958 	case SYNC_INDEX_TREE:
959 
960 		basic_check(latches, level, SYNC_TREE_NODE - 1);
961 		break;
962 
963 	case SYNC_ANALYZE_INDEX:
964 
965 		basic_check(latches, level, SYNC_ANALYZE_INDEX - 1);
966 		break;
967 
968 	case SYNC_IBUF_TREE_NODE:
969 
970 		ut_a(find(latches, SYNC_IBUF_INDEX_TREE) != 0
971 		     || basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1));
972 		break;
973 
974 	case SYNC_IBUF_TREE_NODE_NEW:
975 
976 		/* ibuf_add_free_page() allocates new pages for the change
977 		buffer while only holding the tablespace x-latch. These
978 		pre-allocated new pages may only be used while holding
979 		ibuf_mutex, in btr_page_alloc_for_ibuf(). */
980 
981 		ut_a(find(latches, SYNC_IBUF_MUTEX) != 0
982 		     || find(latches, SYNC_FSP) != 0);
983 		break;
984 
985 	case SYNC_IBUF_INDEX_TREE:
986 
987 		if (find(latches, SYNC_FSP) != 0) {
988 			basic_check(latches, level, level - 1);
989 		} else {
990 			basic_check(latches, level, SYNC_IBUF_TREE_NODE - 1);
991 		}
992 		break;
993 
994 	case SYNC_IBUF_PESS_INSERT_MUTEX:
995 
996 		basic_check(latches, level, SYNC_FSP - 1);
997 		ut_a(find(latches, SYNC_IBUF_MUTEX) == 0);
998 		break;
999 
1000 	case SYNC_IBUF_HEADER:
1001 
1002 		basic_check(latches, level, SYNC_FSP - 1);
1003 		ut_a(find(latches, SYNC_IBUF_MUTEX) == NULL);
1004 		ut_a(find(latches, SYNC_IBUF_PESS_INSERT_MUTEX) == NULL);
1005 		break;
1006 
1007 	case SYNC_DICT:
1008 		basic_check(latches, level, SYNC_DICT);
1009 		break;
1010 
1011 	case SYNC_MUTEX:
1012 	case SYNC_UNKNOWN:
1013 	case SYNC_LEVEL_VARYING:
1014 	case RW_LOCK_X:
1015 	case RW_LOCK_X_WAIT:
1016 	case RW_LOCK_S:
1017 	case RW_LOCK_SX:
1018 	case RW_LOCK_NOT_LOCKED:
1019 		/* These levels should never be set for a latch. */
1020 		ut_error;
1021 		break;
1022 	}
1023 
1024 	return(latches);
1025 }
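/* A worked example of one branch above: with SYNC_TRX, a thread may hold
many trx_t::mutex instances only while it also holds lock_sys->mutex.
Acquiring trx1->mutex and then trx2->mutex without lock_sys->mutex makes
less() find the first trx_t::mutex at a level <= SYNC_TRX; basic_check()
then passes (equal levels are tolerated via level - 1), but the
ut_a(find(latches, SYNC_LOCK_SYS) != 0) assertion fails. */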
1026 
1027 /** Removes a latch from the thread level array if it is found there.
1028 @param[in]	latch		The latch that was released/unlocked
1029 It is not an error if a latch with a varying level is not found, as we
1030 presently are not able to determine the level for every latch
1031 reservation the program does; a missing fixed-level latch, however,
1032 triggers an assertion below. */
1033 void
1034 LatchDebug::unlock(const latch_t* latch)
1035 	UNIV_NOTHROW
1036 {
1037 	if (latch->get_level() == SYNC_LEVEL_VARYING) {
1038 		// We don't have varying level mutexes
1039 		ut_ad(latch->m_rw_lock);
1040 	}
1041 
1042 	Latches*	latches;
1043 
1044 	if (*latch->get_name() == '.') {
1045 
1046 		/* Ignore diagnostic latches, starting with '.' */
1047 
1048 	} else if ((latches = thread_latches()) != NULL) {
1049 
1050 		Latches::reverse_iterator	rend = latches->rend();
1051 
1052 		for (Latches::reverse_iterator it = latches->rbegin();
1053 		     it != rend;
1054 		     ++it) {
1055 
1056 			if (it->m_latch != latch) {
1057 
1058 				continue;
1059 			}
1060 
1061 			Latches::iterator	i = it.base();
1062 
1063 			latches->erase(--i);
1064 
1065 			/* If this thread doesn't own any more
1066 			latches remove from the map.
1067 
1068 			FIXME: Perhaps use the master thread
1069 			to do purge. Or, do it from close connection.
1070 			This could be expensive. */
1071 
1072 			if (latches->empty()) {
1073 
1074 				m_mutex.enter();
1075 
1076 				os_thread_id_t	thread_id;
1077 
1078 				thread_id = os_thread_get_curr_id();
1079 
1080 				m_threads.erase(thread_id);
1081 
1082 				m_mutex.exit();
1083 
1084 				UT_DELETE(latches);
1085 			}
1086 
1087 			return;
1088 		}
1089 
1090 		if (latch->get_level() != SYNC_LEVEL_VARYING) {
1091 			ib::error()
1092 				<< "Couldn't find latch "
1093 				<< sync_latch_get_name(latch->get_id());
1094 
1095 			print_latches(latches);
1096 
1097 			/** Must find the latch. */
1098 			ut_error;
1099 		}
1100 	}
1101 }
1102 
1103 /** Get the latch id from a latch name.
1104 @param[in]	name	Latch name
1105 @return latch id if found else LATCH_ID_NONE. */
1106 latch_id_t
1107 sync_latch_get_id(const char* name)
1108 {
1109 	LatchMetaData::const_iterator	end = latch_meta.end();
1110 
1111 	/* Linear scan should be OK, this should be extremely rare. */
1112 
1113 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1114 	     it != end;
1115 	     ++it) {
1116 
1117 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1118 
1119 			continue;
1120 
1121 		} else if (strcmp((*it)->get_name(), name) == 0) {
1122 
1123 			return((*it)->get_id());
1124 		}
1125 	}
1126 
1127 	return(LATCH_ID_NONE);
1128 }
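/* A hedged usage sketch; the name must match the string the latch was
registered with in sync_latch_meta_init() (the exact name used below is an
assumption): */
#if 0
latch_id_t	id = sync_latch_get_id("buf_pool");

if (id != LATCH_ID_NONE) {
	// id can now be used to index latch_meta directly.
}
#endif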
1129 
1130 /** Get the latch name from a sync level
1131 @param[in]	level		Latch level to lookup
1132 @return NULL if not found. */
1133 const char*
1134 sync_latch_get_name(latch_level_t level)
1135 {
1136 	LatchMetaData::const_iterator	end = latch_meta.end();
1137 
1138 	/* Linear scan should be OK, this should be extremely rare. */
1139 
1140 	for (LatchMetaData::const_iterator it = latch_meta.begin();
1141 	     it != end;
1142 	     ++it) {
1143 
1144 		if (*it == NULL || (*it)->get_id() == LATCH_ID_NONE) {
1145 
1146 			continue;
1147 
1148 		} else if ((*it)->get_level() == level) {
1149 
1150 			return((*it)->get_name());
1151 		}
1152 	}
1153 
1154 	return(0);
1155 }
1156 
1157 /** Check if it is OK to acquire the latch.
1158 @param[in]	latch	latch type */
1159 void
1160 sync_check_lock_validate(const latch_t* latch)
1161 {
1162 	if (LatchDebug::instance() != NULL) {
1163 		LatchDebug::instance()->lock_validate(
1164 			latch, latch->get_level());
1165 	}
1166 }
1167 
1168 /** Note that the lock has been granted
1169 @param[in]	latch	latch type */
1170 void
1171 sync_check_lock_granted(const latch_t* latch)
1172 {
1173 	if (LatchDebug::instance() != NULL) {
1174 		LatchDebug::instance()->lock_granted(latch, latch->get_level());
1175 	}
1176 }
1177 
1178 /** Check if it is OK to acquire the latch.
1179 @param[in]	latch	latch type
1180 @param[in]	level	Latch level */
1181 void
1182 sync_check_lock(
1183 	const latch_t*	latch,
1184 	latch_level_t	level)
1185 {
1186 	if (LatchDebug::instance() != NULL) {
1187 
1188 		ut_ad(latch->get_level() == SYNC_LEVEL_VARYING);
1189 		ut_ad(latch->get_id() == LATCH_ID_BUF_BLOCK_LOCK);
1190 
1191 		LatchDebug::instance()->lock_validate(latch, level);
1192 		LatchDebug::instance()->lock_granted(latch, level);
1193 	}
1194 }
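/* How these hooks are meant to be paired by instrumented acquisition code
(a sketch; acquire_low() is a hypothetical stand-in for the real mutex or
rw-lock acquisition path): */
#if 0
static
void
my_latch_acquire(latch_t* latch)
{
	/* Check the latching order before we may block on the latch ... */
	sync_check_lock_validate(latch);

	acquire_low(latch);

	/* ... and record it in this thread's latch list once granted. */
	sync_check_lock_granted(latch);
}
#endif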
1195 
1196 /** Check if it is OK to re-acquire the lock.
1197 @param[in]	latch		RW-LOCK to relock (recursive X locks) */
1198 void
1199 sync_check_relock(const latch_t* latch)
1200 {
1201 	if (LatchDebug::instance() != NULL) {
1202 		LatchDebug::instance()->relock(latch);
1203 	}
1204 }
1205 
1206 /** Removes a latch from the thread level array if it is found there.
1207 @param[in]	latch		The latch to unlock */
1208 void
1209 sync_check_unlock(const latch_t* latch)
1210 {
1211 	if (LatchDebug::instance() != NULL) {
1212 		LatchDebug::instance()->unlock(latch);
1213 	}
1214 }
1215 
1216 /** Checks if the level array for the current thread contains a
1217 mutex or rw-latch at the specified level.
1218 @param[in]	level		to find
1219 @return	a matching latch, or NULL if not found */
1220 const latch_t*
1221 sync_check_find(latch_level_t level)
1222 {
1223 	if (LatchDebug::instance() != NULL) {
1224 		return(LatchDebug::instance()->find(level));
1225 	}
1226 
1227 	return(NULL);
1228 }
1229 
1230 /** Iterate over the thread's latches.
1231 @param[in,out]	functor		called for each element.
1232 @return false if the sync debug hasn't been initialised
1233 @return the value returned by the functor */
1234 bool
1235 sync_check_iterate(sync_check_functor_t& functor)
1236 {
1237 	if (LatchDebug::instance() != NULL) {
1238 		return(LatchDebug::instance()->for_each(functor));
1239 	}
1240 
1241 	return(false);
1242 }
1243 
1244 /** Enable sync order checking.
1245 
1246 Note: We don't enforce any synchronisation checks. The caller must ensure
1247 that no races can occur */
1248 void
1249 sync_check_enable()
1250 {
1251 	if (!srv_sync_debug) {
1252 
1253 		return;
1254 	}
1255 
1256 	/* We should always call this before we create threads. */
1257 
1258 	LatchDebug::create_instance();
1259 }
1260 
1261 /** Initialise the debug data structures */
1262 void
1263 LatchDebug::init()
1264 	UNIV_NOTHROW
1265 {
1266 	ut_a(rw_lock_debug_event == NULL);
1267 
1268 	mutex_create(LATCH_ID_RW_LOCK_DEBUG, &rw_lock_debug_mutex);
1269 
1270 	rw_lock_debug_event = os_event_create("rw_lock_debug_event");
1271 
1272 	rw_lock_debug_waiters = FALSE;
1273 }
1274 
1275 /** Shutdown the latch debug checking
1276 
1277 Note: We don't enforce any synchronisation checks. The caller must ensure
1278 that no races can occur */
1279 void
1280 LatchDebug::shutdown()
1281 	UNIV_NOTHROW
1282 {
1283 	ut_a(rw_lock_debug_event != NULL);
1284 
1285 	os_event_destroy(rw_lock_debug_event);
1286 
1287 	rw_lock_debug_event = NULL;
1288 
1289 	mutex_free(&rw_lock_debug_mutex);
1290 
1291 	if (instance() == NULL) {
1292 
1293 		s_initialized = false;
1294 
1295 		return;
1296 	}
1297 
1298 	ut_a(s_initialized);
1299 
1300 	s_initialized = false;
1301 
1302 	UT_DELETE(s_instance);
1303 
1304 	LatchDebug::s_instance = NULL;
1305 }
1306 
1307 /** Acquires the debug mutex. We cannot use the mutex defined in sync0sync,
1308 because the debug mutex is also acquired in sync0arr while holding the OS
1309 mutex protecting the sync array, and the ordinary mutex_enter might
1310 recursively call routines in sync0arr, leading to a deadlock on the OS
1311 mutex. */
1312 void
1313 rw_lock_debug_mutex_enter()
1314 {
1315 	for (;;) {
1316 
1317 		if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
1318 			return;
1319 		}
1320 
1321 		os_event_reset(rw_lock_debug_event);
1322 
1323 		rw_lock_debug_waiters = TRUE;
1324 
1325 		if (0 == mutex_enter_nowait(&rw_lock_debug_mutex)) {
1326 			return;
1327 		}
1328 
1329 		os_event_wait(rw_lock_debug_event);
1330 	}
1331 }
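/* Typical usage (sketch): the debug info list of an rw-lock may only be
modified between these two calls, in addition to holding the mutex that
protects the lock itself. The list manipulation shown is hypothetical. */
#if 0
rw_lock_debug_mutex_enter();

/* ... add or remove an entry in lock->debug_list ... */

rw_lock_debug_mutex_exit();
#endif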
1332 
1333 /** Releases the debug mutex. */
1334 void
1335 rw_lock_debug_mutex_exit()
1336 {
1337 	mutex_exit(&rw_lock_debug_mutex);
1338 
1339 	if (rw_lock_debug_waiters) {
1340 		rw_lock_debug_waiters = FALSE;
1341 		os_event_set(rw_lock_debug_event);
1342 	}
1343 }
1344 #endif /* UNIV_DEBUG */
1345 
1346 /* Meta data for all the InnoDB latches. If a latch is not recorded
1347 here then it will not be considered by the deadlock checks. */
1348 LatchMetaData	latch_meta;
1349 
1350 /** Load the latch meta data. */
1351 static
1352 void
1353 sync_latch_meta_init()
1354 	UNIV_NOTHROW
1355 {
1356 	latch_meta.resize(LATCH_ID_MAX);
1357 
1358 	latch_meta[0] = NULL;
1359 
1360 	/* The latches should be ordered on latch_id_t, so that we can
1361 	index directly into the vector to update and fetch meta-data. */
1362 
1363 	LATCH_ADD_MUTEX(AUTOINC, SYNC_DICT_AUTOINC_MUTEX, autoinc_mutex_key);
1364 
1365 #ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
1366 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK,
1367 			buffer_block_mutex_key);
1368 #else
1369 	LATCH_ADD_MUTEX(BUF_BLOCK_MUTEX, SYNC_BUF_BLOCK, PFS_NOT_INSTRUMENTED);
1370 #endif /* !PFS_SKIP_BUFFER_MUTEX_RWLOCK */
1371 
1372 	LATCH_ADD_MUTEX(BUF_POOL, SYNC_BUF_POOL, buf_pool_mutex_key);
1373 
1374 	LATCH_ADD_MUTEX(BUF_POOL_ZIP, SYNC_BUF_BLOCK, buf_pool_zip_mutex_key);
1375 
1376 	LATCH_ADD_MUTEX(CACHE_LAST_READ, SYNC_TRX_I_S_LAST_READ,
1377 			cache_last_read_mutex_key);
1378 
1379 	LATCH_ADD_MUTEX(DICT_FOREIGN_ERR, SYNC_NO_ORDER_CHECK,
1380 			dict_foreign_err_mutex_key);
1381 
1382 	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);
1383 
1384 	LATCH_ADD_MUTEX(FILE_FORMAT_MAX, SYNC_FILE_FORMAT_TAG,
1385 			file_format_max_mutex_key);
1386 
1387 	LATCH_ADD_MUTEX(FIL_SYSTEM, SYNC_ANY_LATCH, fil_system_mutex_key);
1388 
1389 	LATCH_ADD_MUTEX(FLUSH_LIST, SYNC_BUF_FLUSH_LIST, flush_list_mutex_key);
1390 
1391 	LATCH_ADD_MUTEX(FTS_BG_THREADS, SYNC_FTS_BG_THREADS,
1392 			fts_bg_threads_mutex_key);
1393 
1394 	LATCH_ADD_MUTEX(FTS_DELETE, SYNC_FTS_OPTIMIZE, fts_delete_mutex_key);
1395 
1396 	LATCH_ADD_MUTEX(FTS_OPTIMIZE, SYNC_FTS_OPTIMIZE,
1397 			fts_optimize_mutex_key);
1398 
1399 	LATCH_ADD_MUTEX(FTS_DOC_ID, SYNC_FTS_OPTIMIZE, fts_doc_id_mutex_key);
1400 
1401 	LATCH_ADD_MUTEX(FTS_PLL_TOKENIZE, SYNC_FTS_TOKENIZE,
1402 			fts_pll_tokenize_mutex_key);
1403 
1404 	LATCH_ADD_MUTEX(HASH_TABLE_MUTEX, SYNC_BUF_PAGE_HASH,
1405 			hash_table_mutex_key);
1406 
1407 	LATCH_ADD_MUTEX(IBUF_BITMAP, SYNC_IBUF_BITMAP_MUTEX,
1408 			ibuf_bitmap_mutex_key);
1409 
1410 	LATCH_ADD_MUTEX(IBUF, SYNC_IBUF_MUTEX, ibuf_mutex_key);
1411 
1412 	LATCH_ADD_MUTEX(IBUF_PESSIMISTIC_INSERT, SYNC_IBUF_PESS_INSERT_MUTEX,
1413 			ibuf_pessimistic_insert_mutex_key);
1414 
1415 	LATCH_ADD_MUTEX(LOG_SYS, SYNC_LOG, log_sys_mutex_key);
1416 
1417 	LATCH_ADD_MUTEX(LOG_WRITE, SYNC_LOG_WRITE, log_sys_write_mutex_key);
1418 
1419 	LATCH_ADD_MUTEX(LOG_FLUSH_ORDER, SYNC_LOG_FLUSH_ORDER,
1420 			log_flush_order_mutex_key);
1421 
1422 	LATCH_ADD_MUTEX(MUTEX_LIST, SYNC_NO_ORDER_CHECK, mutex_list_mutex_key);
1423 
1424 	LATCH_ADD_MUTEX(PAGE_CLEANER, SYNC_PAGE_CLEANER,
1425 			page_cleaner_mutex_key);
1426 
1427 	LATCH_ADD_MUTEX(PURGE_SYS_PQ, SYNC_PURGE_QUEUE,
1428 			purge_sys_pq_mutex_key);
1429 
1430 	LATCH_ADD_MUTEX(RECALC_POOL, SYNC_STATS_AUTO_RECALC,
1431 			recalc_pool_mutex_key);
1432 
1433 	LATCH_ADD_MUTEX(RECV_SYS, SYNC_RECV, recv_sys_mutex_key);
1434 
1435 	LATCH_ADD_MUTEX(RECV_WRITER, SYNC_RECV_WRITER, recv_writer_mutex_key);
1436 
1437 	LATCH_ADD_MUTEX(REDO_RSEG, SYNC_REDO_RSEG, redo_rseg_mutex_key);
1438 
1439 	LATCH_ADD_MUTEX(NOREDO_RSEG, SYNC_NOREDO_RSEG, noredo_rseg_mutex_key);
1440 
1441 #ifdef UNIV_DEBUG
1442 	/* Mutex names starting with '.' are not tracked. They are assumed
1443 	to be diagnostic mutexes used in debugging. */
1444 	// latch_meta[LATCH_ID_RW_LOCK_DEBUG] =
1445 		LATCH_ADD_MUTEX(RW_LOCK_DEBUG,
1446 			SYNC_NO_ORDER_CHECK,
1447 			rw_lock_debug_mutex_key);
1448 #endif /* UNIV_DEBUG */
1449 
1450 	LATCH_ADD_MUTEX(RTR_SSN_MUTEX, SYNC_ANY_LATCH, rtr_ssn_mutex_key);
1451 
1452 	LATCH_ADD_MUTEX(RTR_ACTIVE_MUTEX, SYNC_ANY_LATCH,
1453 			rtr_active_mutex_key);
1454 
1455 	LATCH_ADD_MUTEX(RTR_MATCH_MUTEX, SYNC_ANY_LATCH, rtr_match_mutex_key);
1456 
1457 	LATCH_ADD_MUTEX(RTR_PATH_MUTEX, SYNC_ANY_LATCH, rtr_path_mutex_key);
1458 
1459 	LATCH_ADD_MUTEX(RW_LOCK_LIST, SYNC_NO_ORDER_CHECK,
1460 			rw_lock_list_mutex_key);
1461 
1462 	LATCH_ADD_MUTEX(RW_LOCK_MUTEX, SYNC_NO_ORDER_CHECK, rw_lock_mutex_key);
1463 
1464 	LATCH_ADD_MUTEX(SRV_DICT_TMPFILE, SYNC_DICT_OPERATION,
1465 			srv_dict_tmpfile_mutex_key);
1466 
1467 	LATCH_ADD_MUTEX(SRV_INNODB_MONITOR, SYNC_NO_ORDER_CHECK,
1468 			srv_innodb_monitor_mutex_key);
1469 
1470 	LATCH_ADD_MUTEX(SRV_MISC_TMPFILE, SYNC_ANY_LATCH,
1471 			srv_misc_tmpfile_mutex_key);
1472 
1473 	LATCH_ADD_MUTEX(SRV_MONITOR_FILE, SYNC_NO_ORDER_CHECK,
1474 			srv_monitor_file_mutex_key);
1475 
1476 #ifdef UNIV_DEBUG
1477 	LATCH_ADD_MUTEX(SYNC_THREAD, SYNC_NO_ORDER_CHECK,
1478 			sync_thread_mutex_key);
1479 #else
1480 	LATCH_ADD_MUTEX(SYNC_THREAD, SYNC_NO_ORDER_CHECK, PFS_NOT_INSTRUMENTED);
1481 #endif /* UNIV_DEBUG */
1482 
1483 	LATCH_ADD_MUTEX(BUF_DBLWR, SYNC_DOUBLEWRITE, buf_dblwr_mutex_key);
1484 
1485 	LATCH_ADD_MUTEX(TRX_UNDO, SYNC_TRX_UNDO, trx_undo_mutex_key);
1486 
1487 	LATCH_ADD_MUTEX(TRX_POOL, SYNC_POOL, trx_pool_mutex_key);
1488 
1489 	LATCH_ADD_MUTEX(TRX_POOL_MANAGER, SYNC_POOL_MANAGER,
1490 			trx_pool_manager_mutex_key);
1491 
1492 	LATCH_ADD_MUTEX(TRX, SYNC_TRX, trx_mutex_key);
1493 
1494 	LATCH_ADD_MUTEX(LOCK_SYS, SYNC_LOCK_SYS, lock_mutex_key);
1495 
1496 	LATCH_ADD_MUTEX(LOCK_SYS_WAIT, SYNC_LOCK_WAIT_SYS,
1497 			lock_wait_mutex_key);
1498 
1499 	LATCH_ADD_MUTEX(TRX_SYS, SYNC_TRX_SYS, trx_sys_mutex_key);
1500 
1501 	LATCH_ADD_MUTEX(SRV_SYS, SYNC_THREADS, srv_sys_mutex_key);
1502 
1503 	LATCH_ADD_MUTEX(SRV_SYS_TASKS, SYNC_ANY_LATCH, srv_threads_mutex_key);
1504 
1505 	LATCH_ADD_MUTEX(PAGE_ZIP_STAT_PER_INDEX, SYNC_ANY_LATCH,
1506 			page_zip_stat_per_index_mutex_key);
1507 
1508 #ifndef PFS_SKIP_EVENT_MUTEX
1509 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1510 			event_manager_mutex_key);
1511 	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK, event_mutex_key);
1512 #else
1513 	LATCH_ADD_MUTEX(EVENT_MANAGER, SYNC_NO_ORDER_CHECK,
1514 			PFS_NOT_INSTRUMENTED);
1515 	LATCH_ADD_MUTEX(EVENT_MUTEX, SYNC_NO_ORDER_CHECK,
1516 			PFS_NOT_INSTRUMENTED);
1517 #endif /* !PFS_SKIP_EVENT_MUTEX */
1518 
1519 	LATCH_ADD_MUTEX(SYNC_ARRAY_MUTEX, SYNC_NO_ORDER_CHECK,
1520 			sync_array_mutex_key);
1521 
1522 	LATCH_ADD_MUTEX(THREAD_MUTEX, SYNC_NO_ORDER_CHECK, thread_mutex_key);
1523 
1524 	LATCH_ADD_MUTEX(ZIP_PAD_MUTEX, SYNC_NO_ORDER_CHECK, zip_pad_mutex_key);
1525 
1526 	LATCH_ADD_MUTEX(OS_AIO_READ_MUTEX, SYNC_NO_ORDER_CHECK,
1527 			PFS_NOT_INSTRUMENTED);
1528 
1529 	LATCH_ADD_MUTEX(OS_AIO_WRITE_MUTEX, SYNC_NO_ORDER_CHECK,
1530 			PFS_NOT_INSTRUMENTED);
1531 
1532 	LATCH_ADD_MUTEX(OS_AIO_LOG_MUTEX, SYNC_NO_ORDER_CHECK,
1533 			PFS_NOT_INSTRUMENTED);
1534 
1535 	LATCH_ADD_MUTEX(OS_AIO_IBUF_MUTEX, SYNC_NO_ORDER_CHECK,
1536 			PFS_NOT_INSTRUMENTED);
1537 
1538 	LATCH_ADD_MUTEX(OS_AIO_SYNC_MUTEX, SYNC_NO_ORDER_CHECK,
1539 			PFS_NOT_INSTRUMENTED);
1540 
1541 	LATCH_ADD_MUTEX(ROW_DROP_LIST, SYNC_NO_ORDER_CHECK,
1542 			row_drop_list_mutex_key);
1543 
1544 	LATCH_ADD_RWLOCK(INDEX_ONLINE_LOG, SYNC_INDEX_ONLINE_LOG,
1545 			index_online_log_key);
1546 
1547 	LATCH_ADD_MUTEX(WORK_QUEUE, SYNC_WORK_QUEUE, PFS_NOT_INSTRUMENTED);
1548 
1549 	// Add the RW locks
1550 	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);
1551 
1552 #ifndef PFS_SKIP_BUFFER_MUTEX_RWLOCK
1553 	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1554 			 buf_block_lock_key);
1555 #else
1556 	LATCH_ADD_RWLOCK(BUF_BLOCK_LOCK, SYNC_LEVEL_VARYING,
1557 			 PFS_NOT_INSTRUMENTED);
1558 #endif /* PFS_SKIP_BUFFER_MUTEX_RWLOCK */
1559 
1560 #ifdef UNIV_DEBUG
1561 	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_NO_ORDER_CHECK,
1562 			 buf_block_debug_latch_key);
1563 #else
1564 	LATCH_ADD_RWLOCK(BUF_BLOCK_DEBUG, SYNC_NO_ORDER_CHECK,
1565 			 PFS_NOT_INSTRUMENTED);
1566 #endif /* UNIV_DEBUG */
1567 
1568 	LATCH_ADD_RWLOCK(DICT_OPERATION, SYNC_DICT, dict_operation_lock_key);
1569 
1570 	LATCH_ADD_RWLOCK(CHECKPOINT, SYNC_NO_ORDER_CHECK, checkpoint_lock_key);
1571 
1572 	LATCH_ADD_RWLOCK(FIL_SPACE, SYNC_FSP, fil_space_latch_key);
1573 
1574 	LATCH_ADD_RWLOCK(FTS_CACHE, SYNC_FTS_CACHE, fts_cache_rw_lock_key);
1575 
1576 	LATCH_ADD_RWLOCK(FTS_CACHE_INIT, SYNC_FTS_CACHE_INIT,
1577 			 fts_cache_init_rw_lock_key);
1578 
1579 	LATCH_ADD_RWLOCK(TRX_I_S_CACHE, SYNC_TRX_I_S_RWLOCK,
1580 			 trx_i_s_cache_lock_key);
1581 
1582 	LATCH_ADD_RWLOCK(TRX_PURGE, SYNC_PURGE_LATCH, trx_purge_latch_key);
1583 
1584 	LATCH_ADD_RWLOCK(IBUF_INDEX_TREE, SYNC_IBUF_INDEX_TREE,
1585 			 index_tree_rw_lock_key);
1586 
1587 	LATCH_ADD_RWLOCK(INDEX_TREE, SYNC_INDEX_TREE, index_tree_rw_lock_key);
1588 
1589 	LATCH_ADD_RWLOCK(DICT_TABLE_STATS, SYNC_INDEX_TREE,
1590 			 dict_table_stats_key);
1591 
1592 	LATCH_ADD_RWLOCK(HASH_TABLE_RW_LOCK, SYNC_BUF_PAGE_HASH,
1593 			 hash_table_locks_key);
1594 
1595 	LATCH_ADD_RWLOCK(SYNC_DEBUG_MUTEX, SYNC_NO_ORDER_CHECK,
1596 			 PFS_NOT_INSTRUMENTED);
1597 
1598 	LATCH_ADD_MUTEX(MASTER_KEY_ID_MUTEX, SYNC_NO_ORDER_CHECK,
1599 			master_key_id_mutex_key);
1600 
1601 	LATCH_ADD_MUTEX(ANALYZE_INDEX_MUTEX, SYNC_ANALYZE_INDEX,
1602 			analyze_index_mutex_key);
1603 
1604 	// xtrabackup
1605 
1606 	LATCH_ADD_MUTEX(XTRA_DATAFILES_ITER_MUTEX, SYNC_MUTEX, PFS_NOT_INSTRUMENTED);
1607 
1608 	LATCH_ADD_MUTEX(XTRA_COUNT_MUTEX, SYNC_MUTEX, PFS_NOT_INSTRUMENTED);
1609 
1610 	LATCH_ADD_MUTEX(XTRA_DATADIR_ITER_T_MUTEX, SYNC_MUTEX, PFS_NOT_INSTRUMENTED);
1611 
1612 	latch_id_t	id = LATCH_ID_NONE;
1613 
1614 	/* The array should be ordered on latch ID. We need to
1615 	index directly into it from the mutex policy to update
1616 	the counters and access the meta-data. */
1617 
1618 	for (LatchMetaData::iterator it = latch_meta.begin();
1619 	     it != latch_meta.end();
1620 	     ++it) {
1621 
1622 		const latch_meta_t*	meta = *it;
1623 
1624 		/* Skip blank entries */
1625 		if (meta == NULL || meta->get_id() == LATCH_ID_NONE) {
1626 			continue;
1627 		}
1628 
1629 		ut_a(id < meta->get_id());
1630 
1631 		id = meta->get_id();
1632 	}
1633 }
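/* Because the vector is indexed by latch_id_t, meta data lookup is a direct
array access. A small sketch (assumes sync_latch_meta_init() has run): */
#if 0
const latch_meta_t*	meta = latch_meta[LATCH_ID_BUF_POOL];

ut_a(meta != NULL && meta->get_id() == LATCH_ID_BUF_POOL);

const char*	name = meta->get_name();	// e.g. for diagnostics
latch_level_t	level = meta->get_level();	// SYNC_BUF_POOL
#endif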
1634 
1635 /** Destroy the latch meta data */
1636 static
1637 void
1638 sync_latch_meta_destroy()
1639 {
1640 	for (LatchMetaData::iterator it = latch_meta.begin();
1641 	     it != latch_meta.end();
1642 	     ++it) {
1643 		UT_DELETE(*it);
1644 	}
1645 
1646 	latch_meta.clear();
1647 }
1648 
1649 /** Track mutex file creation name and line number. This is to avoid storing
1650 { const char* name; uint16_t line; } in every instance, which would push
1651 sizeof(Mutex) above 64 bytes. We use a lookup table to store it separately.
1652 Fetching the values is very rare, only required for diagnostic purposes. And,
1653 we don't create/destroy mutexes that frequently. */
1654 struct CreateTracker {
1655 
1656 	/** Constructor */
1657 	CreateTracker()
1658 		UNIV_NOTHROW
1659 	{
1660 		m_mutex.init();
1661 	}
1662 
1663 	/** Destructor */
1664 	~CreateTracker()
1665 		UNIV_NOTHROW
1666 	{
1667 		ut_d(m_files.empty());
1668 
1669 		m_mutex.destroy();
1670 	}
1671 
1672 	/** Register where the latch was created
1673 	@param[in]	ptr		Latch instance
1674 	@param[in]	filename	Where created
1675 	@param[in]	line		Line number in filename */
1676 	void register_latch(
1677 		const void*	ptr,
1678 		const char*	filename,
1679 		uint16_t	line)
1680 		UNIV_NOTHROW
1681 	{
1682 		m_mutex.enter();
1683 
1684 		Files::iterator	lb = m_files.lower_bound(ptr);
1685 
1686 		ut_ad(lb == m_files.end()
1687 		      || m_files.key_comp()(ptr, lb->first));
1688 
1689 		typedef Files::value_type value_type;
1690 
1691 		m_files.insert(lb, value_type(ptr, File(filename, line)));
1692 
1693 		m_mutex.exit();
1694 	}
1695 
1696 	/** Deregister a latch - when it is destroyed
1697 	@param[in]	ptr		Latch instance being destroyed */
1698 	void deregister_latch(const void* ptr)
1699 		UNIV_NOTHROW
1700 	{
1701 		m_mutex.enter();
1702 
1703 		Files::iterator	lb = m_files.lower_bound(ptr);
1704 
1705 		ut_ad(lb != m_files.end()
1706 		      && !(m_files.key_comp()(ptr, lb->first)));
1707 
1708 		m_files.erase(lb);
1709 
1710 		m_mutex.exit();
1711 	}
1712 
1713 	/** Get the create string, format is "name:line"
1714 	@param[in]	ptr		Latch instance
1715 	@return the create string or "" if not found */
1716 	std::string get(const void* ptr)
1717 		UNIV_NOTHROW
1718 	{
1719 		m_mutex.enter();
1720 
1721 		std::string	created;
1722 
1723 		Files::iterator	lb = m_files.lower_bound(ptr);
1724 
1725 		if (lb != m_files.end()
1726 		    && !(m_files.key_comp()(ptr, lb->first))) {
1727 
1728 			std::ostringstream	msg;
1729 
1730 			msg << lb->second.m_name << ":" << lb->second.m_line;
1731 
1732 			created = msg.str();
1733 		}
1734 
1735 		m_mutex.exit();
1736 
1737 		return(created);
1738 	}
1739 
1740 private:
1741 	/** For tracking the filename and line number */
1742 	struct File {
1743 
1744 		/** Constructor */
1745 		File() UNIV_NOTHROW : m_name(), m_line() { }
1746 
1747 		/** Constructor
1748 		@param[in]	name		Filename where created
1749 		@param[in]	line		Line number where created */
1750 		File(const char*  name, uint16_t line)
1751 			UNIV_NOTHROW
1752 			:
1753 			m_name(sync_basename(name)),
1754 			m_line(line)
1755 		{
1756 			/* No op */
1757 		}
1758 
1759 		/** Filename where created */
1760 		std::string		m_name;
1761 
1762 		/** Line number where created */
1763 		uint16_t		m_line;
1764 	};
1765 
1766 	/** Map the mutex instance to where it was created */
1767 	typedef std::map<
1768 		const void*,
1769 		File,
1770 		std::less<const void*>,
1771 		ut_allocator<std::pair<const void* const, File> > >
1772 		Files;
1773 
1774 	typedef OSMutex	Mutex;
1775 
1776 	/** Mutex protecting m_files */
1777 	Mutex			m_mutex;
1778 
1779 	/** Track the latch creation */
1780 	Files			m_files;
1781 };
1782 
1783 /** Track latch creation location. For reducing the size of the latches */
1784 static CreateTracker*	create_tracker;
1785 
1786 /** Register a latch, called when it is created
1787 @param[in]	ptr		Latch instance that was created
1788 @param[in]	filename	Filename where it was created
1789 @param[in]	line		Line number in filename */
1790 void
1791 sync_file_created_register(
1792 	const void*	ptr,
1793 	const char*	filename,
1794 	uint16_t	line)
1795 {
1796 	create_tracker->register_latch(ptr, filename, line);
1797 }
1798 
1799 /** Deregister a latch, called when it is destroyed
1800 @param[in]	ptr		Latch to be destroyed */
1801 void
1802 sync_file_created_deregister(const void* ptr)
1803 {
1804 	create_tracker->deregister_latch(ptr);
1805 }
1806 
1807 /** Get the string where the file was created. Its format is "name:line"
1808 @param[in]	ptr		Latch instance
1809 @return created information or "" if can't be found */
1810 std::string
1811 sync_file_created_get(const void* ptr)
1812 {
1813 	return(create_tracker->get(ptr));
1814 }
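/* Sketch of the intended lifetime protocol for the create tracker (the
latch_ptr variable and the call sites are hypothetical; registration is
normally done by the latch creation code, which supplies __FILE__ and
__LINE__): */
#if 0
sync_file_created_register(latch_ptr, __FILE__, (uint16_t) __LINE__);

/* Later, for diagnostic output, e.g. "sync0debug.cc:123". */
std::string	created_at = sync_file_created_get(latch_ptr);

/* When the latch is destroyed: */
sync_file_created_deregister(latch_ptr);
#endif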
1815 
1816 /** Initializes the synchronization data structures. */
1817 void
1818 sync_check_init()
1819 {
1820 	ut_ad(!LatchDebug::s_initialized);
1821 	ut_d(LatchDebug::s_initialized = true);
1822 
1823 	/** For collecting latch statistics - SHOW ... MUTEX */
1824 	mutex_monitor = UT_NEW_NOKEY(MutexMonitor());
1825 
1826 	/** For tracking mutex creation location */
1827 	create_tracker = UT_NEW_NOKEY(CreateTracker());
1828 
1829 	sync_latch_meta_init();
1830 
1831 	/* Init the rw-lock & mutex list and create the mutex to protect it. */
1832 
1833 	UT_LIST_INIT(rw_lock_list, &rw_lock_t::list);
1834 
1835 	mutex_create(LATCH_ID_RW_LOCK_LIST, &rw_lock_list_mutex);
1836 
1837 	ut_d(LatchDebug::init());
1838 
1839 	sync_array_init(OS_THREAD_MAX_N);
1840 }
1841 
1842 /** Frees the resources in InnoDB's own synchronization data structures. Use
1843 os_sync_free() after calling this. */
1844 void
1845 sync_check_close()
1846 {
1847 	ut_d(LatchDebug::shutdown());
1848 
1849 	mutex_free(&rw_lock_list_mutex);
1850 
1851 	sync_array_close();
1852 
1853 	UT_DELETE(mutex_monitor);
1854 
1855 	mutex_monitor = NULL;
1856 
1857 	UT_DELETE(create_tracker);
1858 
1859 	create_tracker = NULL;
1860 
1861 	sync_latch_meta_destroy();
1862 }
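/* Expected pairing at start-up and shutdown (sketch): */
#if 0
sync_check_init();	/* once, in single threaded mode, before any latch use */

/* ... the server runs, creating and using latches ... */

sync_check_close();
os_sync_free();		/* per the comment above, call this afterwards */
#endif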
1863 
1864