/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2020, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/sync0types.h
Global types for sync

Created 9/5/1995 Heikki Tuuri
*******************************************************/

#ifndef sync0types_h
#define sync0types_h

#include <vector>

#include "ut0new.h"

#ifdef _WIN32
/** Native mutex */
typedef CRITICAL_SECTION	sys_mutex_t;
#else
/** Native mutex */
typedef pthread_mutex_t		sys_mutex_t;
#endif /* _WIN32 */

/** Mutex states. */
enum mutex_state_t {
	/** Mutex is free */
	MUTEX_STATE_UNLOCKED = 0,

	/** Mutex is acquired by some thread. */
	MUTEX_STATE_LOCKED = 1,

	/** Mutex is contended and there are threads waiting on the lock. */
	MUTEX_STATE_WAITERS = 2
};
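
/* Illustrative only: a hedged sketch of how an event-based test-and-set
mutex might cycle through the states above (the real implementations live
in ib0mutex.h and differ in detail; the std::atomic lock word and the event
wait/signal steps below are assumptions made for the example).

@code
	std::atomic<uint32_t>	lock_word(MUTEX_STATE_UNLOCKED);

	// Acquire: uncontended fast path.
	uint32_t	expected = MUTEX_STATE_UNLOCKED;

	if (!lock_word.compare_exchange_strong(expected, MUTEX_STATE_LOCKED)) {
		// Contended path: announce that we are waiting, then block
		// until the previous holder has released the mutex.
		while (lock_word.exchange(MUTEX_STATE_WAITERS)
		       != MUTEX_STATE_UNLOCKED) {
			// ... wait on an os_event until signalled ...
		}
	}

	// Release: wake up waiters only if some thread announced itself.
	if (lock_word.exchange(MUTEX_STATE_UNLOCKED) == MUTEX_STATE_WAITERS) {
		// ... signal the os_event ...
	}
@endcode */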

/*
		LATCHING ORDER WITHIN THE DATABASE
		==================================

The mutex or latch in the central memory object, for instance, a rollback
segment object, must be acquired before acquiring the latch or latches to
the corresponding file data structure. In the latching order below, these
file page object latches are placed immediately below the corresponding
central memory object latch or mutex.

Synchronization object			Notes
----------------------			-----

Dictionary mutex			If we have a pointer to a dictionary
|					object, e.g., a table, it can be
|					accessed without reserving the
|					dictionary mutex. We must have a
|					reservation, a memoryfix, to the
|					appropriate table object in this case,
|					and the table must be explicitly
|					released later.
V
Dictionary header
|
V
Secondary index tree latch		The tree latch protects also all
|					the B-tree non-leaf pages. These
V					can be read with the page only
Secondary index non-leaf		bufferfixed to save CPU time,
|					no s-latch is needed on the page.
|					Modification of a page requires an
|					x-latch on the page, however. If a
|					thread owns an x-latch to the tree,
|					it is allowed to latch non-leaf pages
|					even after it has acquired the fsp
|					latch.
V
Secondary index leaf			The latch on the secondary index leaf
|					can be kept while accessing the
|					clustered index, to save CPU time.
V
Clustered index tree latch		To increase concurrency, the tree
|					latch is usually released when the
|					leaf page latch has been acquired.
V
Clustered index non-leaf
|
V
Clustered index leaf
|
V
Transaction system header
|
V
Rollback segment mutex			The rollback segment mutex must be
|					reserved, if, e.g., a new page must
|					be added to an undo log. The rollback
|					segment and the undo logs in its
|					history list can be seen as an
|					analogue of a B-tree, and the latches
|					reserved similarly, using a version of
|					lock-coupling. If an undo log must be
|					extended by a page when inserting an
|					undo log record, this corresponds to
|					a pessimistic insert in a B-tree.
V
Rollback segment header
|
V
Purge system latch
|
V
Undo log pages				If a thread owns the trx undo mutex,
|					or for a log in the history list, the
|					rseg mutex, it is allowed to latch
|					undo log pages in any order, and even
|					after it has acquired the fsp latch.
|					If a thread does not have the
|					appropriate mutex, it is allowed to
|					latch only a single undo log page in
|					a mini-transaction.
V
File space management latch		If a mini-transaction must allocate
|					several file pages, it can do that,
|					because it keeps the x-latch to the
|					file space management in its memo.
V
File system pages
|
V
lock_sys_wait_mutex			Mutex protecting lock timeout data
|
V
lock_sys_mutex				Mutex protecting lock_sys_t
|
V
trx_sys.mutex				Mutex protecting trx_sys.trx_list
|
V
Threads mutex				Background thread scheduling mutex
|
V
query_thr_mutex				Mutex protecting query threads
|
V
trx_mutex				Mutex protecting trx_t fields
|
V
Search system mutex
|
V
Buffer pool mutex
|
V
Log mutex
|
Any other latch
|
V
Memory pool mutex */

/** Latching order levels. If you modify these, you have to also update
LatchDebug internals in sync0debug.cc */

enum latch_level_t {
	SYNC_UNKNOWN = 0,

	SYNC_MUTEX = 1,

	RW_LOCK_SX,
	RW_LOCK_X_WAIT,
	RW_LOCK_S,
	RW_LOCK_X,
	RW_LOCK_NOT_LOCKED,

	SYNC_ANY_LATCH,

	SYNC_POOL,
	SYNC_POOL_MANAGER,

	SYNC_SEARCH_SYS,

	SYNC_WORK_QUEUE,

	SYNC_FTS_TOKENIZE,
	SYNC_FTS_OPTIMIZE,
	SYNC_FTS_CACHE_INIT,
	SYNC_RECV,
	SYNC_PURGE_QUEUE,
	SYNC_TRX_SYS_HEADER,
	SYNC_TRX,
	SYNC_RW_TRX_HASH_ELEMENT,
	SYNC_READ_VIEW,
	SYNC_TRX_SYS,
	SYNC_LOCK_SYS,
	SYNC_LOCK_WAIT_SYS,

	SYNC_INDEX_ONLINE_LOG,

	SYNC_IBUF_BITMAP,
	SYNC_IBUF_BITMAP_MUTEX,
	SYNC_IBUF_TREE_NODE,
	SYNC_IBUF_TREE_NODE_NEW,
	SYNC_IBUF_INDEX_TREE,

	SYNC_IBUF_MUTEX,

	SYNC_FSP_PAGE,
	SYNC_FSP,
	SYNC_EXTERN_STORAGE,
	SYNC_TRX_UNDO_PAGE,
	SYNC_RSEG_HEADER,
	SYNC_RSEG_HEADER_NEW,
	SYNC_NOREDO_RSEG,
	SYNC_REDO_RSEG,
	SYNC_PURGE_LATCH,
	SYNC_TREE_NODE,
	SYNC_TREE_NODE_FROM_HASH,
	SYNC_TREE_NODE_NEW,
	SYNC_IBUF_PESS_INSERT_MUTEX,
	SYNC_INDEX_TREE,

	SYNC_IBUF_HEADER,
	SYNC_DICT_HEADER,
	SYNC_STATS_AUTO_RECALC,
	SYNC_DICT,
	SYNC_FTS_CACHE,

	SYNC_DICT_OPERATION,

	SYNC_TRX_I_S_RWLOCK,

	/** Level is varying. Only used with buffer pool page locks, which
	do not have a fixed level, but instead have their level set after
	the page is locked; see e.g. ibuf_bitmap_get_map_page(). */

	SYNC_LEVEL_VARYING,

	/** This can be used to suppress order checking. */
	SYNC_NO_ORDER_CHECK,

	/** Maximum level value */
	SYNC_LEVEL_MAX = SYNC_NO_ORDER_CHECK
};
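
/* Illustrative only: under UNIV_DEBUG the LatchDebug code in sync0debug.cc
uses these levels to validate the latching order described above. The basic
rule (subject to a number of documented exceptions) is that a thread may
only acquire a latch whose level is below the lowest level it already
holds; the enum order therefore mirrors the diagram. A hedged sketch:

@code
	// Holding the rollback segment mutex (SYNC_REDO_RSEG), a thread may
	// still acquire the file space latch (SYNC_FSP), which is lower:
	static_assert(SYNC_FSP < SYNC_REDO_RSEG, "latching order");

	// ...but acquiring the dictionary mutex (SYNC_DICT) while holding
	// SYNC_FSP would be flagged as a latch order violation:
	static_assert(SYNC_DICT > SYNC_FSP, "latching order");
@endcode */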

/** Each latch has an ID. This id is used for creating the latch and to look
up its meta-data. See sync0debug.cc. */
enum latch_id_t {
	LATCH_ID_NONE = 0,
	LATCH_ID_DICT_FOREIGN_ERR,
	LATCH_ID_DICT_SYS,
	LATCH_ID_FIL_SYSTEM,
	LATCH_ID_FTS_DELETE,
	LATCH_ID_FTS_DOC_ID,
	LATCH_ID_FTS_PLL_TOKENIZE,
	LATCH_ID_IBUF_BITMAP,
	LATCH_ID_IBUF,
	LATCH_ID_IBUF_PESSIMISTIC_INSERT,
	LATCH_ID_PURGE_SYS_PQ,
	LATCH_ID_RECALC_POOL,
	LATCH_ID_RECV_SYS,
	LATCH_ID_REDO_RSEG,
	LATCH_ID_NOREDO_RSEG,
	LATCH_ID_RW_LOCK_DEBUG,
	LATCH_ID_RTR_ACTIVE_MUTEX,
	LATCH_ID_RTR_MATCH_MUTEX,
	LATCH_ID_RTR_PATH_MUTEX,
	LATCH_ID_RW_LOCK_LIST,
	LATCH_ID_SRV_INNODB_MONITOR,
	LATCH_ID_SRV_MISC_TMPFILE,
	LATCH_ID_SRV_MONITOR_FILE,
	LATCH_ID_TRX_POOL,
	LATCH_ID_TRX_POOL_MANAGER,
	LATCH_ID_TRX,
	LATCH_ID_LOCK_SYS,
	LATCH_ID_LOCK_SYS_WAIT,
	LATCH_ID_TRX_SYS,
	LATCH_ID_SRV_SYS_TASKS,
	LATCH_ID_PAGE_ZIP_STAT_PER_INDEX,
	LATCH_ID_SYNC_ARRAY_MUTEX,
	LATCH_ID_ROW_DROP_LIST,
	LATCH_ID_INDEX_ONLINE_LOG,
	LATCH_ID_WORK_QUEUE,
	LATCH_ID_BTR_SEARCH,
	LATCH_ID_BUF_BLOCK_LOCK,
	LATCH_ID_BUF_BLOCK_DEBUG,
	LATCH_ID_DICT_OPERATION,
	LATCH_ID_FIL_SPACE,
	LATCH_ID_FTS_CACHE,
	LATCH_ID_FTS_CACHE_INIT,
	LATCH_ID_TRX_I_S_CACHE,
	LATCH_ID_TRX_PURGE,
	LATCH_ID_IBUF_INDEX_TREE,
	LATCH_ID_INDEX_TREE,
	LATCH_ID_DICT_TABLE_STATS,
	LATCH_ID_DEFRAGMENT_MUTEX,
	LATCH_ID_BTR_DEFRAGMENT_MUTEX,
	LATCH_ID_FIL_CRYPT_STAT_MUTEX,
	LATCH_ID_FIL_CRYPT_DATA_MUTEX,
	LATCH_ID_FIL_CRYPT_THREADS_MUTEX,
	LATCH_ID_RW_TRX_HASH_ELEMENT,
	LATCH_ID_READ_VIEW,
	LATCH_ID_MAX = LATCH_ID_READ_VIEW
};

#ifndef UNIV_INNOCHECKSUM
/** OS mutex, without any policy. It is a thin wrapper around the
system mutexes. The interface is different from the policy mutexes,
to ensure that it is called directly and not confused with the
policy mutexes. */
struct OSMutex {

	/** Constructor */
	OSMutex()
		UNIV_NOTHROW
	{
		ut_d(m_freed = true);
	}

	/** Create the mutex by calling the system functions. */
	void init()
		UNIV_NOTHROW
	{
		ut_ad(m_freed);

#ifdef _WIN32
		InitializeCriticalSection((LPCRITICAL_SECTION) &m_mutex);
#else
		{
			int	ret = pthread_mutex_init(&m_mutex, NULL);
			ut_a(ret == 0);
		}
#endif /* _WIN32 */

		ut_d(m_freed = false);
	}

	/** Destructor */
	~OSMutex() { }

	/** Destroy the mutex */
	void destroy()
		UNIV_NOTHROW
	{
		ut_ad(!m_freed);
#ifdef _WIN32
		DeleteCriticalSection((LPCRITICAL_SECTION) &m_mutex);
#else
		int	ret;

		ret = pthread_mutex_destroy(&m_mutex);

		if (ret != 0) {

			ib::error()
				<< "Return value " << ret << " when calling "
				<< "pthread_mutex_destroy().";
		}
#endif /* _WIN32 */
		ut_d(m_freed = true);
	}

	/** Release the mutex. */
	void exit()
		UNIV_NOTHROW
	{
		ut_ad(!m_freed);
#ifdef _WIN32
		LeaveCriticalSection(&m_mutex);
#else
		int	ret = pthread_mutex_unlock(&m_mutex);
		ut_a(ret == 0);
#endif /* _WIN32 */
	}

	/** Acquire the mutex. */
	void enter()
		UNIV_NOTHROW
	{
		ut_ad(!m_freed);
#ifdef _WIN32
		EnterCriticalSection((LPCRITICAL_SECTION) &m_mutex);
#else
		int	ret = pthread_mutex_lock(&m_mutex);
		ut_a(ret == 0);
#endif /* _WIN32 */
	}

	/** @return true if locking succeeded */
	bool try_lock()
		UNIV_NOTHROW
	{
		ut_ad(!m_freed);
#ifdef _WIN32
		return(TryEnterCriticalSection(&m_mutex) != 0);
#else
		return(pthread_mutex_trylock(&m_mutex) == 0);
#endif /* _WIN32 */
	}

	/** Required for os_event_t */
	operator sys_mutex_t*()
		UNIV_NOTHROW
	{
		return(&m_mutex);
	}

private:
#ifdef DBUG_ASSERT_EXISTS
	/** true if the mutex has been freed/destroyed. */
	bool			m_freed;
#endif /* DBUG_ASSERT_EXISTS */

	sys_mutex_t		m_mutex;
};
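
/* Illustrative only: a minimal usage sketch for OSMutex. The wrapper must
be explicitly init()ed before use and destroy()ed afterwards; the
constructor and destructor intentionally do neither.

@code
	OSMutex	mutex;

	mutex.init();

	mutex.enter();
	// ... critical section ...
	mutex.exit();

	if (mutex.try_lock()) {
		// ... acquired without blocking ...
		mutex.exit();
	}

	mutex.destroy();
@endcode */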

#ifdef UNIV_PFS_MUTEX
/** Latch element.
Used for mutexes which have PFS keys defined under UNIV_PFS_MUTEX.
@param[in]	id		Latch id
@param[in]	level		Latch level
@param[in]	key		PFS key */
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))

#ifdef UNIV_PFS_RWLOCK
/** Latch element.
Used for rwlocks which have PFS keys defined under UNIV_PFS_RWLOCK.
@param[in]	id		Latch id
@param[in]	level		Latch level
@param[in]	key		PFS key */
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))
#else
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level,	     \
		     PSI_NOT_INSTRUMENTED))
#endif /* UNIV_PFS_RWLOCK */

#else
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
#endif /* UNIV_PFS_MUTEX */
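
/* Illustrative only: these macros are expanded in sync_latch_meta_init()
in sync0debug.cc. A hedged sketch of typical invocations (the PFS key names
are examples and may not match the actual keys):

@code
	// Register the dictionary mutex at level SYNC_DICT; the PFS key is
	// only used when UNIV_PFS_MUTEX is defined.
	LATCH_ADD_MUTEX(DICT_SYS, SYNC_DICT, dict_sys_mutex_key);

	// Register the adaptive hash index rw-lock.
	LATCH_ADD_RWLOCK(BTR_SEARCH, SYNC_SEARCH_SYS, btr_search_latch_key);
@endcode */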

/** Default latch counter */
class LatchCounter {

public:
	/** The counts we collect for a mutex */
	struct Count {

		/** Constructor */
		Count()
			UNIV_NOTHROW
			:
			m_spins(),
			m_waits(),
			m_calls(),
			m_enabled()
		{
			/* No op */
		}

		/** Reset the values to zero */
		void reset()
			UNIV_NOTHROW
		{
			m_spins = 0;
			m_waits = 0;
			m_calls = 0;
		}

		/** Number of spins trying to acquire the latch. */
		uint32_t	m_spins;

		/** Number of waits trying to acquire the latch */
		uint32_t	m_waits;

		/** Number of times it was called */
		uint32_t	m_calls;

		/** true if enabled */
		bool		m_enabled;
	};

	/** Constructor */
	LatchCounter()
		UNIV_NOTHROW
		:
		m_active(false)
	{
		m_mutex.init();
	}

	/** Destructor */
	~LatchCounter()
		UNIV_NOTHROW
	{
		m_mutex.destroy();

		for (Counters::iterator it = m_counters.begin();
		     it != m_counters.end();
		     ++it) {

			Count*	count = *it;

			UT_DELETE(count);
		}
	}

	/** Reset all counters to zero. The individual counter updates are
	not atomic and we do not care, unless that becomes a demonstrated
	problem: the information collected is not required for the correct
	functioning of the server. */
	void reset()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::iterator	end = m_counters.end();

		for (Counters::iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->reset();
		}

		m_mutex.exit();
	}

	/** @return the aggregate counter */
	Count* sum_register()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Count*	count;

		if (m_counters.empty()) {
			count = UT_NEW_NOKEY(Count());
			m_counters.push_back(count);
		} else {
			ut_a(m_counters.size() == 1);
			count = m_counters[0];
		}

		m_mutex.exit();

		return(count);
	}

	/** Register a single instance counter */
	void single_register(Count* count)
		UNIV_NOTHROW
	{
		m_mutex.enter();

		m_counters.push_back(count);

		m_mutex.exit();
	}

	/** Deregister a single instance counter
	@param[in]	count		The count instance to deregister */
	void single_deregister(Count* count)
		UNIV_NOTHROW
	{
		m_mutex.enter();

		m_counters.erase(
			std::remove(
				m_counters.begin(),
				m_counters.end(), count),
			m_counters.end());

		m_mutex.exit();
	}

	/** Iterate over the counters */
	template<typename C> void iterate(const C& callback) UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			callback(*it);
		}

		m_mutex.exit();
	}

	/** Enable the monitoring */
	void enable()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->m_enabled = true;
		}

		m_active = true;

		m_mutex.exit();
	}

	/** Disable the monitoring */
	void disable()
		UNIV_NOTHROW
	{
		m_mutex.enter();

		Counters::const_iterator	end = m_counters.end();

		for (Counters::const_iterator it = m_counters.begin();
		     it != end;
		     ++it) {

			(*it)->m_enabled = false;
		}

		m_active = false;

		m_mutex.exit();
	}

	/** @return if monitoring is active */
	bool is_enabled() const
		UNIV_NOTHROW
	{
		return(m_active);
	}

private:
	/* Disable copying */
	LatchCounter(const LatchCounter&);
	LatchCounter& operator=(const LatchCounter&);

private:
	typedef OSMutex Mutex;
	typedef std::vector<Count*> Counters;

	/** Mutex protecting m_counters */
	Mutex			m_mutex;

	/** Counters for the latches */
	Counters		m_counters;

	/** if true then we collect the data */
	bool			m_active;
};
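
/* Illustrative only: a hedged sketch of how a latch implementation might
feed statistics into a LatchCounter (the real users are the mutex and
rw-lock policy classes, which differ in detail).

@code
	LatchCounter		counter;

	// An aggregating policy registers one shared Count instance:
	LatchCounter::Count*	count = counter.sum_register();

	if (count->m_enabled) {
		++count->m_calls;
		++count->m_spins;	// spun once while acquiring
	}

	// Report the collected statistics.
	counter.iterate([](const LatchCounter::Count* c) {
		// ... print c->m_spins, c->m_waits, c->m_calls ...
	});
@endcode */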

/** Latch meta data */
template <typename Counter = LatchCounter>
class LatchMeta {

public:
	typedef Counter CounterType;

#ifdef UNIV_PFS_MUTEX
	typedef	mysql_pfs_key_t	pfs_key_t;
#endif /* UNIV_PFS_MUTEX */

	/** Constructor */
	LatchMeta()
		:
		m_id(LATCH_ID_NONE),
		m_name(),
		m_level(SYNC_UNKNOWN),
		m_level_name()
#ifdef UNIV_PFS_MUTEX
		,m_pfs_key()
#endif /* UNIV_PFS_MUTEX */
	{
	}

	/** Destructor */
	~LatchMeta() { }

	/** Constructor
	@param[in]	id		Latch id
	@param[in]	name		Latch name
	@param[in]	level		Latch level
	@param[in]	level_name	Latch level text representation
	@param[in]	key		PFS key */
	LatchMeta(
		latch_id_t	id,
		const char*	name,
		latch_level_t	level,
		const char*	level_name
#ifdef UNIV_PFS_MUTEX
		,pfs_key_t	key
#endif /* UNIV_PFS_MUTEX */
	      )
		:
		m_id(id),
		m_name(name),
		m_level(level),
		m_level_name(level_name)
#ifdef UNIV_PFS_MUTEX
		,m_pfs_key(key)
#endif /* UNIV_PFS_MUTEX */
	{
		/* No op */
	}

	/** Less than operator.
	@param[in]	rhs		Instance to compare against
	@return true if this.get_id() < rhs.get_id() */
	bool operator<(const LatchMeta& rhs) const
	{
		return(get_id() < rhs.get_id());
	}

	/** @return the latch id */
	latch_id_t get_id() const
	{
		return(m_id);
	}

	/** @return the latch name */
	const char* get_name() const
	{
		return(m_name);
	}

	/** @return the latch level */
	latch_level_t get_level() const
	{
		return(m_level);
	}

	/** @return the latch level name */
	const char* get_level_name() const
	{
		return(m_level_name);
	}

#ifdef UNIV_PFS_MUTEX
	/** @return the PFS key for the latch */
	pfs_key_t get_pfs_key() const
	{
		return(m_pfs_key);
	}
#endif /* UNIV_PFS_MUTEX */

	/** @return the counter instance */
	Counter* get_counter()
	{
		return(&m_counter);
	}

private:
	/** Latch id */
	latch_id_t		m_id;

	/** Latch name */
	const char*		m_name;

	/** Latch level in the ordering */
	latch_level_t		m_level;

	/** Latch level text representation */
	const char*		m_level_name;

#ifdef UNIV_PFS_MUTEX
	/** PFS key */
	pfs_key_t		m_pfs_key;
#endif /* UNIV_PFS_MUTEX */

	/** For gathering latch statistics */
	Counter			m_counter;
};

typedef LatchMeta<LatchCounter> latch_meta_t;
typedef std::vector<latch_meta_t*, ut_allocator<latch_meta_t*> > LatchMetaData;

/** Note: This is accessed without any mutex protection. It is initialised
at startup and elements should not be added to or removed from it after
that. See sync_latch_meta_init() */
extern LatchMetaData	latch_meta;

/** Get the latch meta-data from the latch ID
@param[in]	id		Latch ID
@return the latch meta data */
inline
latch_meta_t&
sync_latch_get_meta(latch_id_t id)
{
	ut_ad(static_cast<size_t>(id) < latch_meta.size());
	ut_ad(id == latch_meta[id]->get_id());

	return(*latch_meta[id]);
}

/** Fetch the counter for the latch
@param[in]	id		Latch ID
@return the latch counter */
inline
latch_meta_t::CounterType*
sync_latch_get_counter(latch_id_t id)
{
	latch_meta_t&	meta = sync_latch_get_meta(id);

	return(meta.get_counter());
}

/** Get the latch name from the latch ID
@param[in]	id		Latch ID
@return the name, will assert if not found */
inline
const char*
sync_latch_get_name(latch_id_t id)
{
	const latch_meta_t&	meta = sync_latch_get_meta(id);

	return(meta.get_name());
}

/** Get the latch ordering level
@param[in]	id		Latch id to lookup
@return the latch level */
inline
latch_level_t
sync_latch_get_level(latch_id_t id)
{
	const latch_meta_t&	meta = sync_latch_get_meta(id);

	return(meta.get_level());
}
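
/* Illustrative only: a short sketch of reading latch meta data through the
accessors above. It assumes latch_meta has been populated at startup by
sync_latch_meta_init() and that DICT_SYS is registered at level SYNC_DICT,
as sync0debug.cc does.

@code
	const char*	name = sync_latch_get_name(LATCH_ID_DICT_SYS);
	latch_level_t	level = sync_latch_get_level(LATCH_ID_DICT_SYS);

	latch_meta_t::CounterType*	counter =
		sync_latch_get_counter(LATCH_ID_DICT_SYS);

	ut_ad(level == SYNC_DICT);
	// name == "DICT_SYS"; counter exposes the collected statistics
@endcode */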

#ifdef UNIV_PFS_MUTEX
/** Get the latch PFS key from the latch ID
@param[in]	id		Latch ID
@return the PFS key */
inline
mysql_pfs_key_t
sync_latch_get_pfs_key(latch_id_t id)
{
	const latch_meta_t&	meta = sync_latch_get_meta(id);

	return(meta.get_pfs_key());
}
#endif

/** String representation of the filename and line number where the
latch was created
@param[in]	id		Latch ID
@param[in]	created		Filename and line number where it was created
@return the string representation */
std::string
sync_mutex_to_string(
	latch_id_t		id,
	const std::string&	created);

/** Get the latch name from a sync level
@param[in]	level		Latch level to lookup
@return the latch name, or NULL if not found */
const char*
sync_latch_get_name(latch_level_t level);

/** Print the filename "basename"
@return the basename */
const char*
sync_basename(const char* filename);

#ifdef UNIV_DEBUG

/** All (ordered) latches, used in debugging, must derive from this class. */
struct latch_t {

	/** Constructor
	@param[in]	id	The latch ID */
	explicit latch_t(latch_id_t id = LATCH_ID_NONE)
		UNIV_NOTHROW
		:
		m_id(id),
		m_rw_lock() {}

	/** Destructor */
	virtual ~latch_t() UNIV_NOTHROW { }

	/** @return the latch ID */
	latch_id_t get_id() const
	{
		return(m_id);
	}

	/** @return true if it is a rw-lock */
	bool is_rw_lock() const
		UNIV_NOTHROW
	{
		return(m_rw_lock);
	}

	/** Print the latch context
	@return the string representation */
	virtual std::string to_string() const = 0;

	/** @return the latch level */
	latch_level_t get_level() const
		UNIV_NOTHROW
	{
		ut_a(m_id != LATCH_ID_NONE);

		return(sync_latch_get_level(m_id));
	}

	/** @return the latch name, m_id must be set */
	const char* get_name() const
		UNIV_NOTHROW
	{
		ut_a(m_id != LATCH_ID_NONE);

		return(sync_latch_get_name(m_id));
	}

	/** Latch ID */
	latch_id_t	m_id;

	/** true if it is a rw-lock. In debug mode, rw_lock_t derives from
	this class and sets this variable. */
	bool		m_rw_lock;
};
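
/* Illustrative only: a hedged sketch of a debug-instrumented latch that
derives from latch_t, as the InnoDB mutexes and rw-locks do in debug
builds. The class name and to_string() contents are made up for the
example.

@code
	struct ExampleLatch : public latch_t {

		ExampleLatch() : latch_t(LATCH_ID_LOCK_SYS) { }

		std::string to_string() const override
		{
			// Typically "<name> created <file>:<line>"
			return(std::string(get_name()));
		}
	};
@endcode */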

/** Subclass this to iterate over a thread's acquired latch levels. */
struct sync_check_functor_t {
	virtual ~sync_check_functor_t() { }
	virtual bool operator()(const latch_level_t) const = 0;
};

/** Check that no latch is being held.
@tparam	some_allowed	whether some latches are allowed to be held */
template<bool some_allowed = false>
struct sync_checker : public sync_check_functor_t
{
	/** Check the latching constraints
	@param[in]	level		The level held by the thread
	@return whether a latch violation was detected */
	bool operator()(const latch_level_t level) const override
	{
		if (some_allowed) {
			switch (level) {
			case SYNC_FSP:
			case SYNC_DICT:
			case SYNC_DICT_OPERATION:
			case SYNC_FTS_CACHE:
			case SYNC_NO_ORDER_CHECK:
				return(false);
			default:
				return(true);
			}
		}

		return(true);
	}
};

/** The strict latch checker (no InnoDB latches may be held) */
typedef struct sync_checker<false> sync_check;
/** The sloppy latch checker (can hold InnoDB dictionary or SQL latches) */
typedef struct sync_checker<true> dict_sync_check;
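
/* Illustrative only: these functors are meant to be passed to
sync_check_iterate() (declared in sync0debug.h) to assert that the calling
thread holds no unexpected latches; a hedged sketch:

@code
	// No InnoDB latches may be held at all at this point.
	ut_ad(!sync_check_iterate(sync_check()));

	// Dictionary-related latches are tolerated, anything else is not.
	ut_ad(!sync_check_iterate(dict_sync_check()));
@endcode */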

/** Functor to check for given latching constraints. */
struct sync_allowed_latches : public sync_check_functor_t {

	/** Constructor
	@param[in]	from	first element in an array of latch_level_t
	@param[in]	to	one past the last element in that array */
	sync_allowed_latches(
		const latch_level_t*	from,
		const latch_level_t*	to)
		: begin(from), end(to) { }

	/** Checks whether the given latch level violates the latch
	constraints. This object maintains a list of allowed latch levels,
	and if the given level is not in that list, it is a violation.

	@param[in]	level	The latch level to check
	@return true if there is a latch violation */
	bool operator()(const latch_level_t level) const override
	{
		return(std::find(begin, end, level) == end);
	}

private:
	/** First element in an array of allowed latch levels */
	const latch_level_t* const begin;
	/** First element after the end of the array of allowed latch levels */
	const latch_level_t* const end;
};
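
/* Illustrative only: a hedged sketch of restricting the calling thread to
an explicit list of allowed latch levels (UT_ARR_SIZE is assumed to be the
usual array-size macro from univ.i):

@code
	static const latch_level_t	allowed[] = {
		SYNC_DICT,
		SYNC_NO_ORDER_CHECK
	};

	sync_allowed_latches	check(
		allowed, allowed + UT_ARR_SIZE(allowed));

	// Fails if the thread holds any latch outside the allowed levels.
	ut_ad(!sync_check_iterate(check));
@endcode */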

/** Get the latch id from a latch name.
@param[in]	name	Latch name
@return the latch id, or LATCH_ID_NONE if not found. */
latch_id_t
sync_latch_get_id(const char* name);

typedef ulint rw_lock_flags_t;

/* Flags to specify lock types for rw_lock_own_flagged() */
enum rw_lock_flag_t {
	RW_LOCK_FLAG_S  = 1 << 0,
	RW_LOCK_FLAG_X  = 1 << 1,
	RW_LOCK_FLAG_SX = 1 << 2
};

#endif /* UNIV_DEBUG */

#endif /* UNIV_INNOCHECKSUM */

/** Simple non-atomic counter aligned to the CPU L1 data cache line size
@tparam	Type	the integer type of the counter */
template <typename Type>
struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter
{
	/** Increment the counter */
	Type inc() { return add(1); }
	/** Decrement the counter */
	Type dec() { return add(Type(~0)); }

	/** Add to the counter
	@param[in]	i	amount to be added
	@return	the value of the counter after adding */
	Type add(Type i) { return m_counter += i; }

	/** @return the value of the counter */
	operator Type() const { return m_counter; }

private:
	/** The counter */
	Type	m_counter;
};
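
/* Illustrative only: a minimal usage sketch for simple_counter. The counter
is not atomic, so it must only be updated under some external mutex or from
a single thread; here static storage guarantees zero-initialisation.

@code
	static simple_counter<ulint>	n_requests;

	n_requests.inc();		// 1
	n_requests.add(41);		// 42
	n_requests.dec();		// 41

	ulint	current = n_requests;	// implicit conversion to ulint
@endcode */
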
#endif /* sync0types_h */