1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
4 Copyright (c) 2017, 2020, MariaDB Corporation.
5 
6 This program is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free Software
8 Foundation; version 2 of the License.
9 
10 This program is distributed in the hope that it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12 FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
13 
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc.,
16 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
17 
18 *****************************************************************************/
19 
20 /**************************************************//**
21 @file include/sync0types.h
22 Global types for sync
23 
24 Created 9/5/1995 Heikki Tuuri
25 *******************************************************/
26 
27 #ifndef sync0types_h
28 #define sync0types_h
29 
30 #include <vector>
31 #include <my_atomic.h>
32 
33 #include "ut0new.h"
34 
35 #ifdef _WIN32
36 /** Native mutex */
37 typedef CRITICAL_SECTION	sys_mutex_t;
38 #else
39 /** Native mutex */
40 typedef pthread_mutex_t		sys_mutex_t;
41 #endif /* _WIN32 */
42 
/** Mutex states. */
enum mutex_state_t {
	/** Mutex is free */
	MUTEX_STATE_UNLOCKED = 0,

	/** Mutex is acquired by some thread. */
	MUTEX_STATE_LOCKED = 1,

	/** Mutex is contended and there are threads waiting on the lock. */
	MUTEX_STATE_WAITERS = 2
};
54 
55 /*
56 		LATCHING ORDER WITHIN THE DATABASE
57 		==================================
58 
59 The mutex or latch in the central memory object, for instance, a rollback
60 segment object, must be acquired before acquiring the latch or latches to
61 the corresponding file data structure. In the latching order below, these
62 file page object latches are placed immediately below the corresponding
63 central memory object latch or mutex.
64 
65 Synchronization object			Notes
66 ----------------------			-----
67 
68 Dictionary mutex			If we have a pointer to a dictionary
69 |					object, e.g., a table, it can be
70 |					accessed without reserving the
71 |					dictionary mutex. We must have a
72 |					reservation, a memoryfix, to the
73 |					appropriate table object in this case,
74 |					and the table must be explicitly
75 |					released later.
76 V
77 Dictionary header
78 |
79 V
80 Secondary index tree latch		The tree latch protects also all
81 |					the B-tree non-leaf pages. These
82 V					can be read with the page only
83 Secondary index non-leaf		bufferfixed to save CPU time,
84 |					no s-latch is needed on the page.
85 |					Modification of a page requires an
86 |					x-latch on the page, however. If a
87 |					thread owns an x-latch to the tree,
88 |					it is allowed to latch non-leaf pages
89 |					even after it has acquired the fsp
90 |					latch.
91 V
92 Secondary index leaf			The latch on the secondary index leaf
93 |					can be kept while accessing the
94 |					clustered index, to save CPU time.
95 V
96 Clustered index tree latch		To increase concurrency, the tree
97 |					latch is usually released when the
98 |					leaf page latch has been acquired.
99 V
100 Clustered index non-leaf
101 |
102 V
103 Clustered index leaf
104 |
105 V
106 Transaction system header
107 |
108 V
109 Rollback segment mutex			The rollback segment mutex must be
110 |					reserved, if, e.g., a new page must
111 |					be added to an undo log. The rollback
112 |					segment and the undo logs in its
113 |					history list can be seen as an
114 |					analogue of a B-tree, and the latches
115 |					reserved similarly, using a version of
116 |					lock-coupling. If an undo log must be
117 |					extended by a page when inserting an
118 |					undo log record, this corresponds to
119 |					a pessimistic insert in a B-tree.
120 V
121 Rollback segment header
122 |
123 V
124 Purge system latch
125 |
126 V
127 Undo log pages				If a thread owns the trx undo mutex,
128 |					or for a log in the history list, the
129 |					rseg mutex, it is allowed to latch
130 |					undo log pages in any order, and even
131 |					after it has acquired the fsp latch.
132 |					If a thread does not have the
133 |					appropriate mutex, it is allowed to
134 |					latch only a single undo log page in
135 |					a mini-transaction.
136 V
137 File space management latch		If a mini-transaction must allocate
138 |					several file pages, it can do that,
139 |					because it keeps the x-latch to the
140 |					file space management in its memo.
141 V
142 File system pages
143 |
144 V
145 lock_sys_wait_mutex			Mutex protecting lock timeout data
146 |
147 V
148 lock_sys_mutex				Mutex protecting lock_sys_t
149 |
150 V
151 trx_sys.mutex				Mutex protecting trx_sys_t
152 |
153 V
154 Threads mutex				Background thread scheduling mutex
155 |
156 V
157 query_thr_mutex				Mutex protecting query threads
158 |
159 V
160 trx_mutex				Mutex protecting trx_t fields
161 |
162 V
163 Search system mutex
164 |
165 V
166 Buffer pool mutex
167 |
168 V
169 Log mutex
170 |
171 Any other latch
172 |
173 V
174 Memory pool mutex */
175 
/** Latching order levels. If you modify these, you have to also update
LatchDebug internals in sync0debug.cc

NOTE(review): the relative order of these enumerators encodes the legal
latch acquisition order enforced by the debug latch checker -- confirm the
intended direction against sync0debug.cc before reordering anything. */

enum latch_level_t {
	SYNC_UNKNOWN = 0,

	SYNC_MUTEX = 1,

	RW_LOCK_SX,
	RW_LOCK_X_WAIT,
	RW_LOCK_S,
	RW_LOCK_X,
	RW_LOCK_NOT_LOCKED,

	SYNC_MONITOR_MUTEX,

	SYNC_ANY_LATCH,

	SYNC_DOUBLEWRITE,

	SYNC_BUF_FLUSH_LIST,

	SYNC_BUF_BLOCK,
	SYNC_BUF_PAGE_HASH,

	SYNC_BUF_POOL,

	SYNC_POOL,
	SYNC_POOL_MANAGER,

	SYNC_SEARCH_SYS,

	SYNC_WORK_QUEUE,

	SYNC_FTS_TOKENIZE,
	SYNC_FTS_OPTIMIZE,
	SYNC_FTS_CACHE_INIT,
	SYNC_RECV,
	SYNC_LOG_FLUSH_ORDER,
	SYNC_LOG,
	SYNC_LOG_WRITE,
	SYNC_PAGE_CLEANER,
	SYNC_PURGE_QUEUE,
	SYNC_TRX_SYS_HEADER,
	SYNC_THREADS,
	SYNC_TRX,
	SYNC_RW_TRX_HASH_ELEMENT,
	SYNC_TRX_SYS,
	SYNC_LOCK_SYS,
	SYNC_LOCK_WAIT_SYS,

	SYNC_INDEX_ONLINE_LOG,

	SYNC_IBUF_BITMAP,
	SYNC_IBUF_BITMAP_MUTEX,
	SYNC_IBUF_TREE_NODE,
	SYNC_IBUF_TREE_NODE_NEW,
	SYNC_IBUF_INDEX_TREE,

	SYNC_IBUF_MUTEX,

	SYNC_FSP_PAGE,
	SYNC_FSP,
	SYNC_EXTERN_STORAGE,
	SYNC_TRX_UNDO_PAGE,
	SYNC_RSEG_HEADER,
	SYNC_RSEG_HEADER_NEW,
	SYNC_NOREDO_RSEG,
	SYNC_REDO_RSEG,
	SYNC_PURGE_LATCH,
	SYNC_TREE_NODE,
	SYNC_TREE_NODE_FROM_HASH,
	SYNC_TREE_NODE_NEW,
	SYNC_IBUF_PESS_INSERT_MUTEX,
	SYNC_INDEX_TREE,

	SYNC_IBUF_HEADER,
	SYNC_DICT_HEADER,
	SYNC_STATS_AUTO_RECALC,
	SYNC_DICT_AUTOINC_MUTEX,
	SYNC_DICT,
	SYNC_FTS_CACHE,

	SYNC_DICT_OPERATION,

	SYNC_TRX_I_S_LAST_READ,

	SYNC_TRX_I_S_RWLOCK,

	SYNC_RECV_WRITER,

	/** Level is varying. Only used with buffer pool page locks, which
	do not have a fixed level, but instead have their level set after
	the page is locked; see e.g.  ibuf_bitmap_get_map_page(). */

	SYNC_LEVEL_VARYING,

	/** This can be used to suppress order checking. */
	SYNC_NO_ORDER_CHECK,

	/** Maximum level value */
	SYNC_LEVEL_MAX = SYNC_NO_ORDER_CHECK
};
279 
/** Each latch has an ID. This id is used for creating the latch and to look
up its meta-data. See sync0debug.cc. The IDs index the latch_meta vector
(see sync_latch_get_meta() below, which asserts id == latch_meta[id]->get_id()),
so new entries must be registered there as well. */
enum latch_id_t {
	LATCH_ID_NONE = 0,
	LATCH_ID_AUTOINC,
	LATCH_ID_BUF_BLOCK_MUTEX,
	LATCH_ID_BUF_POOL,
	LATCH_ID_BUF_POOL_ZIP,
	LATCH_ID_CACHE_LAST_READ,
	LATCH_ID_DICT_FOREIGN_ERR,
	LATCH_ID_DICT_SYS,
	LATCH_ID_FILE_FORMAT_MAX,
	LATCH_ID_FIL_SYSTEM,
	LATCH_ID_FLUSH_LIST,
	LATCH_ID_FTS_DELETE,
	LATCH_ID_FTS_DOC_ID,
	LATCH_ID_FTS_PLL_TOKENIZE,
	LATCH_ID_HASH_TABLE_MUTEX,
	LATCH_ID_IBUF_BITMAP,
	LATCH_ID_IBUF,
	LATCH_ID_IBUF_PESSIMISTIC_INSERT,
	LATCH_ID_LOG_SYS,
	LATCH_ID_LOG_WRITE,
	LATCH_ID_LOG_FLUSH_ORDER,
	LATCH_ID_LIST,
	LATCH_ID_MUTEX_LIST,
	LATCH_ID_PAGE_CLEANER,
	LATCH_ID_PURGE_SYS_PQ,
	LATCH_ID_RECALC_POOL,
	LATCH_ID_RECV_SYS,
	LATCH_ID_RECV_WRITER,
	LATCH_ID_REDO_RSEG,
	LATCH_ID_NOREDO_RSEG,
	LATCH_ID_RW_LOCK_DEBUG,
	LATCH_ID_RTR_ACTIVE_MUTEX,
	LATCH_ID_RTR_MATCH_MUTEX,
	LATCH_ID_RTR_PATH_MUTEX,
	LATCH_ID_RW_LOCK_LIST,
	LATCH_ID_RW_LOCK_MUTEX,
	LATCH_ID_SRV_INNODB_MONITOR,
	LATCH_ID_SRV_MISC_TMPFILE,
	LATCH_ID_SRV_MONITOR_FILE,
	LATCH_ID_BUF_DBLWR,
	LATCH_ID_TRX_POOL,
	LATCH_ID_TRX_POOL_MANAGER,
	LATCH_ID_TRX,
	LATCH_ID_LOCK_SYS,
	LATCH_ID_LOCK_SYS_WAIT,
	LATCH_ID_TRX_SYS,
	LATCH_ID_SRV_SYS,
	LATCH_ID_SRV_SYS_TASKS,
	LATCH_ID_PAGE_ZIP_STAT_PER_INDEX,
	LATCH_ID_EVENT_MANAGER,
	LATCH_ID_EVENT_MUTEX,
	LATCH_ID_SYNC_ARRAY_MUTEX,
	LATCH_ID_ZIP_PAD_MUTEX,
	LATCH_ID_OS_AIO_READ_MUTEX,
	LATCH_ID_OS_AIO_WRITE_MUTEX,
	LATCH_ID_OS_AIO_LOG_MUTEX,
	LATCH_ID_OS_AIO_IBUF_MUTEX,
	LATCH_ID_OS_AIO_SYNC_MUTEX,
	LATCH_ID_ROW_DROP_LIST,
	LATCH_ID_INDEX_ONLINE_LOG,
	LATCH_ID_WORK_QUEUE,
	LATCH_ID_BTR_SEARCH,
	LATCH_ID_BUF_BLOCK_LOCK,
	LATCH_ID_BUF_BLOCK_DEBUG,
	LATCH_ID_DICT_OPERATION,
	LATCH_ID_CHECKPOINT,
	LATCH_ID_FIL_SPACE,
	LATCH_ID_FTS_CACHE,
	LATCH_ID_FTS_CACHE_INIT,
	LATCH_ID_TRX_I_S_CACHE,
	LATCH_ID_TRX_PURGE,
	LATCH_ID_IBUF_INDEX_TREE,
	LATCH_ID_INDEX_TREE,
	LATCH_ID_DICT_TABLE_STATS,
	LATCH_ID_HASH_TABLE_RW_LOCK,
	LATCH_ID_BUF_CHUNK_MAP_LATCH,
	LATCH_ID_SYNC_DEBUG_MUTEX,
	LATCH_ID_SCRUB_STAT_MUTEX,
	LATCH_ID_DEFRAGMENT_MUTEX,
	LATCH_ID_BTR_DEFRAGMENT_MUTEX,
	LATCH_ID_FIL_CRYPT_STAT_MUTEX,
	LATCH_ID_FIL_CRYPT_DATA_MUTEX,
	LATCH_ID_FIL_CRYPT_THREADS_MUTEX,
	LATCH_ID_RW_TRX_HASH_ELEMENT,
	LATCH_ID_TEST_MUTEX,
	/** Largest valid latch ID value */
	LATCH_ID_MAX = LATCH_ID_TEST_MUTEX
};
370 
371 #ifndef UNIV_INNOCHECKSUM
372 /** OS mutex, without any policy. It is a thin wrapper around the
373 system mutexes. The interface is different from the policy mutexes,
374 to ensure that it is called directly and not confused with the
375 policy mutexes. */
376 struct OSMutex {
377 
378 	/** Constructor */
OSMutexOSMutex379 	OSMutex()
380 		UNIV_NOTHROW
381 	{
382 		ut_d(m_freed = true);
383 	}
384 
385 	/** Create the mutex by calling the system functions. */
initOSMutex386 	void init()
387 		UNIV_NOTHROW
388 	{
389 		ut_ad(m_freed);
390 
391 #ifdef _WIN32
392 		InitializeCriticalSection((LPCRITICAL_SECTION) &m_mutex);
393 #else
394 		{
395 			int	ret = pthread_mutex_init(&m_mutex, NULL);
396 			ut_a(ret == 0);
397 		}
398 #endif /* _WIN32 */
399 
400 		ut_d(m_freed = false);
401 	}
402 
403 	/** Destructor */
~OSMutexOSMutex404 	~OSMutex() { }
405 
406 	/** Destroy the mutex */
destroyOSMutex407 	void destroy()
408 		UNIV_NOTHROW
409 	{
410 		ut_ad(!m_freed);
411 #ifdef _WIN32
412 		DeleteCriticalSection((LPCRITICAL_SECTION) &m_mutex);
413 #else
414 		int	ret;
415 
416 		ret = pthread_mutex_destroy(&m_mutex);
417 
418 		if (ret != 0) {
419 
420 			ib::error()
421 				<< "Return value " << ret << " when calling "
422 				<< "pthread_mutex_destroy().";
423 		}
424 #endif /* _WIN32 */
425 		ut_d(m_freed = true);
426 	}
427 
428 	/** Release the mutex. */
exitOSMutex429 	void exit()
430 		UNIV_NOTHROW
431 	{
432 		ut_ad(!m_freed);
433 #ifdef _WIN32
434 		LeaveCriticalSection(&m_mutex);
435 #else
436 		int	ret = pthread_mutex_unlock(&m_mutex);
437 		ut_a(ret == 0);
438 #endif /* _WIN32 */
439 	}
440 
441 	/** Acquire the mutex. */
enterOSMutex442 	void enter()
443 		UNIV_NOTHROW
444 	{
445 		ut_ad(!m_freed);
446 #ifdef _WIN32
447 		EnterCriticalSection((LPCRITICAL_SECTION) &m_mutex);
448 #else
449 		int	ret = pthread_mutex_lock(&m_mutex);
450 		ut_a(ret == 0);
451 #endif /* _WIN32 */
452 	}
453 
454 	/** @return true if locking succeeded */
try_lockOSMutex455 	bool try_lock()
456 		UNIV_NOTHROW
457 	{
458 		ut_ad(!m_freed);
459 #ifdef _WIN32
460 		return(TryEnterCriticalSection(&m_mutex) != 0);
461 #else
462 		return(pthread_mutex_trylock(&m_mutex) == 0);
463 #endif /* _WIN32 */
464 	}
465 
466 	/** Required for os_event_t */
467 	operator sys_mutex_t*()
468 		UNIV_NOTHROW
469 	{
470 		return(&m_mutex);
471 	}
472 
473 private:
474 #ifdef DBUG_ASSERT_EXISTS
475 	/** true if the mutex has been freed/destroyed. */
476 	bool			m_freed;
477 #endif /* DBUG_ASSERT_EXISTS */
478 
479 	sys_mutex_t		m_mutex;
480 };
481 
#ifdef UNIV_PFS_MUTEX
/** Latch element.
Used for mutexes which have PFS keys defined under UNIV_PFS_MUTEX.
@param[in]	id		Latch id
@param[in]	level		Latch level
@param[in]	key		PFS key */
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))

/* NOTE(review): this UNIV_PFS_RWLOCK block is nested inside the
UNIV_PFS_MUTEX guard, so rwlock PFS instrumentation is only available
when mutex PFS instrumentation is also compiled in -- confirm that this
nesting is intentional. */
#ifdef UNIV_PFS_RWLOCK
/** Latch element.
Used for rwlocks which have PFS keys defined under UNIV_PFS_RWLOCK.
@param[in]	id		Latch id
@param[in]	level		Latch level
@param[in]	key		PFS key */
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level, key))
#else
/** Latch element for rwlocks when UNIV_PFS_RWLOCK is not defined:
the key argument is ignored and PSI_NOT_INSTRUMENTED is stored instead,
so call sites remain identical in both builds. */
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level,	     \
		     PSI_NOT_INSTRUMENTED))
#endif /* UNIV_PFS_RWLOCK */

#else
/** Latch elements for non-PFS builds: no key is stored. The key
argument is accepted and discarded so call sites are identical. */
# define LATCH_ADD_MUTEX(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
# define LATCH_ADD_RWLOCK(id, level, key)	latch_meta[LATCH_ID_ ## id] =\
	UT_NEW_NOKEY(latch_meta_t(LATCH_ID_ ## id, #id, level, #level))
#endif /* UNIV_PFS_MUTEX */
511 
512 /** Default latch counter */
513 class LatchCounter {
514 
515 public:
516 	/** The counts we collect for a mutex */
517 	struct Count {
518 
519 		/** Constructor */
CountCount520 		Count()
521 			UNIV_NOTHROW
522 			:
523 			m_spins(),
524 			m_waits(),
525 			m_calls(),
526 			m_enabled()
527 		{
528 			/* No op */
529 		}
530 
531 		/** Rest the values to zero */
resetCount532 		void reset()
533 			UNIV_NOTHROW
534 		{
535 			m_spins = 0;
536 			m_waits = 0;
537 			m_calls = 0;
538 		}
539 
540 		/** Number of spins trying to acquire the latch. */
541 		uint32_t	m_spins;
542 
543 		/** Number of waits trying to acquire the latch */
544 		uint32_t	m_waits;
545 
546 		/** Number of times it was called */
547 		uint32_t	m_calls;
548 
549 		/** true if enabled */
550 		bool		m_enabled;
551 	};
552 
553 	/** Constructor */
LatchCounter()554 	LatchCounter()
555 		UNIV_NOTHROW
556 		:
557 		m_active(false)
558 	{
559 		m_mutex.init();
560 	}
561 
562 	/** Destructor */
~LatchCounter()563 	~LatchCounter()
564 		UNIV_NOTHROW
565 	{
566 		m_mutex.destroy();
567 
568 		for (Counters::iterator it = m_counters.begin();
569 		     it != m_counters.end();
570 		     ++it) {
571 
572 			Count*	count = *it;
573 
574 			UT_DELETE(count);
575 		}
576 	}
577 
578 	/** Reset all counters to zero. It is not protected by any
579 	mutex and we don't care about atomicity. Unless it is a
580 	demonstrated problem. The information collected is not
581 	required for the correct functioning of the server. */
reset()582 	void reset()
583 		UNIV_NOTHROW
584 	{
585 		m_mutex.enter();
586 
587 		Counters::iterator	end = m_counters.end();
588 
589 		for (Counters::iterator it = m_counters.begin();
590 		     it != end;
591 		     ++it) {
592 
593 			(*it)->reset();
594 		}
595 
596 		m_mutex.exit();
597 	}
598 
599 	/** @return the aggregate counter */
sum_register()600 	Count* sum_register()
601 		UNIV_NOTHROW
602 	{
603 		m_mutex.enter();
604 
605 		Count*	count;
606 
607 		if (m_counters.empty()) {
608 			count = UT_NEW_NOKEY(Count());
609 			m_counters.push_back(count);
610 		} else {
611 			ut_a(m_counters.size() == 1);
612 			count = m_counters[0];
613 		}
614 
615 		m_mutex.exit();
616 
617 		return(count);
618 	}
619 
620 	/** Register a single instance counter */
single_register(Count * count)621 	void single_register(Count* count)
622 		UNIV_NOTHROW
623 	{
624 		m_mutex.enter();
625 
626 		m_counters.push_back(count);
627 
628 		m_mutex.exit();
629 	}
630 
631 	/** Deregister a single instance counter
632 	@param[in]	count		The count instance to deregister */
single_deregister(Count * count)633 	void single_deregister(Count* count)
634 		UNIV_NOTHROW
635 	{
636 		m_mutex.enter();
637 
638 		m_counters.erase(
639 			std::remove(
640 				m_counters.begin(),
641 				m_counters.end(), count),
642 			m_counters.end());
643 
644 		m_mutex.exit();
645 	}
646 
647 	/** Iterate over the counters */
iterate(const C & callback)648 	template<typename C> void iterate(const C& callback) UNIV_NOTHROW
649 	{
650 		m_mutex.enter();
651 
652 		Counters::const_iterator	end = m_counters.end();
653 
654 		for (Counters::const_iterator it = m_counters.begin();
655 		     it != end;
656 		     ++it) {
657 
658 			callback(*it);
659 		}
660 
661 		m_mutex.exit();
662 	}
663 
664 	/** Disable the monitoring */
enable()665 	void enable()
666 		UNIV_NOTHROW
667 	{
668 		m_mutex.enter();
669 
670 		Counters::const_iterator	end = m_counters.end();
671 
672 		for (Counters::const_iterator it = m_counters.begin();
673 		     it != end;
674 		     ++it) {
675 
676 			(*it)->m_enabled = true;
677 		}
678 
679 		m_active = true;
680 
681 		m_mutex.exit();
682 	}
683 
684 	/** Disable the monitoring */
disable()685 	void disable()
686 		UNIV_NOTHROW
687 	{
688 		m_mutex.enter();
689 
690 		Counters::const_iterator	end = m_counters.end();
691 
692 		for (Counters::const_iterator it = m_counters.begin();
693 		     it != end;
694 		     ++it) {
695 
696 			(*it)->m_enabled = false;
697 		}
698 
699 		m_active = false;
700 
701 		m_mutex.exit();
702 	}
703 
704 	/** @return if monitoring is active */
is_enabled()705 	bool is_enabled() const
706 		UNIV_NOTHROW
707 	{
708 		return(m_active);
709 	}
710 
711 private:
712 	/* Disable copying */
713 	LatchCounter(const LatchCounter&);
714 	LatchCounter& operator=(const LatchCounter&);
715 
716 private:
717 	typedef OSMutex Mutex;
718 	typedef std::vector<Count*> Counters;
719 
720 	/** Mutex protecting m_counters */
721 	Mutex			m_mutex;
722 
723 	/** Counters for the latches */
724 	Counters		m_counters;
725 
726 	/** if true then we collect the data */
727 	bool			m_active;
728 };
729 
730 /** Latch meta data */
731 template <typename Counter = LatchCounter>
732 class LatchMeta {
733 
734 public:
735 	typedef Counter CounterType;
736 
737 #ifdef UNIV_PFS_MUTEX
738 	typedef	mysql_pfs_key_t	pfs_key_t;
739 #endif /* UNIV_PFS_MUTEX */
740 
741 	/** Constructor */
LatchMeta()742 	LatchMeta()
743 		:
744 		m_id(LATCH_ID_NONE),
745 		m_name(),
746 		m_level(SYNC_UNKNOWN),
747 		m_level_name()
748 #ifdef UNIV_PFS_MUTEX
749 		,m_pfs_key()
750 #endif /* UNIV_PFS_MUTEX */
751 	{
752 	}
753 
754 	/** Destructor */
~LatchMeta()755 	~LatchMeta() { }
756 
757 	/** Constructor
758 	@param[in]	id		Latch id
759 	@param[in]	name		Latch name
760 	@param[in]	level		Latch level
761 	@param[in]	level_name	Latch level text representation
762 	@param[in]	key		PFS key */
LatchMeta(latch_id_t id,const char * name,latch_level_t level,const char * level_name,pfs_key_t key)763 	LatchMeta(
764 		latch_id_t	id,
765 		const char*	name,
766 		latch_level_t	level,
767 		const char*	level_name
768 #ifdef UNIV_PFS_MUTEX
769 		,pfs_key_t	key
770 #endif /* UNIV_PFS_MUTEX */
771 	      )
772 		:
773 		m_id(id),
774 		m_name(name),
775 		m_level(level),
776 		m_level_name(level_name)
777 #ifdef UNIV_PFS_MUTEX
778 		,m_pfs_key(key)
779 #endif /* UNIV_PFS_MUTEX */
780 	{
781 		/* No op */
782 	}
783 
784 	/* Less than operator.
785 	@param[in]	rhs		Instance to compare against
786 	@return true if this.get_id() < rhs.get_id() */
787 	bool operator<(const LatchMeta& rhs) const
788 	{
789 		return(get_id() < rhs.get_id());
790 	}
791 
792 	/** @return the latch id */
get_id()793 	latch_id_t get_id() const
794 	{
795 		return(m_id);
796 	}
797 
798 	/** @return the latch name */
get_name()799 	const char* get_name() const
800 	{
801 		return(m_name);
802 	}
803 
804 	/** @return the latch level */
get_level()805 	latch_level_t get_level() const
806 	{
807 		return(m_level);
808 	}
809 
810 	/** @return the latch level name */
get_level_name()811 	const char* get_level_name() const
812 	{
813 		return(m_level_name);
814 	}
815 
816 #ifdef UNIV_PFS_MUTEX
817 	/** @return the PFS key for the latch */
get_pfs_key()818 	pfs_key_t get_pfs_key() const
819 	{
820 		return(m_pfs_key);
821 	}
822 #endif /* UNIV_PFS_MUTEX */
823 
824 	/** @return the counter instance */
get_counter()825 	Counter* get_counter()
826 	{
827 		return(&m_counter);
828 	}
829 
830 private:
831 	/** Latch id */
832 	latch_id_t		m_id;
833 
834 	/** Latch name */
835 	const char*		m_name;
836 
837 	/** Latch level in the ordering */
838 	latch_level_t		m_level;
839 
840 	/** Latch level text representation */
841 	const char*		m_level_name;
842 
843 #ifdef UNIV_PFS_MUTEX
844 	/** PFS key */
845 	pfs_key_t		m_pfs_key;
846 #endif /* UNIV_PFS_MUTEX */
847 
848 	/** For gathering latch statistics */
849 	Counter			m_counter;
850 };
851 
852 typedef LatchMeta<LatchCounter> latch_meta_t;
853 typedef std::vector<latch_meta_t*, ut_allocator<latch_meta_t*> > LatchMetaData;
854 
855 /** Note: This is accessed without any mutex protection. It is initialised
856 at startup and elements should not be added to or removed from it after
857 that.  See sync_latch_meta_init() */
858 extern LatchMetaData	latch_meta;
859 
860 /** Get the latch meta-data from the latch ID
861 @param[in]	id		Latch ID
862 @return the latch meta data */
863 inline
864 latch_meta_t&
sync_latch_get_meta(latch_id_t id)865 sync_latch_get_meta(latch_id_t id)
866 {
867 	ut_ad(static_cast<size_t>(id) < latch_meta.size());
868 	ut_ad(id == latch_meta[id]->get_id());
869 
870 	return(*latch_meta[id]);
871 }
872 
873 /** Fetch the counter for the latch
874 @param[in]	id		Latch ID
875 @return the latch counter */
876 inline
877 latch_meta_t::CounterType*
sync_latch_get_counter(latch_id_t id)878 sync_latch_get_counter(latch_id_t id)
879 {
880 	latch_meta_t&	meta = sync_latch_get_meta(id);
881 
882 	return(meta.get_counter());
883 }
884 
885 /** Get the latch name from the latch ID
886 @param[in]	id		Latch ID
887 @return the name, will assert if not found */
888 inline
889 const char*
sync_latch_get_name(latch_id_t id)890 sync_latch_get_name(latch_id_t id)
891 {
892 	const latch_meta_t&	meta = sync_latch_get_meta(id);
893 
894 	return(meta.get_name());
895 }
896 
897 /** Get the latch ordering level
898 @param[in]	id		Latch id to lookup
899 @return the latch level */
900 inline
901 latch_level_t
sync_latch_get_level(latch_id_t id)902 sync_latch_get_level(latch_id_t id)
903 {
904 	const latch_meta_t&	meta = sync_latch_get_meta(id);
905 
906 	return(meta.get_level());
907 }
908 
#ifdef UNIV_PFS_MUTEX
/** Get the latch PFS key from the latch ID
@param[in]	id		Latch ID
@return the PFS key */
inline
mysql_pfs_key_t
sync_latch_get_pfs_key(latch_id_t id)
{
	const latch_meta_t&	meta = sync_latch_get_meta(id);

	return(meta.get_pfs_key());
}
#endif /* UNIV_PFS_MUTEX */
922 
923 /** String representation of the filename and line number where the
924 latch was created
925 @param[in]	id		Latch ID
@param[in]	created		Filename and line number where it was created
927 @return the string representation */
928 std::string
929 sync_mutex_to_string(
930 	latch_id_t		id,
931 	const std::string&	created);
932 
933 /** Get the latch name from a sync level
934 @param[in]	level		Latch level to lookup
935 @return 0 if not found. */
936 const char*
937 sync_latch_get_name(latch_level_t level);
938 
939 /** Print the filename "basename"
940 @return the basename */
941 const char*
942 sync_basename(const char* filename);
943 
944 /** Register a latch, called when it is created
945 @param[in]	ptr		Latch instance that was created
946 @param[in]	filename	Filename where it was created
947 @param[in]	line		Line number in filename */
948 void
949 sync_file_created_register(
950 	const void*	ptr,
951 	const char*	filename,
952 	uint16_t	line);
953 
954 /** Deregister a latch, called when it is destroyed
955 @param[in]	ptr		Latch to be destroyed */
956 void
957 sync_file_created_deregister(const void* ptr);
958 
959 /** Get the string where the file was created. Its format is "name:line"
960 @param[in]	ptr		Latch instance
961 @return created information or "" if can't be found */
962 std::string
963 sync_file_created_get(const void* ptr);
964 
965 #ifdef UNIV_DEBUG
966 
967 /** All (ordered) latches, used in debugging, must derive from this class. */
968 struct latch_t {
969 
970 	/** Constructor
971 	@param[in]	id	The latch ID */
972 	explicit latch_t(latch_id_t id = LATCH_ID_NONE)
973 		UNIV_NOTHROW
974 		:
m_idlatch_t975 		m_id(id),
976 		m_rw_lock() {}
977 
978 	/** Destructor */
~latch_tlatch_t979 	virtual ~latch_t() UNIV_NOTHROW { }
980 
981 	/** @return the latch ID */
get_idlatch_t982 	latch_id_t get_id() const
983 	{
984 		return(m_id);
985 	}
986 
987 	/** @return true if it is a rw-lock */
is_rw_locklatch_t988 	bool is_rw_lock() const
989 		UNIV_NOTHROW
990 	{
991 		return(m_rw_lock);
992 	}
993 
994 	/** Print the latch context
995 	@return the string representation */
996 	virtual std::string to_string() const = 0;
997 
998 	/** @return "filename:line" from where the latch was last locked */
999 	virtual std::string locked_from() const = 0;
1000 
1001 	/** @return the latch level */
get_levellatch_t1002 	latch_level_t get_level() const
1003 		UNIV_NOTHROW
1004 	{
1005 		ut_a(m_id != LATCH_ID_NONE);
1006 
1007 		return(sync_latch_get_level(m_id));
1008 	}
1009 
1010 	/** @return the latch name, m_id must be set  */
get_namelatch_t1011 	const char* get_name() const
1012 		UNIV_NOTHROW
1013 	{
1014 		ut_a(m_id != LATCH_ID_NONE);
1015 
1016 		return(sync_latch_get_name(m_id));
1017 	}
1018 
1019 	/** Latch ID */
1020 	latch_id_t	m_id;
1021 
1022 	/** true if it is a rw-lock. In debug mode, rw_lock_t derives from
1023 	this class and sets this variable. */
1024 	bool		m_rw_lock;
1025 };
1026 
1027 /** Subclass this to iterate over a thread's acquired latch levels. */
1028 struct sync_check_functor_t {
~sync_check_functor_tsync_check_functor_t1029 	virtual ~sync_check_functor_t() { }
1030 	virtual bool operator()(const latch_level_t) const = 0;
1031 };
1032 
1033 /** Check that no latch is being held.
1034 @tparam	some_allowed	whether some latches are allowed to be held */
1035 template<bool some_allowed = false>
1036 struct sync_checker : public sync_check_functor_t
1037 {
1038 	/** Check the latching constraints
1039 	@param[in]	level		The level held by the thread
1040 	@return whether a latch violation was detected */
operatorsync_checker1041 	bool operator()(const latch_level_t level) const
1042 	{
1043 		if (some_allowed) {
1044 			switch (level) {
1045 			case SYNC_RECV_WRITER:
1046 				/* This only happens in
1047 				recv_apply_hashed_log_recs. */
1048 			case SYNC_DICT:
1049 			case SYNC_DICT_OPERATION:
1050 			case SYNC_FTS_CACHE:
1051 			case SYNC_NO_ORDER_CHECK:
1052 				return(false);
1053 			default:
1054 				return(true);
1055 			}
1056 		}
1057 
1058 		return(true);
1059 	}
1060 };
1061 
1062 /** The strict latch checker (no InnoDB latches may be held) */
1063 typedef struct sync_checker<false> sync_check;
1064 /** The sloppy latch checker (can hold InnoDB dictionary or SQL latches) */
1065 typedef struct sync_checker<true> dict_sync_check;
1066 
/** Functor to check for given latching constraints. */
struct sync_allowed_latches : public sync_check_functor_t {

	/** Constructor
	@param[in]	from	first element in an array of latch_level_t
	@param[in]	to	last element in an array of latch_level_t */
	sync_allowed_latches(
		const latch_level_t*	from,
		const latch_level_t*	to)
		: begin(from), end(to) { }

	/** Checks whether the given latch level violates the latch
	constraint. This object maintains a list of allowed latch levels,
	and if the given latch belongs to a latch level that is not there
	in the allowed list, then it is a violation.

	@param[in]	level	The latch level to check
	@return true if there is a latch violation */
	bool operator()(const latch_level_t level) const
	{
		return(std::find(begin, end, level) == end);
	}

private:
	/** First element in an array of allowed latch levels */
	const latch_level_t* const begin;
	/** First element after the end of the array of allowed latch levels */
	const latch_level_t* const end;
};
1096 
/** Get the latch id from a latch name.
@param[in]	name	Latch name
@return the matching latch id, or LATCH_ID_NONE if not found. */
1100 latch_id_t
1101 sync_latch_get_id(const char* name);
1102 
/** Bitmask of rw_lock_flag_t values, passed to rw_lock_own_flagged() */
typedef ulint rw_lock_flags_t;

/* Flags to specify lock types for rw_lock_own_flagged() */
enum rw_lock_flag_t {
	/** Shared (S) lock */
	RW_LOCK_FLAG_S  = 1 << 0,
	/** Exclusive (X) lock */
	RW_LOCK_FLAG_X  = 1 << 1,
	/** Shared-exclusive (SX) lock */
	RW_LOCK_FLAG_SX = 1 << 2
};
1111 
#endif /* UNIV_DEBUG */
1113 
1114 #endif /* UNIV_INNOCHECKSUM */
1115 
/** Atomically add B to *A.
Uses the 64-bit primitive on _WIN64 (presumably because ulint is 64 bits
wide there while long is 32 bits -- TODO confirm) and the long-sized
primitive elsewhere.
@param[in,out]	A	value to add to
@param[in]	B	amount to add
@return the value of *A before the addition (see simple_atomic_counter
below, which documents add() as returning the pre-add value) */
static inline ulint my_atomic_addlint(ulint *A, ulint B)
{
#ifdef _WIN64
  return ulint(my_atomic_add64((volatile int64*)A, B));
#else
  return ulint(my_atomic_addlong(A, B));
#endif
}
1124 
/** Atomically read *A.
@param[in]	A	value to read
@return the current value of *A */
static inline ulint my_atomic_loadlint(const ulint *A)
{
#ifdef _WIN64
  return ulint(my_atomic_load64((volatile int64*)A));
#else
  return ulint(my_atomic_loadlong(A));
#endif
}
1133 
/** Atomically add B to *A (signed overload).
@param[in,out]	A	value to add to
@param[in]	B	amount to add
@return the value of *A before the addition -- TODO confirm against the
my_atomic API documentation */
static inline lint my_atomic_addlint(volatile lint *A, lint B)
{
#ifdef _WIN64
  return my_atomic_add64((volatile int64*)A, B);
#else
  return my_atomic_addlong(A, B);
#endif
}
1142 
/** Atomically read *A (signed overload).
@param[in]	A	value to read
@return the current value of *A */
static inline lint my_atomic_loadlint(const lint *A)
{
#ifdef _WIN64
  return lint(my_atomic_load64((volatile int64*)A));
#else
  return my_atomic_loadlong(A);
#endif
}
1151 
/** Atomically store B into *A.
@param[out]	A	destination
@param[in]	B	value to store */
static inline void my_atomic_storelint(ulint *A, ulint B)
{
#ifdef _WIN64
  my_atomic_store64((volatile int64*)A, B);
#else
  my_atomic_storelong(A, B);
#endif
}
1160 
/** Simple non-atomic counter, aligned to the L1 data cache line size
(CPU_LEVEL1_DCACHE_LINESIZE) to avoid false sharing with neighbours.
Callers must provide any synchronization they need.
@tparam	Type	the integer type of the counter */
template <typename Type>
struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_counter
{
	/** Increment the counter
	@return the value after incrementing */
	Type inc() { return add(1); }
	/** Decrement the counter; Type(~0) is all-ones, i.e. -1 modulo
	the width of Type.
	@return the value after decrementing */
	Type dec() { return add(Type(~0)); }

	/** Add to the counter
	@param[in]	i	amount to be added
	@return	the value of the counter after adding */
	Type add(Type i) { return m_counter += i; }

	/** @return the value of the counter */
	operator Type() const { return m_counter; }

private:
	/** The counter (plain, non-atomic storage) */
	Type	m_counter;
};
1183 
/** Simple atomic counter, aligned to the L1 data cache line size
(CPU_LEVEL1_DCACHE_LINESIZE) to avoid false sharing. Updates go through
the my_atomic_addlint() wrappers above; plain reads are not atomic.
@tparam	Type	lint or ulint */
template <typename Type = ulint>
struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) simple_atomic_counter
{
	/** Increment the counter
	@return the value before incrementing */
	Type inc() { return add(1); }
	/** Decrement the counter; Type(~0) is all-ones, i.e. -1 modulo
	the width of Type.
	@return the value before decrementing */
	Type dec() { return add(Type(~0)); }

	/** Add to the counter
	@param[in]	i	amount to be added
	@return	the value of the counter before adding */
	Type add(Type i) { return my_atomic_addlint(&m_counter, i); }

	/** @return the value of the counter (non-atomic access)! */
	operator Type() const { return m_counter; }

private:
	/** The counter */
	Type	m_counter;
};
1206 
1207 #endif /* sync0types_h */
1208