/*****************************************************************************

Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2021, MariaDB Corporation.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file trx/trx0i_s.cc
INFORMATION SCHEMA innodb_trx, innodb_locks and
innodb_lock_waits tables fetch code.

The code below fetches information needed to fill those
3 dynamic tables and uploads it into a "transactions
table cache" for later retrieval.

Created July 17, 2007 Vasil Dimov
*******************************************************/

#include "trx0i_s.h"
#include "buf0buf.h"
#include "dict0dict.h"
#include "ha0storage.h"
#include "hash0hash.h"
#include "lock0iter.h"
#include "lock0lock.h"
#include "mem0mem.h"
#include "page0page.h"
#include "rem0rec.h"
#include "row0row.h"
#include "srv0srv.h"
#include "sync0rw.h"
#include "sync0sync.h"
#include "trx0sys.h"
#include "que0que.h"
#include "trx0purge.h"
#include "sql_class.h"

/** Initial number of rows in the table cache */
#define TABLE_CACHE_INITIAL_ROWSNUM	1024

/** @brief The maximum number of chunks to allocate for a table cache.

The rows of a table cache are stored in a set of chunks. When a new
row is added, a new chunk is allocated if necessary. Assuming that the
first chunk holds 1024 rows (TABLE_CACHE_INITIAL_ROWSNUM) and each
subsequent one holds N/2 rows, where N is the number of rows allocated
so far, the 39th chunk would accommodate 1677416425 rows and all chunks
together would accommodate 3354832851 rows. */
#define MEM_CHUNKS_IN_TABLE_CACHE	39
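
/* For illustration only (this follows from the growth rule described
above, it is not computed anywhere in this file): with integer division
the chunk sizes come out as 1024, 512, 768, 1152, 1728, ... rows, i.e.
the total number of allocated rows grows by roughly 1.5x with each new
chunk. */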

/** The following are some testing auxiliary macros. Do not enable them
in a production environment. */
/* @{ */

#if 0
/** If this is enabled then lock folds will always be different,
resulting in equal rows being put in different cells of the hash
table. Checking for duplicates will be flawed because a different
fold will be calculated when a row is searched for in the hash table. */
#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT
#endif

#if 0
/** This effectively kills the search-for-duplicate-before-adding-a-row
function, but searching in the hash is still performed. It will always
be assumed that the lock is not present and insertion will be performed
in the hash table. */
#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
#endif

#if 0
/** This aggressively repeats adding each row many times. Depending on
the above settings this may be a no-op or may result in lots of rows
being added. */
#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
#endif

#if 0
/** Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T but hash
table search is not performed at all. */
#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
#endif

#if 0
/** Do not insert each row into the hash table. If this is enabled,
duplicates may appear, and searching the hash becomes a no-op because
it will be empty. */
#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
#endif
/* @} */

/** Memory limit passed to ha_storage_put_memlim().
@param cache hash storage
@return maximum allowed allocation size */
#define MAX_ALLOWED_FOR_STORAGE(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd)

/** Memory limit in table_cache_create_empty_row().
@param cache hash storage
@return maximum allowed allocation size */
#define MAX_ALLOWED_FOR_ALLOC(cache)		\
	(TRX_I_S_MEM_LIMIT			\
	 - (cache)->mem_allocd			\
	 - ha_storage_get_size((cache)->storage))
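
/* A worked example (illustrative values only, assuming a
TRX_I_S_MEM_LIMIT of 16 MiB as defined in trx0i_s.h): if 1 MiB has
already been allocated for rows (mem_allocd) and the string storage
holds 2 MiB, then MAX_ALLOWED_FOR_STORAGE() allows up to 15 MiB more
for strings, while MAX_ALLOWED_FOR_ALLOC() allows only up to 13 MiB
more for rows, because it also subtracts the storage size. */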

/** Memory for each table in the intermediate buffer is allocated in
separate chunks. These chunks are considered to be concatenated to
represent one flat array of rows. */
struct i_s_mem_chunk_t {
	ulint	offset;		/*!< offset, in number of rows */
	ulint	rows_allocd;	/*!< the size of this chunk, in number
				of rows */
	void*	base;		/*!< start of the chunk */
};

/** This represents one table's cache. */
struct i_s_table_cache_t {
	ulint		rows_used;	/*!< number of used rows */
	ulint		rows_allocd;	/*!< number of allocated rows */
	ulint		row_size;	/*!< size of a single row */
	i_s_mem_chunk_t	chunks[MEM_CHUNKS_IN_TABLE_CACHE]; /*!< array of
					memory chunks that stores the
					rows */
};

/** This structure describes the intermediate buffer */
struct trx_i_s_cache_t {
	rw_lock_t	rw_lock;	/*!< read-write lock protecting
					the rest of this structure */
	Atomic_relaxed<ulonglong> last_read;
					/*!< last time the cache was read;
					measured in nanoseconds */
	i_s_table_cache_t innodb_trx;	/*!< innodb_trx table */
	i_s_table_cache_t innodb_locks;	/*!< innodb_locks table */
	i_s_table_cache_t innodb_lock_waits;/*!< innodb_lock_waits table */
/** the hash table size is LOCKS_HASH_CELLS_NUM * sizeof(void*) bytes */
#define LOCKS_HASH_CELLS_NUM		10000
	hash_table_t	locks_hash;	/*!< hash table used to eliminate
					duplicate entries in the
					innodb_locks table */
/** Initial size of the cache storage */
#define CACHE_STORAGE_INITIAL_SIZE	1024
/** Number of hash cells in the cache storage */
#define CACHE_STORAGE_HASH_CELLS	2048
	ha_storage_t*	storage;	/*!< storage for external volatile
					data that may become unavailable
					when we release
					lock_sys.mutex */
	ulint		mem_allocd;	/*!< the amount of memory
					allocated with mem_alloc*() */
	bool		is_truncated;	/*!< this is true if the memory
					limit was hit and thus the data
					in the cache is truncated */
};

/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
static trx_i_s_cache_t	trx_i_s_cache_static;
/** This is the intermediate buffer where data needed to fill the
INFORMATION SCHEMA tables is fetched and later retrieved by the C++
code in handler/i_s.cc. */
trx_i_s_cache_t*	trx_i_s_cache = &trx_i_s_cache_static;

/** @return the heap number of a record lock
@retval 0xFFFF for table locks */
static uint16_t wait_lock_get_heap_no(const lock_t *lock)
{
  return lock_get_type(lock) == LOCK_REC
    ? static_cast<uint16_t>(lock_rec_find_set_bit(lock))
    : uint16_t{0xFFFF};
}

/*******************************************************************//**
Initializes the members of a table cache. */
static
void
table_cache_init(
/*=============*/
	i_s_table_cache_t*	table_cache,	/*!< out: table cache */
	size_t			row_size)	/*!< in: the size of a
						row */
{
	ulint	i;

	table_cache->rows_used = 0;
	table_cache->rows_allocd = 0;
	table_cache->row_size = row_size;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is actually allocated in
		table_cache_create_empty_row() */
		table_cache->chunks[i].base = NULL;
	}
}

/*******************************************************************//**
Frees a table cache. */
static
void
table_cache_free(
/*=============*/
	i_s_table_cache_t*	table_cache)	/*!< in/out: table cache */
{
	ulint	i;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		/* the memory is actually allocated in
		table_cache_create_empty_row() */
		if (table_cache->chunks[i].base) {
			ut_free(table_cache->chunks[i].base);
			table_cache->chunks[i].base = NULL;
		}
	}
}

/*******************************************************************//**
Returns an empty row from a table cache. The row is allocated if no more
empty rows are available. The number of used rows is incremented.
If the memory limit is hit then NULL is returned and nothing is
allocated.
@return empty row, or NULL if out of memory */
static
void*
table_cache_create_empty_row(
/*=========================*/
	i_s_table_cache_t*	table_cache,	/*!< in/out: table cache */
	trx_i_s_cache_t*	cache)		/*!< in/out: cache to record
						how many bytes are
						allocated */
{
	ulint	i;
	void*	row;

	ut_a(table_cache->rows_used <= table_cache->rows_allocd);

	if (table_cache->rows_used == table_cache->rows_allocd) {

		/* rows_used == rows_allocd means that a new chunk needs
		to be allocated: either there are no more empty rows in
		the last allocated chunk or nothing has been allocated yet
		(rows_used == rows_allocd == 0); */

		i_s_mem_chunk_t*	chunk;
		ulint			req_bytes;
		ulint			got_bytes;
		ulint			req_rows;
		ulint			got_rows;

		/* find the first not allocated chunk */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].base == NULL) {

				break;
			}
		}

		/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
		have been allocated :-X */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		/* allocate the chunk we just found */

		if (i == 0) {

			/* first chunk, nothing is allocated yet */
			req_rows = TABLE_CACHE_INITIAL_ROWSNUM;
		} else {

			/* Memory is increased by the formula
			new = old + old / 2. We are trying not to be
			aggressive here (i.e. not using the common
			new = old * 2) because the allocated memory will
			not be freed until InnoDB exits (it is reused).
			So it is better to allocate the memory in more
			steps and waste less, than to allocate in fewer
			steps (allocation happens only once in a lifetime
			anyway) but end up with lots of unused/wasted
			memory. */
			req_rows = table_cache->rows_allocd / 2;
		}
		req_bytes = req_rows * table_cache->row_size;

		if (req_bytes > MAX_ALLOWED_FOR_ALLOC(cache)) {

			return(NULL);
		}

		chunk = &table_cache->chunks[i];

		got_bytes = req_bytes;
		chunk->base = ut_malloc_nokey(req_bytes);

		got_rows = got_bytes / table_cache->row_size;

		cache->mem_allocd += got_bytes;

#if 0
		printf("allocating chunk %d req bytes=%lu, got bytes=%lu,"
		       " row size=%lu,"
		       " req rows=%lu, got rows=%lu\n",
		       i, req_bytes, got_bytes,
		       table_cache->row_size,
		       req_rows, got_rows);
#endif

		chunk->rows_allocd = got_rows;

		table_cache->rows_allocd += got_rows;

		/* adjust the offset of the next chunk */
		if (i < MEM_CHUNKS_IN_TABLE_CACHE - 1) {

			table_cache->chunks[i + 1].offset
				= chunk->offset + chunk->rows_allocd;
		}

		/* return the first empty row in the newly allocated
		chunk */
		row = chunk->base;
	} else {

		char*	chunk_start;
		ulint	offset;

		/* there is an empty row, no need to allocate new
		chunks */

		/* find the first chunk that contains allocated but
		empty/unused rows */
		for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

			if (table_cache->chunks[i].offset
			    + table_cache->chunks[i].rows_allocd
			    > table_cache->rows_used) {

				break;
			}
		}

		/* i == MEM_CHUNKS_IN_TABLE_CACHE means that all chunks
		are full, but
		table_cache->rows_used != table_cache->rows_allocd means
		exactly the opposite - there are allocated but
		empty/unused rows :-X */
		ut_a(i < MEM_CHUNKS_IN_TABLE_CACHE);

		chunk_start = (char*) table_cache->chunks[i].base;
		offset = table_cache->rows_used
			- table_cache->chunks[i].offset;

		row = chunk_start + offset * table_cache->row_size;
	}

	table_cache->rows_used++;

	return(row);
}
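
/* A minimal sketch (hypothetical, for illustration only) of the
calling convention used throughout this file: grab an empty row, and
if filling it fails, roll back the rows_used increment by hand. */
#if 0
	i_s_locks_row_t* row = static_cast<i_s_locks_row_t*>(
		table_cache_create_empty_row(&cache->innodb_locks, cache));

	if (row == NULL) {
		/* the memory limit was hit; the cache will be truncated */
	} else if (!fill_locks_row(row, lock, heap_no, cache)) {
		/* undo the allocation, as add_lock_to_cache() does */
		cache->innodb_locks.rows_used--;
	}
#endif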

#ifdef UNIV_DEBUG
/*******************************************************************//**
Validates a row in the locks cache.
@return TRUE if valid */
static
ibool
i_s_locks_row_validate(
/*===================*/
	const i_s_locks_row_t*	row)	/*!< in: row to validate */
{
	ut_ad(row->lock_mode);
	ut_ad(row->lock_table != NULL);
	ut_ad(row->lock_table_id != 0);

	if (!row->lock_index) {
		/* table lock */
		ut_ad(!row->lock_data);
		ut_ad(row->lock_page == page_id_t(0, 0));
		ut_ad(!row->lock_rec);
	} else {
		/* record lock */
		/* row->lock_data == NULL if buf_page_try_get() == NULL */
	}

	return(TRUE);
}
#endif /* UNIV_DEBUG */

/*******************************************************************//**
Fills i_s_trx_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_trx_row(
/*=========*/
	i_s_trx_row_t*		row,		/*!< out: result object
						that's filled */
	const trx_t*		trx,		/*!< in: transaction to
						get data from */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						corresponding row in
						innodb_locks if trx is
						waiting or NULL if trx
						is not waiting */
	trx_i_s_cache_t*	cache)		/*!< in/out: cache into
						which to copy volatile
						strings */
{
	const char*	s;

	ut_ad(lock_mutex_own());

	row->trx_id = trx_get_id_for_print(trx);
	row->trx_started = trx->start_time;
	row->trx_state = trx_get_que_state_str(trx);
	row->requested_lock_row = requested_lock_row;
	ut_ad(requested_lock_row == NULL
	      || i_s_locks_row_validate(requested_lock_row));

	if (trx->lock.wait_lock != NULL) {

		ut_a(requested_lock_row != NULL);
		row->trx_wait_started = trx->lock.wait_started;
	} else {
		ut_a(requested_lock_row == NULL);
		row->trx_wait_started = 0;
	}

	row->trx_weight = static_cast<uintmax_t>(TRX_WEIGHT(trx));

	if (trx->mysql_thd == NULL) {
		/* For internal transactions e.g., purge and transactions
		being recovered at startup there is no associated MySQL
		thread data structure. */
		row->trx_mysql_thread_id = 0;
		row->trx_query = NULL;
		goto thd_done;
	}

	row->trx_mysql_thread_id = thd_get_thread_id(trx->mysql_thd);

	char	query[TRX_I_S_TRX_QUERY_MAX_LEN + 1];
	if (size_t stmt_len = thd_query_safe(trx->mysql_thd, query,
					     sizeof query)) {
		row->trx_query = static_cast<const char*>(
			ha_storage_put_memlim(
				cache->storage, query, stmt_len + 1,
				MAX_ALLOWED_FOR_STORAGE(cache)));

		row->trx_query_cs = thd_charset(trx->mysql_thd);

		if (row->trx_query == NULL) {

			return(FALSE);
		}
	} else {

		row->trx_query = NULL;
	}

thd_done:
	row->trx_operation_state = trx->op_info;

	row->trx_tables_in_use = trx->n_mysql_tables_in_use;

	row->trx_tables_locked = lock_number_of_tables_locked(&trx->lock);

	/* These are protected either by both trx->mutex and
	lock_sys.mutex, or by lock_sys.mutex alone. For reading, it
	suffices to hold lock_sys.mutex. */

	row->trx_lock_structs = UT_LIST_GET_LEN(trx->lock.trx_locks);

	row->trx_lock_memory_bytes = mem_heap_get_size(trx->lock.lock_heap);

	row->trx_rows_locked = lock_number_of_rows_locked(&trx->lock);

	row->trx_rows_modified = trx->undo_no;

	row->trx_isolation_level = trx->isolation_level;

	row->trx_unique_checks = (ibool) trx->check_unique_secondary;

	row->trx_foreign_key_checks = (ibool) trx->check_foreigns;

	s = trx->detailed_error;

	if (s != NULL && s[0] != '\0') {

		TRX_I_S_STRING_COPY(s,
				    row->trx_foreign_key_error,
				    TRX_I_S_TRX_FK_ERROR_MAX_LEN, cache);

		if (row->trx_foreign_key_error == NULL) {

			return(FALSE);
		}
	} else {
		row->trx_foreign_key_error = NULL;
	}

	row->trx_is_read_only = trx->read_only;

	row->trx_is_autocommit_non_locking = trx->is_autocommit_non_locking();

	return(TRUE);
}

/*******************************************************************//**
Format the nth field of "rec" and put it in "buf". The result is always
NUL-terminated.
@return number of bytes written to "buf", including the terminating
NUL */
static
ulint
put_nth_field(
/*==========*/
	char*			buf,	/*!< out: buffer */
	ulint			buf_size,/*!< in: buffer size in bytes */
	ulint			n,	/*!< in: number of field */
	const dict_index_t*	index,	/*!< in: index */
	const rec_t*		rec,	/*!< in: record */
	const rec_offs*		offsets)/*!< in: record offsets, returned
					by rec_get_offsets() */
{
	const byte*	data;
	ulint		data_len;
	dict_field_t*	dict_field;
	ulint		ret;

	ut_ad(rec_offs_validate(rec, NULL, offsets));

	if (buf_size == 0) {

		return(0);
	}

	ret = 0;

	if (n > 0) {
		/* we must append ", " before the actual data */

		if (buf_size < 3) {

			buf[0] = '\0';
			return(1);
		}

		memcpy(buf, ", ", 3);

		buf += 2;
		buf_size -= 2;
		ret += 2;
	}

	/* now buf_size >= 1 */

	data = rec_get_nth_field(rec, offsets, n, &data_len);

	dict_field = dict_index_get_nth_field(index, n);

	ret += row_raw_format((const char*) data, data_len,
			      dict_field, buf, buf_size);

	return(ret);
}
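
/* For illustration only (hypothetical values): when fill_lock_data()
below formats a two-column index record with values 1 and 2, the first
call leaves "1" in the buffer and the second appends ", 2", giving
"1, 2". Each call returns the byte count including the terminating NUL,
which is why the caller subtracts 1 when advancing buf_used: the next
field then overwrites the previous NUL. */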

/*******************************************************************//**
Fills the "lock_data" member of i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
	const char**		lock_data,/*!< out: "lock_data" to fill */
	const lock_t*		lock,	/*!< in: lock used to find the data */
	ulint			heap_no,/*!< in: rec num used to find the data */
	trx_i_s_cache_t*	cache)	/*!< in/out: cache where to store
					volatile data */
{
	ut_a(lock_get_type(lock) == LOCK_REC);

	switch (heap_no) {
	case PAGE_HEAP_NO_INFIMUM:
	case PAGE_HEAP_NO_SUPREMUM:
		*lock_data = ha_storage_put_str_memlim(
			cache->storage,
			heap_no == PAGE_HEAP_NO_INFIMUM
			? "infimum pseudo-record"
			: "supremum pseudo-record",
			MAX_ALLOWED_FOR_STORAGE(cache));
		return(*lock_data != NULL);
	}

	mtr_t			mtr;

	const buf_block_t*	block;
	const page_t*		page;
	const rec_t*		rec;
	const dict_index_t*	index;
	ulint			n_fields;
	mem_heap_t*		heap;
	rec_offs		offsets_onstack[REC_OFFS_NORMAL_SIZE];
	rec_offs*		offsets;
	char			buf[TRX_I_S_LOCK_DATA_MAX_LEN];
	ulint			buf_used;
	ulint			i;

	mtr_start(&mtr);

	block = buf_page_try_get(lock->un_member.rec_lock.page_id, &mtr);

	if (block == NULL) {

		*lock_data = NULL;

		mtr_commit(&mtr);

		return(TRUE);
	}

	page = reinterpret_cast<const page_t*>(buf_block_get_frame(block));

	rec_offs_init(offsets_onstack);
	offsets = offsets_onstack;

	rec = page_find_rec_with_heap_no(page, heap_no);

	index = lock_rec_get_index(lock);

	n_fields = dict_index_get_n_unique(index);

	ut_a(n_fields > 0);

	heap = NULL;
	offsets = rec_get_offsets(rec, index, offsets, index->n_core_fields,
				  n_fields, &heap);

	/* format and store the data */

	buf_used = 0;
	for (i = 0; i < n_fields; i++) {

		buf_used += put_nth_field(
			buf + buf_used, sizeof(buf) - buf_used,
			i, index, rec, offsets) - 1;
	}

	*lock_data = (const char*) ha_storage_put_memlim(
		cache->storage, buf, buf_used + 1,
		MAX_ALLOWED_FOR_STORAGE(cache));

	if (heap != NULL) {

		/* this means that rec_get_offsets() has created a new
		heap and has stored offsets in it; check that this is
		really the case and free the heap */
		ut_a(offsets != offsets_onstack);
		mem_heap_free(heap);
	}

	mtr_commit(&mtr);

	if (*lock_data == NULL) {

		return(FALSE);
	}

	return(TRUE);
}

/*******************************************************************//**
Fills i_s_locks_row_t object.
If memory can not be allocated then false is returned.
@return false if allocation fails */
static bool fill_locks_row(
	i_s_locks_row_t* row,	/*!< out: result object that's filled */
	const lock_t*	lock,	/*!< in: lock to get data from */
	uint16_t	heap_no,/*!< in: lock's record number
				or 0xFFFF if the lock
				is a table lock */
	trx_i_s_cache_t* cache)	/*!< in/out: cache into which to copy
				volatile strings */
{
	row->lock_trx_id = lock->trx->id;
	const auto lock_type = lock_get_type(lock);
	ut_ad(lock_type == LOCK_REC || lock_type == LOCK_TABLE);

	const bool is_gap_lock = lock_type == LOCK_REC
		&& (lock->type_mode & LOCK_GAP);
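
	/* For the reader's convenience, the numeric lock_mode encoding
	produced by the switch below is: 1=S, 2=S,GAP, 3=X, 4=X,GAP,
	5=IS, 6=IS,GAP, 7=IX, 8=IX,GAP, 9=AUTO_INC; 0 means an unknown
	lock mode. A gap variant is always the base value plus one. */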
	switch (lock->type_mode & LOCK_MODE_MASK) {
	case LOCK_S:
		row->lock_mode = uint8_t(1 + is_gap_lock);
		break;
	case LOCK_X:
		row->lock_mode = uint8_t(3 + is_gap_lock);
		break;
	case LOCK_IS:
		row->lock_mode = uint8_t(5 + is_gap_lock);
		break;
	case LOCK_IX:
		row->lock_mode = uint8_t(7 + is_gap_lock);
		break;
	case LOCK_AUTO_INC:
		row->lock_mode = 9;
		break;
	default:
		ut_ad("unknown lock mode" == 0);
		row->lock_mode = 0;
	}

	row->lock_table = ha_storage_put_str_memlim(
		cache->storage, lock_get_table_name(lock).m_name,
		MAX_ALLOWED_FOR_STORAGE(cache));

	/* memory could not be allocated */
	if (row->lock_table == NULL) {

		return false;
	}

	if (lock_type == LOCK_REC) {
		row->lock_index = ha_storage_put_str_memlim(
			cache->storage, lock_rec_get_index_name(lock),
			MAX_ALLOWED_FOR_STORAGE(cache));

		/* memory could not be allocated */
		if (row->lock_index == NULL) {

			return false;
		}

		row->lock_page = lock->un_member.rec_lock.page_id;
		row->lock_rec = heap_no;

		if (!fill_lock_data(&row->lock_data, lock, heap_no, cache)) {

			/* memory could not be allocated */
			return false;
		}
	} else {
		row->lock_index = NULL;

		row->lock_page = page_id_t(0, 0);
		row->lock_rec = 0;

		row->lock_data = NULL;
	}

	row->lock_table_id = lock_get_table_id(lock);

	row->hash_chain.value = row;
	ut_ad(i_s_locks_row_validate(row));

	return true;
}

/*******************************************************************//**
Fills i_s_lock_waits_row_t object. Returns its first argument.
@return result object that's filled */
static
i_s_lock_waits_row_t*
fill_lock_waits_row(
/*================*/
	i_s_lock_waits_row_t*	row,		/*!< out: result object
						that's filled */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						relevant requested lock
						row in innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
						relevant blocking lock
						row in innodb_locks */
{
	ut_ad(i_s_locks_row_validate(requested_lock_row));
	ut_ad(i_s_locks_row_validate(blocking_lock_row));

	row->requested_lock_row = requested_lock_row;
	row->blocking_lock_row = blocking_lock_row;

	return(row);
}

/*******************************************************************//**
Calculates a hash fold for a lock. For a record lock the fold is
calculated from 4 elements, which uniquely identify a lock at a given
point in time: transaction id, space id, page number, record number.
For a table lock the fold is the table's id.
@return fold */
static
ulint
fold_lock(
/*======*/
	const lock_t*	lock,	/*!< in: lock object to fold */
	ulint		heap_no)/*!< in: lock's record number
				or 0xFFFF if the lock
				is a table lock */
{
#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT
	static ulint	fold = 0;

	return(fold++);
#else
	ulint	ret;

	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != 0xFFFF);
		ret = ut_fold_ulint_pair((ulint) lock->trx->id,
					 lock->un_member.rec_lock.page_id.
					 fold());
		ret = ut_fold_ulint_pair(ret, heap_no);

		break;
	case LOCK_TABLE:
		/* this check is actually not necessary for continuing
		correct operation, but something must have gone wrong if
		it fails. */
		ut_a(heap_no == 0xFFFF);

		ret = (ulint) lock_get_table_id(lock);

		break;
	default:
		ut_error;
	}

	return(ret);
#endif
}

/*******************************************************************//**
Checks whether i_s_locks_row_t object represents a lock_t object.
@return TRUE if they match */
static
ibool
locks_row_eq_lock(
/*==============*/
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	const lock_t*		lock,	/*!< in: lock object */
	ulint			heap_no)/*!< in: lock's record number
					or 0xFFFF if the lock
					is a table lock */
{
	ut_ad(i_s_locks_row_validate(row));
#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T
	return(0);
#else
	switch (lock_get_type(lock)) {
	case LOCK_REC:
		ut_a(heap_no != 0xFFFF);

		return(row->lock_trx_id == lock->trx->id
		       && row->lock_page == lock->un_member.rec_lock.page_id
		       && row->lock_rec == heap_no);

	case LOCK_TABLE:
		/* this check is actually not necessary for continuing
		correct operation, but something must have gone wrong if
		it fails. */
		ut_a(heap_no == 0xFFFF);

		return(row->lock_trx_id == lock->trx->id
		       && row->lock_table_id == lock_get_table_id(lock));

	default:
		ut_error;
		return(FALSE);
	}
#endif
}

/*******************************************************************//**
Searches for a row in the innodb_locks cache that has a specified id.
This happens in O(1) time since a hash table is used. Returns pointer to
the row or NULL if none is found.
@return row or NULL */
static
i_s_locks_row_t*
search_innodb_locks(
/*================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	const lock_t*		lock,	/*!< in: lock to search for */
	uint16_t		heap_no)/*!< in: lock's record number
					or 0xFFFF if the lock
					is a table lock */
{
	i_s_hash_chain_t*	hash_chain;

	HASH_SEARCH(
		/* hash_chain->"next" */
		next,
		/* the hash table */
		&cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* the type of the next variable */
		i_s_hash_chain_t*,
		/* auxiliary variable */
		hash_chain,
		/* assertion on every traversed item */
		ut_ad(i_s_locks_row_validate(hash_chain->value)),
		/* this determines if we have found the lock */
		locks_row_eq_lock(hash_chain->value, lock, heap_no));

	if (hash_chain == NULL) {

		return(NULL);
	}
	/* else */

	return(hash_chain->value);
}

/*******************************************************************//**
Adds a new element to the locks cache, enlarging it if necessary.
Returns a pointer to the added row. If the row is already present then
no row is added and a pointer to the existing row is returned.
If the row can not be allocated then NULL is returned.
@return row */
static
i_s_locks_row_t*
add_lock_to_cache(
/*==============*/
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const lock_t*		lock,	/*!< in: the element to add */
	uint16_t		heap_no)/*!< in: lock's record number
					or 0xFFFF if the lock
					is a table lock */
{
	i_s_locks_row_t*	dst_row;

#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	ulint	i;
	for (i = 0; i < 10000; i++) {
#endif
#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS
	/* quit if this lock is already present */
	dst_row = search_innodb_locks(cache, lock, heap_no);
	if (dst_row != NULL) {

		ut_ad(i_s_locks_row_validate(dst_row));
		return(dst_row);
	}
#endif

	dst_row = (i_s_locks_row_t*)
		table_cache_create_empty_row(&cache->innodb_locks, cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(NULL);
	}

	if (!fill_locks_row(dst_row, lock, heap_no, cache)) {

		/* memory could not be allocated */
		cache->innodb_locks.rows_used--;
		return(NULL);
	}

#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE
	HASH_INSERT(
		/* the type used in the hash chain */
		i_s_hash_chain_t,
		/* hash_chain->"next" */
		next,
		/* the hash table */
		&cache->locks_hash,
		/* fold */
		fold_lock(lock, heap_no),
		/* add this data to the hash */
		&dst_row->hash_chain);
#endif
#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES
	} /* for()-loop */
#endif

	ut_ad(i_s_locks_row_validate(dst_row));
	return(dst_row);
}

/*******************************************************************//**
Adds a new pair of locks to the lock waits cache.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
add_lock_wait_to_cache(
/*===================*/
	trx_i_s_cache_t*	cache,		/*!< in/out: cache */
	const i_s_locks_row_t*	requested_lock_row,/*!< in: pointer to the
						relevant requested lock
						row in innodb_locks */
	const i_s_locks_row_t*	blocking_lock_row)/*!< in: pointer to the
						relevant blocking lock
						row in innodb_locks */
{
	i_s_lock_waits_row_t*	dst_row;

	dst_row = (i_s_lock_waits_row_t*)
		table_cache_create_empty_row(&cache->innodb_lock_waits,
					     cache);

	/* memory could not be allocated */
	if (dst_row == NULL) {

		return(FALSE);
	}

	fill_lock_waits_row(dst_row, requested_lock_row, blocking_lock_row);

	return(TRUE);
}

/*******************************************************************//**
Adds transaction's relevant (important) locks to cache.
If the transaction is waiting, then the wait lock is added to
innodb_locks and a pointer to the added row is returned in
requested_lock_row, otherwise requested_lock_row is set to NULL.
If rows can not be allocated then FALSE is returned and the value of
requested_lock_row is undefined.
@return FALSE if allocation fails */
static
ibool
add_trx_relevant_locks_to_cache(
/*============================*/
	trx_i_s_cache_t*	cache,	/*!< in/out: cache */
	const trx_t*		trx,	/*!< in: transaction */
	i_s_locks_row_t**	requested_lock_row)/*!< out: pointer to the
					requested lock row, or NULL or
					undefined */
{
	ut_ad(lock_mutex_own());

	/* If the transaction is waiting, we add the wait lock and all
	locks from other transactions that are blocking the wait lock. */
	if (trx->lock.que_state == TRX_QUE_LOCK_WAIT) {

		const lock_t*		curr_lock;
		i_s_locks_row_t*	blocking_lock_row;
		lock_queue_iterator_t	iter;

		ut_a(trx->lock.wait_lock != NULL);

		uint16_t wait_lock_heap_no
			= wait_lock_get_heap_no(trx->lock.wait_lock);

		/* add the requested lock */
		*requested_lock_row
			= add_lock_to_cache(cache, trx->lock.wait_lock,
					    wait_lock_heap_no);

		/* memory could not be allocated */
		if (*requested_lock_row == NULL) {

			return(FALSE);
		}

		/* then iterate over the locks before the wait lock and
		add the ones that are blocking it */

		lock_queue_iterator_reset(&iter, trx->lock.wait_lock,
					  ULINT_UNDEFINED);

		for (curr_lock = lock_queue_iterator_get_prev(&iter);
		     curr_lock != NULL;
		     curr_lock = lock_queue_iterator_get_prev(&iter)) {

			if (lock_has_to_wait(trx->lock.wait_lock,
					     curr_lock)) {

				/* add the lock that is
				blocking trx->lock.wait_lock */
				blocking_lock_row
					= add_lock_to_cache(
						cache, curr_lock,
						/* heap_no is the same
						for the wait and waited
						locks */
						wait_lock_heap_no);

				/* memory could not be allocated */
				if (blocking_lock_row == NULL) {

					return(FALSE);
				}

				/* add the relation between both locks
				to innodb_lock_waits */
				if (!add_lock_wait_to_cache(
						cache, *requested_lock_row,
						blocking_lock_row)) {

					/* memory could not be allocated */
					return(FALSE);
				}
			}
		}
	} else {

		*requested_lock_row = NULL;
	}

	return(TRUE);
}

/** The minimum time that a cache must not be updated after it has been
read for the last time; measured in nanoseconds. We use this technique
to ensure that SELECTs which join several INFORMATION SCHEMA tables read
the same version of the cache. */
#define CACHE_MIN_IDLE_TIME_NS	100000000 /* 0.1 sec */
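
/* For illustration: a hypothetical statement such as
SELECT ... FROM information_schema.innodb_trx
JOIN information_schema.innodb_lock_waits ON ...
reads the two tables one after the other; because the second read
arrives well within 0.1 sec of the first, the cache is not refreshed
in between and both reads observe the same snapshot. */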

/*******************************************************************//**
Checks if the cache can safely be updated.
@return whether the cache can be updated */
static bool can_cache_be_updated(trx_i_s_cache_t* cache)
{
	/* cache->last_read is only updated when a shared rw lock on the
	whole cache is being held (see trx_i_s_cache_end_read()) and
	we are currently holding an exclusive rw lock on the cache.
	So it is not possible for last_read to be updated while we are
	reading it. */

	ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));

	return my_interval_timer() - cache->last_read > CACHE_MIN_IDLE_TIME_NS;
}

/*******************************************************************//**
Declare a cache empty, preparing it to be filled up. Not all resources
are freed because they can be reused. */
static
void
trx_i_s_cache_clear(
/*================*/
	trx_i_s_cache_t*	cache)	/*!< out: cache to clear */
{
	cache->innodb_trx.rows_used = 0;
	cache->innodb_locks.rows_used = 0;
	cache->innodb_lock_waits.rows_used = 0;

	cache->locks_hash.clear();

	ha_storage_empty(&cache->storage);
}


/**
  Add transactions to innodb_trx's cache.

  We also add all locks that are relevant to each transaction into
  innodb_locks' and innodb_lock_waits' caches.
*/

static void fetch_data_into_cache_low(trx_i_s_cache_t *cache, const trx_t *trx)
{
  i_s_locks_row_t *requested_lock_row;

#ifdef UNIV_DEBUG
  {
    const auto state= trx->state;

    if (trx->is_autocommit_non_locking())
    {
      ut_ad(trx->read_only);
      ut_ad(!trx->is_recovered);
      ut_ad(trx->mysql_thd);
      ut_ad(state == TRX_STATE_NOT_STARTED || state == TRX_STATE_ACTIVE);
    }
    else
      ut_ad(state == TRX_STATE_ACTIVE ||
            state == TRX_STATE_PREPARED ||
            state == TRX_STATE_PREPARED_RECOVERED ||
            state == TRX_STATE_COMMITTED_IN_MEMORY);
  }
#endif /* UNIV_DEBUG */

  if (add_trx_relevant_locks_to_cache(cache, trx, &requested_lock_row))
  {
    if (i_s_trx_row_t *trx_row= reinterpret_cast<i_s_trx_row_t*>(
        table_cache_create_empty_row(&cache->innodb_trx, cache)))
    {
      if (fill_trx_row(trx_row, trx, requested_lock_row, cache))
        return;
      --cache->innodb_trx.rows_used;
    }
  }

  /* memory could not be allocated */
  cache->is_truncated= true;
}


/**
  Fetches the data needed to fill the 3 INFORMATION SCHEMA tables into the
  table cache buffer. Cache must be locked for write.
*/

static void fetch_data_into_cache(trx_i_s_cache_t *cache)
{
  ut_ad(lock_mutex_own());
  trx_i_s_cache_clear(cache);

  /* Capture the state of transactions */
  trx_sys.trx_list.for_each([cache](trx_t &trx) {
    if (!cache->is_truncated && trx.state != TRX_STATE_NOT_STARTED &&
        &trx != (purge_sys.query ? purge_sys.query->trx : nullptr))
    {
      mutex_enter(&trx.mutex);
      if (trx.state != TRX_STATE_NOT_STARTED)
        fetch_data_into_cache_low(cache, &trx);
      mutex_exit(&trx.mutex);
    }
  });
  cache->is_truncated= false;
}


/*******************************************************************//**
Update the transactions cache if it has not been read for some time.
Called from handler/i_s.cc.
@return 0 if new data was fetched, 1 if not */
int
trx_i_s_possibly_fetch_data_into_cache(
/*===================================*/
	trx_i_s_cache_t*	cache)	/*!< in/out: cache */
{
	if (!can_cache_be_updated(cache)) {

		return(1);
	}

	/* We need to read trx_sys and record/table lock queues */

	lock_mutex_enter();
	fetch_data_into_cache(cache);
	lock_mutex_exit();

	/* update cache last read time */
	cache->last_read = my_interval_timer();

	return(0);
}

/*******************************************************************//**
Returns true if the data in the cache is truncated due to the memory
limit posed by TRX_I_S_MEM_LIMIT.
@return true if truncated */
bool
trx_i_s_cache_is_truncated(
/*=======================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	return(cache->is_truncated);
}

/*******************************************************************//**
Initialize INFORMATION SCHEMA trx related cache. */
void
trx_i_s_cache_init(
/*===============*/
	trx_i_s_cache_t*	cache)	/*!< out: cache to init */
{
	/* The latching is done in the following order:
	acquire trx_i_s_cache_t::rw_lock, X
	acquire lock mutex
	release lock mutex
	release trx_i_s_cache_t::rw_lock
	acquire trx_i_s_cache_t::rw_lock, S
	release trx_i_s_cache_t::rw_lock */

	rw_lock_create(trx_i_s_cache_lock_key, &cache->rw_lock,
		       SYNC_TRX_I_S_RWLOCK);

	cache->last_read = 0;

	table_cache_init(&cache->innodb_trx, sizeof(i_s_trx_row_t));
	table_cache_init(&cache->innodb_locks, sizeof(i_s_locks_row_t));
	table_cache_init(&cache->innodb_lock_waits,
			 sizeof(i_s_lock_waits_row_t));

	cache->locks_hash.create(LOCKS_HASH_CELLS_NUM);

	cache->storage = ha_storage_create(CACHE_STORAGE_INITIAL_SIZE,
					   CACHE_STORAGE_HASH_CELLS);

	cache->mem_allocd = 0;

	cache->is_truncated = false;
}

/*******************************************************************//**
Free the INFORMATION SCHEMA trx related cache. */
void
trx_i_s_cache_free(
/*===============*/
	trx_i_s_cache_t*	cache)	/*!< in, own: cache to free */
{
	rw_lock_free(&cache->rw_lock);

	cache->locks_hash.free();
	ha_storage_free(cache->storage);
	table_cache_free(&cache->innodb_trx);
	table_cache_free(&cache->innodb_locks);
	table_cache_free(&cache->innodb_lock_waits);
}

/*******************************************************************//**
Issue a shared/read lock on the tables cache. */
void
trx_i_s_cache_start_read(
/*=====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_s_lock(&cache->rw_lock);
}

/*******************************************************************//**
Release a shared/read lock on the tables cache. */
void
trx_i_s_cache_end_read(
/*===================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	cache->last_read = my_interval_timer();
	rw_lock_s_unlock(&cache->rw_lock);
}

/*******************************************************************//**
Issue an exclusive/write lock on the tables cache. */
void
trx_i_s_cache_start_write(
/*======================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	rw_lock_x_lock(&cache->rw_lock);
}

/*******************************************************************//**
Release an exclusive/write lock on the tables cache. */
void
trx_i_s_cache_end_write(
/*====================*/
	trx_i_s_cache_t*	cache)	/*!< in: cache */
{
	ut_ad(rw_lock_own(&cache->rw_lock, RW_LOCK_X));

	rw_lock_x_unlock(&cache->rw_lock);
}
/*******************************************************************//**
Selects an INFORMATION SCHEMA table cache from the whole cache.
@return table cache */
static
i_s_table_cache_t*
cache_select_table(
/*===============*/
	trx_i_s_cache_t*	cache,	/*!< in: whole cache */
	enum i_s_table		table)	/*!< in: which table */
{
	ut_ad(rw_lock_own_flagged(&cache->rw_lock,
				  RW_LOCK_FLAG_X | RW_LOCK_FLAG_S));

	switch (table) {
	case I_S_INNODB_TRX:
		return &cache->innodb_trx;
	case I_S_INNODB_LOCKS:
		return &cache->innodb_locks;
	case I_S_INNODB_LOCK_WAITS:
		return &cache->innodb_lock_waits;
	}

	ut_error;
	return NULL;
}

/*******************************************************************//**
Retrieves the number of used rows in the cache for a given
INFORMATION SCHEMA table.
@return number of rows */
ulint
trx_i_s_cache_get_rows_used(
/*========================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table)	/*!< in: which table */
{
	i_s_table_cache_t*	table_cache;

	table_cache = cache_select_table(cache, table);

	return(table_cache->rows_used);
}

/*******************************************************************//**
Retrieves the nth row (zero-based) in the cache for a given
INFORMATION SCHEMA table.
@return row */
void*
trx_i_s_cache_get_nth_row(
/*======================*/
	trx_i_s_cache_t*	cache,	/*!< in: cache */
	enum i_s_table		table,	/*!< in: which table */
	ulint			n)	/*!< in: row number */
{
	i_s_table_cache_t*	table_cache;
	ulint			i;
	void*			row;

	table_cache = cache_select_table(cache, table);

	ut_a(n < table_cache->rows_used);

	row = NULL;

	for (i = 0; i < MEM_CHUNKS_IN_TABLE_CACHE; i++) {

		if (table_cache->chunks[i].offset
		    + table_cache->chunks[i].rows_allocd > n) {

			row = (char*) table_cache->chunks[i].base
				+ (n - table_cache->chunks[i].offset)
				* table_cache->row_size;
			break;
		}
	}

	ut_a(row != NULL);

	return(row);
}
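
/* A minimal sketch (hypothetical, for illustration only) of how a
reader in handler/i_s.cc is expected to use the public API above:
lock the cache for reading, walk the rows, then release the lock.
(A refresh would be requested separately, under
trx_i_s_cache_start_write().) */
#if 0
	trx_i_s_cache_start_read(trx_i_s_cache);

	ulint rows = trx_i_s_cache_get_rows_used(trx_i_s_cache,
						 I_S_INNODB_TRX);

	for (ulint n = 0; n < rows; n++) {
		i_s_trx_row_t* row = static_cast<i_s_trx_row_t*>(
			trx_i_s_cache_get_nth_row(trx_i_s_cache,
						  I_S_INNODB_TRX, n));
		/* ...copy fields from *row into the I_S table... */
	}

	trx_i_s_cache_end_read(trx_i_s_cache);
#endif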

/*******************************************************************//**
Crafts a lock id string from an i_s_locks_row_t object. Returns its
second argument. This function aborts if there is not enough space in
lock_id. Provide a buffer of at least TRX_I_S_LOCK_ID_MAX_LEN + 1
bytes if you want to be 100% sure that it will not abort.
@return resulting lock id */
char*
trx_i_s_create_lock_id(
/*===================*/
	const i_s_locks_row_t*	row,	/*!< in: innodb_locks row */
	char*			lock_id,/*!< out: resulting lock_id */
	ulint			lock_id_size)/*!< in: size of the lock id
					buffer */
{
	int	res_len;

	/* please adjust TRX_I_S_LOCK_ID_MAX_LEN if you change this */

	if (row->lock_index) {
		/* record lock */
		res_len = snprintf(lock_id, lock_id_size,
				   TRX_ID_FMT
				   ":%u:%u:%u",
				   row->lock_trx_id, row->lock_page.space(),
				   row->lock_page.page_no(), row->lock_rec);
	} else {
		/* table lock */
		res_len = snprintf(lock_id, lock_id_size,
				   TRX_ID_FMT":" UINT64PF,
				   row->lock_trx_id,
				   row->lock_table_id);
	}

	/* the typecast is safe because snprintf(3) never returns a
	negative result */
	ut_a(res_len >= 0);
	ut_a((ulint) res_len < lock_id_size);

	return(lock_id);
}
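
/* For illustration only (hypothetical values): a record lock held by
transaction 2342 on space 5, page 3, heap number 4 is rendered as
"2342:5:3:4", while a table lock held by the same transaction on the
table with id 17 is rendered as "2342:17". */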