/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2014, 2020, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/buf0buf.ic
The database buffer buf_pool

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#include "mtr0mtr.h"
#include "buf0flu.h"
#include "buf0lru.h"
#include "buf0rea.h"
#include "fsp0types.h"

/** A chunk of buffers. The buffer pool is allocated in chunks. */
struct buf_chunk_t{
	ulint		size;		/*!< size of frames[] and blocks[] */
	unsigned char*	mem;		/*!< pointer to the memory area which
					was allocated for the frames */
	ut_new_pfx_t	mem_pfx;	/*!< Auxiliary structure, describing
					"mem". It is filled by the allocator's
					alloc method and later passed to the
					deallocate method. */
	buf_block_t*	blocks;		/*!< array of buffer control blocks */

	/** Get the size of 'mem' in bytes. */
	size_t	mem_size() const {
		return(mem_pfx.m_size);
	}
};

bool buf_pool_t::is_block_field(const void *ptr) const
{
  const buf_chunk_t* chunk= chunks;
  const buf_chunk_t *const echunk= chunk + ut_min(n_chunks,
                                                  n_chunks_new);
  /* TODO: protect chunks with a mutex (the older pointer will
  currently remain during resize()) */
  while (chunk < echunk)
  {
    if (ptr >= reinterpret_cast<const void*>(chunk->blocks) &&
        ptr < reinterpret_cast<const void*>(
		chunk->blocks + chunk->size))
      return true;
    chunk++;
  }

  return false;
}

/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
UNIV_INLINE
ulint
buf_pool_get_curr_size(void)
/*========================*/
{
	return(srv_buf_pool_curr_size);
}

/********************************************************************//**
Calculates the index of a buffer pool into the buf_pool[] array.
@return the position of the buffer pool in buf_pool[] */
UNIV_INLINE
unsigned
buf_pool_index(
/*===========*/
	const buf_pool_t*	buf_pool)	/*!< in: buffer pool */
{
	unsigned	i = unsigned(buf_pool - buf_pool_ptr);
	ut_ad(i < MAX_BUFFER_POOLS);
	ut_ad(i < srv_buf_pool_instances);
	return(i);
}

/******************************************************************//**
Returns the buffer pool instance given a page instance
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_bpage(
/*================*/
	const buf_page_t*	bpage) /*!< in: buffer pool page */
{
	ut_ad(bpage->buf_pool_index < srv_buf_pool_instances);
	return(&buf_pool_ptr[bpage->buf_pool_index]);
}

/******************************************************************//**
Returns the buffer pool instance given a block instance
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_block(
/*================*/
	const buf_block_t*	block) /*!< in: block */
{
	return(buf_pool_from_bpage(&block->page));
}

/*********************************************************************//**
Gets the current size of buffer buf_pool in pages.
@return size in pages */
UNIV_INLINE
ulint
buf_pool_get_n_pages(void)
/*======================*/
{
  if (!buf_pool_ptr)
    return buf_pool_get_curr_size() >> srv_page_size_shift;

  ulint chunk_size= 0;
  for (uint i= 0; i < srv_buf_pool_instances; i++)
  {
    buf_pool_t* buf_pool = buf_pool_from_array(i);
    for (uint j= 0; j < buf_pool->n_chunks; j++)
      chunk_size+= buf_pool->chunks[j].size;
  }
  return chunk_size;
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
UNIV_INLINE
unsigned
buf_page_get_freed_page_clock(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	/* This is sometimes read without holding buf_pool->mutex. */
	return(bpage->freed_page_clock);
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
UNIV_INLINE
unsigned
buf_block_get_freed_page_clock(
/*===========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	return(buf_page_get_freed_page_clock(&block->page));
}

/********************************************************************//**
Tells whether a block is still close enough to the MRU end of the LRU list
that it is not in danger of being evicted, which also implies that it has
been accessed recently.
Note that this is a heuristic only and does not reserve the buffer pool
mutex.
@return TRUE if block is close to MRU end of LRU */
UNIV_INLINE
ibool
buf_page_peek_if_young(
/*===================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	/* FIXME: bpage->freed_page_clock is 31 bits */
	return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
	       < (bpage->freed_page_clock
		  + (buf_pool->curr_size
		     * (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
		     / (BUF_LRU_OLD_RATIO_DIV * 4))));
}
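
/* Worked example (an illustrative sketch, not a normative bound): the block
is considered "young" while fewer than roughly
curr_size * (BUF_LRU_OLD_RATIO_DIV - LRU_old_ratio) / (BUF_LRU_OLD_RATIO_DIV * 4)
pages have been evicted since bpage->freed_page_clock was recorded. Assuming
BUF_LRU_OLD_RATIO_DIV == 1024 and LRU_old_ratio == 384 (that is, 3/8 of the
list kept "old"), the threshold is curr_size * 640 / 4096, or about 15.6%
of the buffer pool size. */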

/********************************************************************//**
Recommends a move of a block to the start of the LRU list if there is danger
of it being dropped from the buffer pool. NOTE: does not reserve the buffer
pool mutex.
@return TRUE if should be made younger */
UNIV_INLINE
ibool
buf_page_peek_if_too_old(
/*=====================*/
	const buf_page_t*	bpage)	/*!< in: block to make younger */
{
	buf_pool_t*		buf_pool = buf_pool_from_bpage(bpage);

	if (buf_pool->freed_page_clock == 0) {
		/* If eviction has not started yet, do not update the
		statistics or move blocks in the LRU list.  This is
		either the warm-up phase or an in-memory workload. */
		return(FALSE);
	} else if (buf_LRU_old_threshold_ms && bpage->old) {
		unsigned	access_time = buf_page_is_accessed(bpage);

		/* It is possible that the below comparison returns an
		unexpected result. 2^32 milliseconds pass in about 50 days,
		so if the difference between ut_time_ms() and access_time
		is e.g. 50 days + 15 ms, then the below will behave as if
		it is 15 ms. This is known and fixing it would require
		increasing buf_page_t::access_time from 32 to 64 bits. */
		if (access_time > 0
		    && ((ib_uint32_t) (ut_time_ms() - access_time))
		    >= buf_LRU_old_threshold_ms) {
			return(TRUE);
		}

		buf_pool->stat.n_pages_not_made_young++;
		return(FALSE);
	} else {
		return(!buf_page_peek_if_young(bpage));
	}
}

/*********************************************************************//**
Gets the state of a block.
@return state */
UNIV_INLINE
enum buf_page_state
buf_page_get_state(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	enum buf_page_state	state	= bpage->state;

#ifdef UNIV_DEBUG
	switch (state) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_FILE_PAGE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	default:
		ut_error;
	}
#endif /* UNIV_DEBUG */

	return(state);
}
/*********************************************************************//**
Gets the state of a block.
@return state */
UNIV_INLINE
enum buf_page_state
buf_block_get_state(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_state(&block->page));
}

/*********************************************************************//**
Gets the state name for state of a block
@return	name or "CORRUPTED" */
UNIV_INLINE
const char*
buf_get_state_name(
/*===============*/
	const buf_block_t*	block)	/*!< in: pointer to the control
					block */
{
	enum buf_page_state	state = buf_page_get_state(&block->page);

	switch (state) {
	case BUF_BLOCK_POOL_WATCH:
		return (const char *) "BUF_BLOCK_POOL_WATCH";
	case BUF_BLOCK_ZIP_PAGE:
		return (const char *) "BUF_BLOCK_ZIP_PAGE";
	case BUF_BLOCK_ZIP_DIRTY:
		return (const char *) "BUF_BLOCK_ZIP_DIRTY";
	case BUF_BLOCK_NOT_USED:
		return (const char *) "BUF_BLOCK_NOT_USED";
	case BUF_BLOCK_READY_FOR_USE:
		return (const char *) "BUF_BLOCK_READY_FOR_USE";
	case BUF_BLOCK_FILE_PAGE:
		return (const char *) "BUF_BLOCK_FILE_PAGE";
	case BUF_BLOCK_MEMORY:
		return (const char *) "BUF_BLOCK_MEMORY";
	case BUF_BLOCK_REMOVE_HASH:
		return (const char *) "BUF_BLOCK_REMOVE_HASH";
	default:
		return (const char *) "CORRUPTED";
	}
}

/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_page_set_state(
/*===============*/
	buf_page_t*		bpage,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
#ifdef UNIV_DEBUG
	enum buf_page_state	old_state	= buf_page_get_state(bpage);

	switch (old_state) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
		ut_a(state == BUF_BLOCK_ZIP_DIRTY);
		break;
	case BUF_BLOCK_ZIP_DIRTY:
		ut_a(state == BUF_BLOCK_ZIP_PAGE);
		break;
	case BUF_BLOCK_NOT_USED:
		ut_a(state == BUF_BLOCK_READY_FOR_USE);
		break;
	case BUF_BLOCK_READY_FOR_USE:
		ut_a(state == BUF_BLOCK_MEMORY
		     || state == BUF_BLOCK_FILE_PAGE
		     || state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_MEMORY:
		ut_a(state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_FILE_PAGE:
		if (!(state == BUF_BLOCK_NOT_USED
		      || state == BUF_BLOCK_REMOVE_HASH
		      || state == BUF_BLOCK_FILE_PAGE)) {
			const char* old_state_name
				= buf_get_state_name((buf_block_t*) bpage);
			bpage->state = state;

			fprintf(stderr,
				"InnoDB: Error: block old state %d (%s)"
				" new state %d (%s) not correct\n",
				old_state,
				old_state_name,
				state,
				buf_get_state_name((buf_block_t*) bpage));
			ut_a(state == BUF_BLOCK_NOT_USED
				|| state == BUF_BLOCK_REMOVE_HASH
				|| state == BUF_BLOCK_FILE_PAGE);
		}

		break;
	case BUF_BLOCK_REMOVE_HASH:
		ut_a(state == BUF_BLOCK_MEMORY);
		break;
	}
#endif /* UNIV_DEBUG */
	bpage->state = state;
}
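
/* For reference, the state transitions permitted by the debug checks above
(a summary derived from the switch, not an independent specification):
NOT_USED -> READY_FOR_USE -> {MEMORY | FILE_PAGE | NOT_USED};
MEMORY -> NOT_USED; ZIP_PAGE <-> ZIP_DIRTY;
FILE_PAGE -> {NOT_USED | REMOVE_HASH | FILE_PAGE};
REMOVE_HASH -> MEMORY. A block in the POOL_WATCH state must never be passed
to this function. */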

/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
	buf_block_t*		block,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
	buf_page_set_state(&block->page, state);
}

/*********************************************************************//**
Determines if a block is mapped to a tablespace.
@return TRUE if mapped */
UNIV_INLINE
ibool
buf_page_in_file(
/*=============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_FILE_PAGE:
		return(TRUE);
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	return(FALSE);
}

/*********************************************************************//**
Determines if a block should be on unzip_LRU list.
@return TRUE if block belongs to unzip_LRU */
UNIV_INLINE
ibool
buf_page_belongs_to_unzip_LRU(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->zip.data
	       && buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
}

/*********************************************************************//**
Gets the mutex of a block.
@return pointer to mutex protecting bpage */
UNIV_INLINE
BPageMutex*
buf_page_get_mutex(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		return(NULL);
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		return(&buf_pool->zip_mutex);
	default:
		return(&((buf_block_t*) bpage)->mutex);
	}
}
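
/* Typical usage sketch (illustrative only; real callers differ in what they
do while holding the mutex):
	BPageMutex*	block_mutex = buf_page_get_mutex(bpage);
	mutex_enter(block_mutex);
	... read or modify fields protected by the block or zip mutex ...
	mutex_exit(block_mutex);
*/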

/*********************************************************************//**
Get the flush type of a page.
@return flush type */
UNIV_INLINE
buf_flush_t
buf_page_get_flush_type(
/*====================*/
	const buf_page_t*	bpage)	/*!< in: buffer page */
{
	buf_flush_t	flush_type = (buf_flush_t) bpage->flush_type;

#ifdef UNIV_DEBUG
	switch (flush_type) {
	case BUF_FLUSH_LRU:
	case BUF_FLUSH_LIST:
	case BUF_FLUSH_SINGLE_PAGE:
		return(flush_type);
	case BUF_FLUSH_N_TYPES:
		ut_error;
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(flush_type);
}
/*********************************************************************//**
Set the flush type of a page. */
UNIV_INLINE
void
buf_page_set_flush_type(
/*====================*/
	buf_page_t*	bpage,		/*!< in: buffer page */
	buf_flush_t	flush_type)	/*!< in: flush type */
{
	bpage->flush_type = flush_type;
	ut_ad(buf_page_get_flush_type(bpage) == flush_type);
}

/** Map a block to a file page.
@param[in,out]	block	pointer to control block
@param[in]	page_id	page id */
UNIV_INLINE
void
buf_block_set_file_page(
	buf_block_t*		block,
	const page_id_t		page_id)
{
	buf_block_set_state(block, BUF_BLOCK_FILE_PAGE);
	block->page.id = page_id;
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
/*================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	ut_ad(bpage != NULL);

	enum buf_io_fix	io_fix	= bpage->io_fix;

#ifdef UNIV_DEBUG
	switch (io_fix) {
	case BUF_IO_NONE:
	case BUF_IO_READ:
	case BUF_IO_WRITE:
	case BUF_IO_PIN:
		return(io_fix);
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(io_fix);
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
/*=================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_io_fix(&block->page));
}

/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_page_set_io_fix(
/*================*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

	bpage->io_fix = io_fix;
	ut_ad(buf_page_get_io_fix(bpage) == io_fix);
}

/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_block_set_io_fix(
/*=================*/
	buf_block_t*	block,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
	buf_page_set_io_fix(&block->page, io_fix);
}

/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
the buf_pool->mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
Note that:
* the block can still change its position in the LRU list
* the next and previous pointers can change. */
UNIV_INLINE
void
buf_page_set_sticky(
/*================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);

	bpage->io_fix = BUF_IO_PIN;
}

/*********************************************************************//**
Removes stickiness of a block. */
UNIV_INLINE
void
buf_page_unset_sticky(
/*==================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);

	bpage->io_fix = BUF_IO_NONE;
}

/********************************************************************//**
Determine if a buffer block can be relocated in memory.  The block
can be dirty, but it must not be I/O-fixed or bufferfixed. */
UNIV_INLINE
ibool
buf_page_can_relocate(
/*==================*/
	const buf_page_t*	bpage)	/*!< control block being relocated */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_in_file(bpage));
	ut_ad(bpage->in_LRU_list);

	return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
	       && bpage->buf_fix_count == 0);
}

/*********************************************************************//**
Determine if a block has been flagged old.
@return TRUE if old */
UNIV_INLINE
ibool
buf_page_is_old(
/*============*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(buf_page_in_file(bpage));

	return(bpage->old);
}

/*********************************************************************//**
Flag a block old. */
UNIV_INLINE
void
buf_page_set_old(
/*=============*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	bool		old)	/*!< in: old */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif /* UNIV_DEBUG */
	ut_a(buf_page_in_file(bpage));
	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(bpage->in_LRU_list);

#ifdef UNIV_LRU_DEBUG
	ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
	/* If a block is flagged "old", the LRU_old list must exist. */
	ut_a(!old || buf_pool->LRU_old);

	if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
		const buf_page_t*	prev = UT_LIST_GET_PREV(LRU, bpage);
		const buf_page_t*	next = UT_LIST_GET_NEXT(LRU, bpage);
		if (prev->old == next->old) {
			ut_a(prev->old == old);
		} else {
			ut_a(!prev->old);
			ut_a(buf_pool->LRU_old == (old ? bpage : next));
		}
	}
#endif /* UNIV_LRU_DEBUG */

	bpage->old = old;
}

/*********************************************************************//**
Determine the time of first access of a block in the buffer pool.
@return ut_time_ms() at the time of first access, 0 if not accessed */
UNIV_INLINE
unsigned
buf_page_is_accessed(
/*=================*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->access_time);
}

/*********************************************************************//**
Flag a block accessed. */
UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
	buf_page_t*	bpage)		/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_DEBUG */

	ut_a(buf_page_in_file(bpage));

	if (bpage->access_time == 0) {
		/* Make this the time of the first access. */
		bpage->access_time = static_cast<uint>(ut_time_ms());
	}
}

/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
page frame exists, or NULL.
@return control block, or NULL */
UNIV_INLINE
buf_block_t*
buf_page_get_block(
/*===============*/
	buf_page_t*	bpage)	/*!< in: control block, or NULL */
{
	if (bpage != NULL) {
		ut_ad(buf_page_in_file(bpage));

		if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
			return((buf_block_t*) bpage);
		}
	}

	return(NULL);
}

#ifdef UNIV_DEBUG
/*********************************************************************//**
Gets a pointer to the memory frame of a block.
@return pointer to the frame */
UNIV_INLINE
buf_frame_t*
buf_block_get_frame(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	if (!block) {
		return NULL;
	}

	switch (buf_block_get_state(block)) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
		ut_error;
		break;
	case BUF_BLOCK_FILE_PAGE:
		ut_a(block->page.buf_fix_count > 0);
		/* fall through */
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		goto ok;
	}
	ut_error;
ok:
	return((buf_frame_t*) block->frame);
}
#endif /* UNIV_DEBUG */

/***********************************************************************
FIXME_FTS Gets the frame the pointer is pointing to. */
UNIV_INLINE
buf_frame_t*
buf_frame_align(
/*============*/
			/* out: pointer to frame */
	byte*	ptr)	/* in: pointer to a frame */
{
	buf_frame_t*	frame;

	ut_ad(ptr);

	frame = (buf_frame_t*) ut_align_down(ptr, srv_page_size);

	return(frame);
}

/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */
UNIV_INLINE
void
buf_ptr_get_fsp_addr(
/*=================*/
	const void*	ptr,	/*!< in: pointer to a buffer frame */
	ulint*		space,	/*!< out: space id */
	fil_addr_t*	addr)	/*!< out: page offset and byte offset */
{
	const page_t*	page = (const page_t*) ut_align_down(ptr,
							     srv_page_size);

	*space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
	addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
	addr->boffset = ut_align_offset(ptr, srv_page_size);
}
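
/* For example (an illustrative sketch): with srv_page_size == 16384, a
pointer 86 bytes into a frame yields addr->boffset == 86, while the space id
and page number are read from the header fields
FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID and FIL_PAGE_OFFSET of the page that the
pointer falls into. */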

/**********************************************************************//**
Gets the hash value of the page the pointer is pointing to. This can be used
in searches in the lock hash table.
@return lock hash value */
UNIV_INLINE
unsigned
buf_block_get_lock_hash_val(
/*========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	ut_ad(block);
	ut_ad(buf_page_in_file(&block->page));
	ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_X)
	      || rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_S));

	return(block->lock_hash_val);
}

/********************************************************************//**
Allocates a buf_page_t descriptor. This function must succeed; in case
of failure we assert in this function.
@return the allocated descriptor. */
UNIV_INLINE
buf_page_t*
buf_page_alloc_descriptor(void)
/*===========================*/
{
	buf_page_t*	bpage;

	bpage = (buf_page_t*) ut_zalloc_nokey(sizeof *bpage);
	ut_ad(bpage);
	MEM_UNDEFINED(bpage, sizeof *bpage);

	return(bpage);
}

/********************************************************************//**
Free a buf_page_t descriptor. */
UNIV_INLINE
void
buf_page_free_descriptor(
/*=====================*/
	buf_page_t*	bpage)	/*!< in: bpage descriptor to free. */
{
	ut_free(bpage);
}

/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
void
buf_block_free(
/*===========*/
	buf_block_t*	block)	/*!< in, own: block to be freed */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage((buf_page_t*) block);

	buf_pool_mutex_enter(buf_pool);

	buf_page_mutex_enter(block);

	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);

	buf_LRU_block_free_non_file_page(block);

	buf_page_mutex_exit(block);

	buf_pool_mutex_exit(buf_pool);
}

/*********************************************************************//**
Copies contents of a buffer frame to a given buffer.
@return buf */
UNIV_INLINE
byte*
buf_frame_copy(
/*===========*/
	byte*			buf,	/*!< in: buffer to copy to */
	const buf_frame_t*	frame)	/*!< in: buffer frame */
{
	ut_ad(buf && frame);

	ut_memcpy(buf, frame, srv_page_size);

	return(buf);
}

/********************************************************************//**
Gets the youngest modification log sequence number for a frame.
Returns zero if the page is not a file page or no modification has
occurred yet.
@return newest modification to page */
UNIV_INLINE
lsn_t
buf_page_get_newest_modification(
/*=============================*/
	const buf_page_t*	bpage)	/*!< in: block containing the
					page frame */
{
	lsn_t		lsn;
	BPageMutex*	block_mutex = buf_page_get_mutex(bpage);

	mutex_enter(block_mutex);

	if (buf_page_in_file(bpage)) {
		lsn = bpage->newest_modification;
	} else {
		lsn = 0;
	}

	mutex_exit(block_mutex);

	return(lsn);
}

/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must either
(1) own the buf_pool mutex while the block's bufferfix count is zero, or
(2) own an x-lock on the block. */
UNIV_INLINE
void
buf_block_modify_clock_inc(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage((buf_page_t*) block);

	/* No latch is acquired for the shared temporary tablespace. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ut_ad((buf_pool_mutex_own(buf_pool)
		       && (block->page.buf_fix_count == 0))
		      || rw_lock_own_flagged(&block->lock,
					     RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
	}
#endif /* UNIV_DEBUG */
	assert_block_ahi_valid(block);

	block->modify_clock++;
}

/********************************************************************//**
Returns the value of the modify clock. The caller must have an s-lock
or x-lock on the block.
@return value */
UNIV_INLINE
ib_uint64_t
buf_block_get_modify_clock(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
#ifdef UNIV_DEBUG
	/* No latch is acquired for the shared temporary tablespace. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
		      || rw_lock_own(&(block->lock), RW_LOCK_X)
		      || rw_lock_own(&(block->lock), RW_LOCK_SX));
	}
#endif /* UNIV_DEBUG */

	return(block->modify_clock);
}
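
/* Optimistic-access sketch (illustrative; the real consumers are functions
such as buf_page_optimistic_get()): the modify clock is sampled while the
block is latched, the latch is released, and on re-access the value is
compared again; a mismatch means the block was modified or reused in the
meantime and the operation must be retried.
	ib_uint64_t	saved = buf_block_get_modify_clock(block);
	... release the latch, do other work, re-acquire the latch ...
	if (buf_block_get_modify_clock(block) != saved) {
		... restart the operation ...
	}
*/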

/** Increments the bufferfix count.
@param[in,out]	bpage	block to bufferfix
@return the count */
UNIV_INLINE
ulint
buf_block_fix(buf_page_t* bpage)
{
	return uint32(my_atomic_add32_explicit(
			      &bpage->buf_fix_count, 1,
			      MY_MEMORY_ORDER_RELAXED)) + 1;
}

/** Increments the bufferfix count.
@param[in,out]	block	block to bufferfix
@return the count */
UNIV_INLINE
ulint
buf_block_fix(buf_block_t* block)
{
	return buf_block_fix(&block->page);
}

/** Get the bufferfix count.
@param[in]	bpage	control block
@return the count */
UNIV_INLINE
ulint
buf_block_get_fix(buf_page_t* bpage)
{
	return my_atomic_load32_explicit(&bpage->buf_fix_count,
					 MY_MEMORY_ORDER_RELAXED);
}

/** Get the bufferfix count.
@param[in]	block	control block
@return the count */
UNIV_INLINE
ulint
buf_block_get_fix(buf_block_t* block)
{
	return buf_block_get_fix(&block->page);
}

/*******************************************************************//**
Increments the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_inc_func(
/*=======================*/
#ifdef UNIV_DEBUG
	const char*	file,	/*!< in: file name */
	unsigned	line,	/*!< in: line */
#endif /* UNIV_DEBUG */
	buf_block_t*	block)	/*!< in/out: block to bufferfix */
{
#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if access
	to the block is single-threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ibool	ret;
		ret = rw_lock_s_lock_nowait(&block->debug_latch, file, line);
		ut_a(ret);
	}
#endif /* UNIV_DEBUG */

	buf_block_fix(block);
}

/** Decrements the bufferfix count.
@param[in,out]	bpage	block to bufferunfix
@return	the remaining buffer-fix count */
UNIV_INLINE
ulint
buf_block_unfix(buf_page_t* bpage)
{
	uint32	count = uint32(my_atomic_add32_explicit(
				       &bpage->buf_fix_count,
				       -1, MY_MEMORY_ORDER_RELAXED));
	ut_ad(count != 0);
	return count - 1;
}

/** Decrements the bufferfix count.
@param[in,out]	block	block to bufferunfix
@return the remaining buffer-fix count */
UNIV_INLINE
ulint
buf_block_unfix(buf_block_t* block)
{
	return buf_block_unfix(&block->page);
}
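
/* Pairing sketch (illustrative only): every buf_block_fix() call must
eventually be balanced by a buf_block_unfix() on the same block, e.g.
	buf_block_fix(block);
	... access the block; a nonzero fix count prevents relocation ...
	buf_block_unfix(block);
In debug builds the checked variants buf_block_buf_fix_inc_func() and
buf_block_buf_fix_dec() additionally hold the block's debug_latch in
between. */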

/*******************************************************************//**
Decrements the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_dec(
/*==================*/
	buf_block_t*	block)	/*!< in/out: block to bufferunfix */
{
#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if access
	to the block is single-threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		rw_lock_s_unlock(&block->debug_latch);
	}
#endif /* UNIV_DEBUG */

	buf_block_unfix(block);
}

/** Returns the buffer pool instance given a page id.
@param[in]	page_id	page id
@return buffer pool */
inline buf_pool_t* buf_pool_get(const page_id_t page_id)
{
	/* 2log of BUF_READ_AHEAD_AREA (64) */
	ulint		ignored_page_no = page_id.page_no() >> 6;

	page_id_t	id(page_id.space(), ignored_page_no);

	ulint		i = id.fold() % srv_buf_pool_instances;

	return(&buf_pool_ptr[i]);
}
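
/* Consequence of the shift above (an illustrative note): the low 6 bits of
the page number are ignored, so all 64 pages of one read-ahead area, e.g.
pages 0..63 or 64..127 of a given tablespace, map to the same buffer pool
instance. */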

/******************************************************************//**
Returns the buffer pool instance given its array index
@return buffer pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_array(
/*================*/
	ulint	index)		/*!< in: array index to get
				buffer pool instance from */
{
	ut_ad(index < MAX_BUFFER_POOLS);
	ut_ad(index < srv_buf_pool_instances);
	return(&buf_pool_ptr[index]);
}

/** Returns the control block of a file page, NULL if not found.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@return block, NULL if not found */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_low(
	buf_pool_t*		buf_pool,
	const page_id_t		page_id)
{
	buf_page_t*	bpage;

#ifdef UNIV_DEBUG
	rw_lock_t*	hash_lock;

	hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
	ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)
	      || rw_lock_own(hash_lock, RW_LOCK_S));
#endif /* UNIV_DEBUG */

	/* Look for the page in the hash table */

	HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*,
		    bpage,
		    ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
			  && buf_page_in_file(bpage)),
		    page_id == bpage->id);
	if (bpage) {
		ut_a(buf_page_in_file(bpage));
		ut_ad(bpage->in_page_hash);
		ut_ad(!bpage->in_zip_hash);
		ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
	}

	return(bpage);
}

/** Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@param[in,out]	lock		lock of the page hash acquired if bpage is
found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
@param[in]	watch		if true, return watch sentinel also.
@return pointer to the bpage or NULL; if NULL, lock is also NULL or
a watch sentinel. */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_locked(
	buf_pool_t*		buf_pool,
	const page_id_t		page_id,
	rw_lock_t**		lock,
	ulint			lock_mode,
	bool			watch)
{
	buf_page_t*	bpage = NULL;
	rw_lock_t*	hash_lock;
	ulint		mode = RW_LOCK_S;

	if (lock != NULL) {
		*lock = NULL;
		ut_ad(lock_mode == RW_LOCK_X
		      || lock_mode == RW_LOCK_S);
		mode = lock_mode;
	}

	hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());

	ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
	      && !rw_lock_own(hash_lock, RW_LOCK_S));

	if (mode == RW_LOCK_S) {
		rw_lock_s_lock(hash_lock);

		/* If we do not hold buf_pool->mutex, page_hash can change. */
		hash_lock = hash_lock_s_confirm(
			hash_lock, buf_pool->page_hash, page_id.fold());
	} else {
		rw_lock_x_lock(hash_lock);
		/* If we do not hold buf_pool->mutex, page_hash can change. */
		hash_lock = hash_lock_x_confirm(
			hash_lock, buf_pool->page_hash, page_id.fold());
	}

	bpage = buf_page_hash_get_low(buf_pool, page_id);

	if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) {
		if (!watch) {
			bpage = NULL;
		}
		goto unlock_and_exit;
	}

	ut_ad(buf_page_in_file(bpage));
	ut_ad(page_id == bpage->id);

	if (lock == NULL) {
		/* The caller wants us to release the page_hash lock */
		goto unlock_and_exit;
	} else {
		/* To be released by the caller */
		*lock = hash_lock;
		goto exit;
	}

unlock_and_exit:
	if (mode == RW_LOCK_S) {
		rw_lock_s_unlock(hash_lock);
	} else {
		rw_lock_x_unlock(hash_lock);
	}
exit:
	return(bpage);
}
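
/* Usage sketch (illustrative; error handling and the watch case omitted):
	rw_lock_t*	hash_lock;
	buf_page_t*	b = buf_page_hash_get_locked(buf_pool, page_id,
						     &hash_lock, RW_LOCK_S,
						     false);
	if (b != NULL) {
		... inspect b while holding hash_lock in S mode ...
		rw_lock_s_unlock(hash_lock);
	}
If b is NULL, hash_lock is also NULL and nothing needs to be released. */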

/** Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@param[in,out]	lock		lock of the page hash acquired if bpage is
found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
@return pointer to the block or NULL; if NULL, lock is also NULL. */
UNIV_INLINE
buf_block_t*
buf_block_hash_get_locked(
	buf_pool_t*		buf_pool,
	const page_id_t		page_id,
	rw_lock_t**		lock,
	ulint			lock_mode)
{
	buf_page_t*	bpage = buf_page_hash_get_locked(buf_pool,
							 page_id,
							 lock,
							 lock_mode);
	buf_block_t*	block = buf_page_get_block(bpage);

	if (block != NULL) {

		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
		ut_ad(!lock || rw_lock_own(*lock, lock_mode));

		return(block);
	} else if (bpage) {
		/* It is not a block. Just a bpage */
		ut_ad(buf_page_in_file(bpage));

		if (lock) {
			if (lock_mode == RW_LOCK_S) {
				rw_lock_s_unlock(*lock);
			} else {
				rw_lock_x_unlock(*lock);
			}
			*lock = NULL;
		}
		return(NULL);
	}

	ut_ad(!bpage);
	ut_ad(lock == NULL || *lock == NULL);
	return(NULL);
}

/** Returns true if the page can be found in the buffer pool hash table.
NOTE that it is possible that the page is not yet read from disk,
though.
@param[in]	page_id	page id
@return true if found in the page hash table */
inline bool buf_page_peek(const page_id_t page_id)
{
	buf_pool_t*	buf_pool = buf_pool_get(page_id);

	return(buf_page_hash_get(buf_pool, page_id) != NULL);
}

/********************************************************************//**
Releases a compressed-only page acquired with buf_page_get_zip(). */
UNIV_INLINE
void
buf_page_release_zip(
/*=================*/
	buf_page_t*	bpage)		/*!< in: buffer block */
{
	ut_ad(bpage);
	ut_a(bpage->buf_fix_count > 0);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_FILE_PAGE:
#ifdef UNIV_DEBUG
	{
		/* No debug latch is acquired if the block belongs to the
		system temporary tablespace. The debug latch is not of much
		help if access to the block is single-threaded. */
		buf_block_t*	block = reinterpret_cast<buf_block_t*>(bpage);
		if (!fsp_is_system_temporary(block->page.id.space())) {
			rw_lock_s_unlock(&block->debug_latch);
		}
	}
#endif /* UNIV_DEBUG */
		/* Fall through */
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage));
		return;

	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	ut_error;
}

/********************************************************************//**
Releases a latch, if specified. */
UNIV_INLINE
void
buf_page_release_latch(
/*===================*/
	buf_block_t*	block,		/*!< in: buffer block */
	ulint		rw_latch)	/*!< in: RW_S_LATCH, RW_X_LATCH,
					RW_NO_LATCH */
{
#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if access
	to the block is single-threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		rw_lock_s_unlock(&block->debug_latch);
	}
#endif /* UNIV_DEBUG */

	if (rw_latch == RW_S_LATCH) {
		rw_lock_s_unlock(&block->lock);
	} else if (rw_latch == RW_SX_LATCH) {
		rw_lock_sx_unlock(&block->lock);
	} else if (rw_latch == RW_X_LATCH) {
		rw_lock_x_unlock(&block->lock);
	}
}

#ifdef UNIV_DEBUG
/*********************************************************************//**
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
page if we know the latching order level of the acquired latch. */
UNIV_INLINE
void
buf_block_dbg_add_level(
/*====================*/
	buf_block_t*	block,	/*!< in: buffer page
				where we have acquired latch */
	latch_level_t	level)	/*!< in: latching order level */
{
	sync_check_lock(&block->lock, level);
}

#endif /* UNIV_DEBUG */
/********************************************************************//**
Acquire mutex on all buffer pool instances. */
UNIV_INLINE
void
buf_pool_mutex_enter_all(void)
/*==========================*/
{
	for (ulint i = 0; i < srv_buf_pool_instances; ++i) {
		buf_pool_t*	buf_pool = buf_pool_from_array(i);

		buf_pool_mutex_enter(buf_pool);
	}
}

/********************************************************************//**
Release mutex on all buffer pool instances. */
UNIV_INLINE
void
buf_pool_mutex_exit_all(void)
/*=========================*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);
		buf_pool_mutex_exit(buf_pool);
	}
}
/*********************************************************************//**
Get the nth chunk's buffer block in the specified buffer pool.
@return the nth chunk's buffer block. */
UNIV_INLINE
buf_block_t*
buf_get_nth_chunk_block(
/*====================*/
	const buf_pool_t* buf_pool,	/*!< in: buffer pool instance */
	ulint		n,		/*!< in: nth chunk in the buffer pool */
	ulint*		chunk_size)	/*!< out: chunk size */
{
	const buf_chunk_t*	chunk;

	chunk = buf_pool->chunks + n;
	*chunk_size = chunk->size;
	return(chunk->blocks);
}

/********************************************************************//**
Get buf frame. */
UNIV_INLINE
void *
buf_page_get_frame(
/*===============*/
	const buf_page_t*	bpage) /*!< in: buffer pool page */
{
	/* With encryption or page compression, the buffer pool page may
	contain an extra buffer where the result is stored. */
	if (bpage->slot && bpage->slot->out_buf) {
		return bpage->slot->out_buf;
	} else if (bpage->zip.data) {
		return bpage->zip.data;
	} else {
		return ((buf_block_t*) bpage)->frame;
	}
}