/*****************************************************************************

Copyright (c) 1995, 2014, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.

This program is also distributed with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation.  The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have included with MySQL.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License, version 2.0, for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/buf0buf.ic
The database buffer buf_pool

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#include "mtr0mtr.h"
#ifndef UNIV_HOTBACKUP
#include "buf0flu.h"
#include "buf0lru.h"
#include "buf0rea.h"

/** A chunk of buffers. The buffer pool is allocated in chunks. */
struct buf_chunk_t{
	ulint		mem_size;	/*!< allocated size of the chunk */
	ulint		size;		/*!< size of frames[] and blocks[] */
	void*		mem;		/*!< pointer to the memory area which
					was allocated for the frames */
	buf_block_t*	blocks;		/*!< array of buffer control blocks */
};


#include "srv0srv.h"

/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
UNIV_INLINE
ulint
buf_pool_get_curr_size(void)
/*========================*/
{
	return(srv_buf_pool_curr_size);
}

/********************************************************************//**
Calculates the index of a buffer pool in the buf_pool[] array.
@return	the position of the buffer pool in buf_pool[] */
UNIV_INLINE
ulint
buf_pool_index(
/*===========*/
	const buf_pool_t*	buf_pool)	/*!< in: buffer pool */
{
	ulint	i = buf_pool - buf_pool_ptr;
	ut_ad(i < MAX_BUFFER_POOLS);
	ut_ad(i < srv_buf_pool_instances);
	return(i);
}

/******************************************************************//**
Returns the buffer pool instance given a page instance
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_bpage(
/*================*/
	const buf_page_t*	bpage) /*!< in: buffer pool page */
{
	ulint	i;
	i = bpage->buf_pool_index;
	ut_ad(i < srv_buf_pool_instances);
	return(&buf_pool_ptr[i]);
}

/******************************************************************//**
Returns the buffer pool instance given a block instance
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_block(
/*================*/
	const buf_block_t*	block) /*!< in: block */
{
	return(buf_pool_from_bpage(&block->page));
}

/*********************************************************************//**
Gets the current size of buffer buf_pool in pages.
@return size in pages */
UNIV_INLINE
ulint
buf_pool_get_n_pages(void)
/*======================*/
{
	return(buf_pool_get_curr_size() / UNIV_PAGE_SIZE);
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return	freed_page_clock */
UNIV_INLINE
ulint
buf_page_get_freed_page_clock(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	/* This is sometimes read without holding any buffer pool mutex. */
	return(bpage->freed_page_clock);
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return	freed_page_clock */
UNIV_INLINE
ulint
buf_block_get_freed_page_clock(
/*===========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	return(buf_page_get_freed_page_clock(&block->page));
}

/********************************************************************//**
Tells if a block is still close enough to the MRU end of the LRU list
that it is not in danger of getting evicted, which also implies that it
has been accessed recently.
Note that this is a heuristic only and does not reserve the buffer pool
mutex.
@return	TRUE if block is close to MRU end of LRU */
UNIV_INLINE
ibool
buf_page_peek_if_young(
/*===================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	/* FIXME: bpage->freed_page_clock is 31 bits */
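	/* The block is deemed young if fewer than one quarter of the
	"new" (not old) LRU sublist length worth of pages have been
	evicted since the block's freed_page_clock was last recorded,
	i.e. since the block was last made young. */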
	return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
	       < ((ulint) bpage->freed_page_clock
		  + (buf_pool->curr_size
		     * (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
		     / (BUF_LRU_OLD_RATIO_DIV * 4))));
}

/********************************************************************//**
Recommends a move of a block to the start of the LRU list if there is danger
of it being dropped from the buffer pool. NOTE: does not reserve the buffer
pool mutex.
@return	TRUE if should be made younger */
UNIV_INLINE
ibool
buf_page_peek_if_too_old(
/*=====================*/
	const buf_page_t*	bpage)	/*!< in: block to make younger */
{
	buf_pool_t*		buf_pool = buf_pool_from_bpage(bpage);

	if (buf_pool->freed_page_clock == 0) {
		/* If eviction has not started yet, do not update the
		statistics or move blocks in the LRU list.  This is
		either the warm-up phase or an in-memory workload. */
		return(FALSE);
	} else if (buf_LRU_old_threshold_ms && bpage->old) {
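		/* The block is in the "old" sublist: recommend making
		it young only if its first access happened at least
		buf_LRU_old_threshold_ms milliseconds ago. */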
		unsigned	access_time = buf_page_is_accessed(bpage);

		if (access_time > 0
		    && ((ib_uint32_t) (ut_time_ms() - access_time))
		    >= buf_LRU_old_threshold_ms) {
			return(TRUE);
		}

		buf_pool->stat.n_pages_not_made_young++;
		return(FALSE);
	} else {
		return(!buf_page_peek_if_young(bpage));
	}
}
#endif /* !UNIV_HOTBACKUP */

/*********************************************************************//**
Gets the state of a block.
@return	state */
UNIV_INLINE
enum buf_page_state
buf_page_get_state(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	enum buf_page_state	state = (enum buf_page_state) bpage->state;

#ifdef UNIV_DEBUG
	switch (state) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_FILE_PAGE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	default:
		ut_error;
	}
#endif /* UNIV_DEBUG */

	return(state);
}
/*********************************************************************//**
Gets the state of a block.
@return	state */
UNIV_INLINE
enum buf_page_state
buf_block_get_state(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_state(&block->page));
}
/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_page_set_state(
/*===============*/
	buf_page_t*		bpage,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
#ifdef UNIV_DEBUG
	enum buf_page_state	old_state	= buf_page_get_state(bpage);

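	/* Check that the requested change is a legal transition in
	the block state machine. */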
	switch (old_state) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
		ut_a(state == BUF_BLOCK_ZIP_DIRTY);
		break;
	case BUF_BLOCK_ZIP_DIRTY:
		ut_a(state == BUF_BLOCK_ZIP_PAGE);
		break;
	case BUF_BLOCK_NOT_USED:
		ut_a(state == BUF_BLOCK_READY_FOR_USE);
		break;
	case BUF_BLOCK_READY_FOR_USE:
		ut_a(state == BUF_BLOCK_MEMORY
		     || state == BUF_BLOCK_FILE_PAGE
		     || state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_MEMORY:
		ut_a(state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_FILE_PAGE:
		ut_a(state == BUF_BLOCK_NOT_USED
		     || state == BUF_BLOCK_REMOVE_HASH);
		break;
	case BUF_BLOCK_REMOVE_HASH:
		ut_a(state == BUF_BLOCK_MEMORY);
		break;
	}
#endif /* UNIV_DEBUG */
	bpage->state = state;
	ut_ad(buf_page_get_state(bpage) == state);
}

/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
	buf_block_t*		block,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
	buf_page_set_state(&block->page, state);
}

/*********************************************************************//**
Determines if a block is mapped to a tablespace.
@return	TRUE if mapped */
UNIV_INLINE
ibool
buf_page_in_file(
/*=============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_FILE_PAGE:
		return(TRUE);
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	return(FALSE);
}

#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Determines if a block should be on unzip_LRU list.
@return	TRUE if block belongs to unzip_LRU */
UNIV_INLINE
ibool
buf_page_belongs_to_unzip_LRU(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->zip.data
	       && buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
}

/*********************************************************************//**
Gets the mutex of a block.
@return	pointer to mutex protecting bpage */
UNIV_INLINE
ib_mutex_t*
buf_page_get_mutex(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		return(NULL);
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY: {
		buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

		return(&buf_pool->zip_mutex);
		}
	default:
		return(&((buf_block_t*) bpage)->mutex);
	}
}

/*********************************************************************//**
Get the flush type of a page.
@return	flush type */
UNIV_INLINE
buf_flush_t
buf_page_get_flush_type(
/*====================*/
	const buf_page_t*	bpage)	/*!< in: buffer page */
{
	buf_flush_t	flush_type = (buf_flush_t) bpage->flush_type;

#ifdef UNIV_DEBUG
	switch (flush_type) {
	case BUF_FLUSH_LRU:
	case BUF_FLUSH_LIST:
	case BUF_FLUSH_SINGLE_PAGE:
		return(flush_type);
	case BUF_FLUSH_N_TYPES:
		ut_error;
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(flush_type);
}
/*********************************************************************//**
Set the flush type of a page. */
UNIV_INLINE
void
buf_page_set_flush_type(
/*====================*/
	buf_page_t*	bpage,		/*!< in: buffer page */
	buf_flush_t	flush_type)	/*!< in: flush type */
{
	bpage->flush_type = flush_type;
	ut_ad(buf_page_get_flush_type(bpage) == flush_type);
}

/*********************************************************************//**
Map a block to a file page. */
UNIV_INLINE
void
buf_block_set_file_page(
/*====================*/
	buf_block_t*		block,	/*!< in/out: pointer to control block */
	ulint			space,	/*!< in: tablespace id */
	ulint			page_no)/*!< in: page number */
{
	buf_block_set_state(block, BUF_BLOCK_FILE_PAGE);
	block->page.space = static_cast<ib_uint32_t>(space);
	block->page.offset = static_cast<ib_uint32_t>(page_no);
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return	io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
/*================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	return buf_page_get_io_fix_unlocked(bpage);
}

/*********************************************************************//**
Gets the io_fix state of a block.  Does not assert that the
buf_page_get_mutex() mutex is held, to be used in the cases where it is safe
not to hold it.
@return	io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix_unlocked(
/*=========================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	enum buf_io_fix	io_fix = (enum buf_io_fix) bpage->io_fix;
#ifdef UNIV_DEBUG
	switch (io_fix) {
	case BUF_IO_NONE:
	case BUF_IO_READ:
	case BUF_IO_WRITE:
	case BUF_IO_PIN:
		return(io_fix);
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(io_fix);
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return	io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
/*=================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_io_fix(&block->page));
}

/*********************************************************************//**
Gets the io_fix state of a block.  Does not assert that the
buf_page_get_mutex() mutex is held, to be used in the cases where it is safe
not to hold it.
@return	io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix_unlocked(
/*==========================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_io_fix_unlocked(&block->page));
}


/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_page_set_io_fix(
/*================*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

	bpage->io_fix = io_fix;
	ut_ad(buf_page_get_io_fix(bpage) == io_fix);
}

/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_block_set_io_fix(
/*=================*/
	buf_block_t*	block,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
	buf_page_set_io_fix(&block->page, io_fix);
}

/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
the buf_pool->LRU_list_mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
Note that:
* the block can still change its position in the LRU list
* the next and previous pointers can change. */
UNIV_INLINE
void
buf_page_set_sticky(
/*================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
#endif
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);
	ut_ad(bpage->in_LRU_list);

	bpage->io_fix = BUF_IO_PIN;
}

/*********************************************************************//**
Removes stickiness of a block. */
UNIV_INLINE
void
buf_page_unset_sticky(
/*==================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);

	bpage->io_fix = BUF_IO_NONE;
}

/********************************************************************//**
Determine if a buffer block can be relocated in memory.  The block
can be dirty, but it must not be I/O-fixed or bufferfixed. */
UNIV_INLINE
ibool
buf_page_can_relocate(
/*==================*/
	const buf_page_t*	bpage)	/*!< control block being relocated */
{
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_in_file(bpage));
	ut_ad(bpage->in_LRU_list);

	return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
	       && bpage->buf_fix_count == 0);
}

/*********************************************************************//**
Determine if a block has been flagged old.
@return	TRUE if old */
UNIV_INLINE
ibool
buf_page_is_old(
/*============*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif
	/* Buffer page mutex is not strictly required here for heuristic
	purposes even if LRU mutex is not being held.  Keep the assertion
	for now since all the callers hold it.  */
	ut_ad(mutex_own(buf_page_get_mutex(bpage))
	      || mutex_own(&buf_pool->LRU_list_mutex));
	ut_ad(buf_page_in_file(bpage));

	return(bpage->old);
}

/*********************************************************************//**
Flag a block old. */
UNIV_INLINE
void
buf_page_set_old(
/*=============*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	ibool		old)	/*!< in: old */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif /* UNIV_DEBUG */
	ut_a(buf_page_in_file(bpage));
	ut_ad(mutex_own(&buf_pool->LRU_list_mutex));
	ut_ad(bpage->in_LRU_list);

#ifdef UNIV_LRU_DEBUG
	ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
	/* If a block is flagged "old", the LRU_old list must exist. */
	ut_a(!old || buf_pool->LRU_old);

	if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
		const buf_page_t*	prev = UT_LIST_GET_PREV(LRU, bpage);
		const buf_page_t*	next = UT_LIST_GET_NEXT(LRU, bpage);
		if (prev->old == next->old) {
			ut_a(prev->old == old);
		} else {
			ut_a(!prev->old);
			ut_a(buf_pool->LRU_old == (old ? bpage : next));
		}
	}
#endif /* UNIV_LRU_DEBUG */

	bpage->old = old;
}

/*********************************************************************//**
Determine the time of first access of a block in the buffer pool.
@return	ut_time_ms() at the time of first access, 0 if not accessed */
UNIV_INLINE
unsigned
buf_page_is_accessed(
/*=================*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->access_time);
}

/*********************************************************************//**
Flag a block accessed. */
UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
	buf_page_t*	bpage)		/*!< in/out: control block */
{
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

	ut_a(buf_page_in_file(bpage));

	if (bpage->access_time == 0) {
		/* Make this the time of the first access. */
		bpage->access_time = static_cast<uint>(ut_time_ms());
	}
}

/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
page frame exists, or NULL.
@return	control block, or NULL */
UNIV_INLINE
buf_block_t*
buf_page_get_block(
/*===============*/
	buf_page_t*	bpage)	/*!< in: control block, or NULL */
{
	if (bpage != NULL) {
#ifdef UNIV_DEBUG
		buf_pool_t*	buf_pool	= buf_pool_from_bpage(bpage);
		ut_ad(buf_page_hash_lock_held_s_or_x(buf_pool, bpage)
		      || mutex_own(&buf_pool->LRU_list_mutex));
#endif
		ut_ad(buf_page_in_file(bpage));

		if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
			return((buf_block_t*) bpage);
		}
	}

	return(NULL);
}
#endif /* !UNIV_HOTBACKUP */

#ifdef UNIV_DEBUG
/*********************************************************************//**
Gets a pointer to the memory frame of a block.
@return	pointer to the frame */
UNIV_INLINE
buf_frame_t*
buf_block_get_frame(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	SRV_CORRUPT_TABLE_CHECK(block, return(0););

	return(buf_nonnull_block_get_frame(block));
}

/*********************************************************************//**
Gets a pointer to the memory frame of a block, where block is known not to be
NULL.
@return	pointer to the frame */
UNIV_INLINE
buf_frame_t*
buf_nonnull_block_get_frame(
/*========================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	switch (buf_block_get_state(block)) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
		ut_error;
		break;
	case BUF_BLOCK_FILE_PAGE:
# ifndef UNIV_HOTBACKUP
		ut_a(block->page.buf_fix_count > 0);
# endif /* !UNIV_HOTBACKUP */
		/* fall through */
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		goto ok;
	}
	ut_error;
ok:
	return((buf_frame_t*) block->frame);
}

#endif /* UNIV_DEBUG */

/*********************************************************************//**
Gets the space id of a block.
@return	space id */
UNIV_INLINE
ulint
buf_page_get_space(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	ut_ad(bpage);
	ut_a(buf_page_in_file(bpage));

	return(bpage->space);
}

/*********************************************************************//**
Gets the space id of a block.
@return	space id */
UNIV_INLINE
ulint
buf_block_get_space(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	ut_ad(block);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	return(block->page.space);
}

/*********************************************************************//**
Gets the page number of a block.
@return	page number */
UNIV_INLINE
ulint
buf_page_get_page_no(
/*=================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	ut_ad(bpage);
	ut_a(buf_page_in_file(bpage));

	return(bpage->offset);
}
/***********************************************************************
FIXME_FTS Gets the frame the pointer is pointing to. */
UNIV_INLINE
buf_frame_t*
buf_frame_align(
/*============*/
			/* out: pointer to frame */
	byte*	ptr)	/* in: pointer to a frame */
{
	buf_frame_t*	frame;

	ut_ad(ptr);

	frame = (buf_frame_t*) ut_align_down(ptr, UNIV_PAGE_SIZE);

	return(frame);
}

/*********************************************************************//**
Gets the page number of a block.
@return	page number */
UNIV_INLINE
ulint
buf_block_get_page_no(
/*==================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	ut_ad(block);
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

	return(block->page.offset);
}

/*********************************************************************//**
Gets the compressed page size of a block.
@return	compressed page size, or 0 */
UNIV_INLINE
ulint
buf_page_get_zip_size(
/*==================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
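	/* zip.ssize is a shift count: 0 means there is no compressed
	page, otherwise the compressed page size is
	(UNIV_ZIP_SIZE_MIN / 2) << ssize bytes. */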
	return(bpage->zip.ssize
	       ? (UNIV_ZIP_SIZE_MIN >> 1) << bpage->zip.ssize : 0);
}

/*********************************************************************//**
Gets the compressed page size of a block.
@return	compressed page size, or 0 */
UNIV_INLINE
ulint
buf_block_get_zip_size(
/*===================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(block->page.zip.ssize
	       ? (UNIV_ZIP_SIZE_MIN >> 1) << block->page.zip.ssize : 0);
}

#ifndef UNIV_HOTBACKUP
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
/*********************************************************************//**
Gets the compressed page descriptor corresponding to an uncompressed page
if applicable.
@return	compressed page descriptor, or NULL */
UNIV_INLINE
const page_zip_des_t*
buf_frame_get_page_zip(
/*===================*/
	const byte*	ptr)	/*!< in: pointer to the page */
{
	return(buf_block_get_page_zip(buf_block_align(ptr)));
}
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
#endif /* !UNIV_HOTBACKUP */

/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */
UNIV_INLINE
void
buf_ptr_get_fsp_addr(
/*=================*/
	const void*	ptr,	/*!< in: pointer to a buffer frame */
	ulint*		space,	/*!< out: space id */
	fil_addr_t*	addr)	/*!< out: page offset and byte offset */
{
	const page_t*	page = (const page_t*) ut_align_down(ptr,
							     UNIV_PAGE_SIZE);

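	/* Read the space id and the page number from the FIL header
	of the frame that contains ptr. */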
	*space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
	addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
	addr->boffset = ut_align_offset(ptr, UNIV_PAGE_SIZE);
}

#ifndef UNIV_HOTBACKUP
/**********************************************************************//**
Gets the hash value of the page the pointer is pointing to. This can be used
in searches in the lock hash table.
@return	lock hash value */
UNIV_INLINE
ulint
buf_block_get_lock_hash_val(
/*========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	ut_ad(block);
	ut_ad(buf_page_in_file(&block->page));
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_EXCLUSIVE)
	      || rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */
	return(block->lock_hash_val);
}

/********************************************************************//**
Allocates a buf_page_t descriptor. This function must succeed. In case
of failure we assert in this function.
@return the allocated descriptor. */
UNIV_INLINE
buf_page_t*
buf_page_alloc_descriptor(void)
/*===========================*/
{
	buf_page_t*	bpage;

	bpage = (buf_page_t*) ut_malloc(sizeof *bpage);
	ut_d(memset(bpage, 0, sizeof *bpage));
	UNIV_MEM_ALLOC(bpage, sizeof *bpage);

	return(bpage);
}

/********************************************************************//**
Free a buf_page_t descriptor. */
UNIV_INLINE
void
buf_page_free_descriptor(
/*=====================*/
	buf_page_t*	bpage)	/*!< in: bpage descriptor to free. */
{
	ut_free(bpage);
}

/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
void
buf_block_free(
/*===========*/
	buf_block_t*	block)	/*!< in, own: block to be freed */
{
	mutex_enter(&block->mutex);

	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);

	buf_LRU_block_free_non_file_page(block);

	mutex_exit(&block->mutex);
}
#endif /* !UNIV_HOTBACKUP */

/*********************************************************************//**
Copies contents of a buffer frame to a given buffer.
@return	buf */
UNIV_INLINE
byte*
buf_frame_copy(
/*===========*/
	byte*			buf,	/*!< in: buffer to copy to */
	const buf_frame_t*	frame)	/*!< in: buffer frame */
{
	ut_ad(buf && frame);

	ut_memcpy(buf, frame, UNIV_PAGE_SIZE);

	return(buf);
}

#ifndef UNIV_HOTBACKUP
/********************************************************************//**
Calculates a folded value of a file page address to use in the page hash
table.
@return	the folded value */
UNIV_INLINE
ulint
buf_page_address_fold(
/*==================*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: offset of the page within space */
{
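	/* Shift the space id into the high-order bits and add it once
	more together with the page offset, so that pages of different
	tablespaces and consecutive pages of one tablespace spread
	over the hash table. */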
	return((space << 20) + space + offset);
}

/********************************************************************//**
Gets the youngest modification log sequence number for a frame.
Returns zero if it is not a file page or no modification has occurred yet.
@return	newest modification to page */
UNIV_INLINE
lsn_t
buf_page_get_newest_modification(
/*=============================*/
	const buf_page_t*	bpage)	/*!< in: block containing the
					page frame */
{
	lsn_t		lsn;
	ib_mutex_t*	block_mutex = buf_page_get_mutex(bpage);

	mutex_enter(block_mutex);

	if (buf_page_in_file(bpage)) {
		lsn = bpage->newest_modification;
	} else {
		lsn = 0;
	}

	mutex_exit(block_mutex);

	return(lsn);
}

/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must either (1) own
the LRU list mutex while the block bufferfix count is zero, or (2) own an
x-lock on the block. */
UNIV_INLINE
void
buf_block_modify_clock_inc(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
#ifdef UNIV_SYNC_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage((buf_page_t*) block);

	ut_ad((mutex_own(&buf_pool->LRU_list_mutex)
	       && (block->page.buf_fix_count == 0))
	      || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */

	block->modify_clock++;
}

/********************************************************************//**
Returns the value of the modify clock. The caller must have an s-lock
or x-lock on the block.
@return	value */
UNIV_INLINE
ib_uint64_t
buf_block_get_modify_clock(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
#ifdef UNIV_SYNC_DEBUG
	ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
	      || rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
#endif /* UNIV_SYNC_DEBUG */

	return(block->modify_clock);
}

/*******************************************************************//**
Increments the bufferfix count. */
UNIV_INLINE
void
buf_block_fix(
/*===========*/
	buf_block_t*	block)	/*!< in/out: block to bufferfix */
{
	ut_ad(!mutex_own(buf_page_get_mutex(&block->page)));
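	/* When atomic builtins are available the fix count is
	adjusted without acquiring the block mutex; otherwise a short
	critical section protected by the block mutex is used. */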
#ifdef PAGE_ATOMIC_REF_COUNT
	os_atomic_increment_uint32(&block->page.buf_fix_count, 1);
#else
	ib_mutex_t*	block_mutex = buf_page_get_mutex(&block->page);

	mutex_enter(block_mutex);
	++block->page.buf_fix_count;
	mutex_exit(block_mutex);
#endif /* PAGE_ATOMIC_REF_COUNT */
}

/*******************************************************************//**
Increments the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_inc_func(
/*=======================*/
#ifdef UNIV_SYNC_DEBUG
	const char*	file,	/*!< in: file name */
	ulint		line,	/*!< in: line */
#endif /* UNIV_SYNC_DEBUG */
	buf_block_t*	block)	/*!< in/out: block to bufferfix */
{
#ifdef UNIV_SYNC_DEBUG
	ibool	ret;

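	/* Register the buffer-fix with the debug latch; the s-latch
	taken here is released when the fix count is decremented in
	buf_block_buf_fix_dec(). */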
	ret = rw_lock_s_lock_nowait(&(block->debug_latch), file, line);
	ut_a(ret);
#endif /* UNIV_SYNC_DEBUG */

#ifdef PAGE_ATOMIC_REF_COUNT
	os_atomic_increment_uint32(&block->page.buf_fix_count, 1);
#else
	ut_ad(mutex_own(&block->mutex));

	++block->page.buf_fix_count;
#endif /* PAGE_ATOMIC_REF_COUNT */
}

/*******************************************************************//**
Decrements the bufferfix count. */
UNIV_INLINE
void
buf_block_unfix(
/*============*/
	buf_block_t*	block)	/*!< in/out: block to bufferunfix */
{
	ut_ad(block->page.buf_fix_count > 0);
	ut_ad(!mutex_own(buf_page_get_mutex(&block->page)));

#ifdef PAGE_ATOMIC_REF_COUNT
	os_atomic_decrement_uint32(&block->page.buf_fix_count, 1);
#else
	ib_mutex_t*	block_mutex = buf_page_get_mutex(&block->page);

	mutex_enter(block_mutex);
	--block->page.buf_fix_count;
	mutex_exit(block_mutex);
#endif /* PAGE_ATOMIC_REF_COUNT */
}

/*******************************************************************//**
Decrements the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_dec(
/*==================*/
	buf_block_t*	block)	/*!< in/out: block to bufferunfix */
{
	ut_ad(block->page.buf_fix_count > 0);

#ifdef PAGE_ATOMIC_REF_COUNT
	os_atomic_decrement_uint32(&block->page.buf_fix_count, 1);
#else
	mutex_enter(&block->mutex);
	--block->page.buf_fix_count;
	mutex_exit(&block->mutex);
#endif /* PAGE_ATOMIC_REF_COUNT */

#ifdef UNIV_SYNC_DEBUG
	rw_lock_s_unlock(&block->debug_latch);
#endif
}

/******************************************************************//**
Returns the buffer pool instance given the space id and offset of a page
@return buffer pool */
UNIV_INLINE
buf_pool_t*
buf_pool_get(
/*==========*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: offset of the page within space */
{
	ulint	fold;
	ulint	index;
	ulint	ignored_offset;

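	/* Mask away the low 6 bits of the page number so that all
	pages of the same 64-page read-ahead area map to the same
	buffer pool instance. */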
	ignored_offset = offset >> 6; /* 2log of BUF_READ_AHEAD_AREA (64) */
	fold = buf_page_address_fold(space, ignored_offset);
	index = fold % srv_buf_pool_instances;
	return(&buf_pool_ptr[index]);
}

/******************************************************************//**
Returns the buffer pool instance given its array index
@return buffer pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_array(
/*================*/
	ulint	index)		/*!< in: array index to get
				buffer pool instance from */
{
	ut_ad(index < MAX_BUFFER_POOLS);
	ut_ad(index < srv_buf_pool_instances);
	return(&buf_pool_ptr[index]);
}

/******************************************************************//**
Returns the control block of a file page, NULL if not found.
@return	block, NULL if not found */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_low(
/*==================*/
	buf_pool_t*	buf_pool,/*!< buffer pool instance */
	ulint		space,	/*!< in: space id */
	ulint		offset,	/*!< in: offset of the page within space */
	ulint		fold)	/*!< in: buf_page_address_fold(space, offset) */
{
	buf_page_t*	bpage;

#ifdef UNIV_SYNC_DEBUG
	ulint		hash_fold;
	prio_rw_lock_t*	hash_lock;

	hash_fold = buf_page_address_fold(space, offset);
	ut_ad(hash_fold == fold);

	hash_lock = hash_get_lock(buf_pool->page_hash, fold);
	ut_ad(rw_lock_own(hash_lock, RW_LOCK_EX)
	      || rw_lock_own(hash_lock, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	/* Look for the page in the hash table */

	HASH_SEARCH(hash, buf_pool->page_hash, fold, buf_page_t*, bpage,
		    ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
			  && buf_page_in_file(bpage)),
		    bpage->space == space && bpage->offset == offset);
	if (bpage) {
		ut_a(buf_page_in_file(bpage));
		ut_ad(bpage->in_page_hash);
		ut_ad(!bpage->in_zip_hash);
	}

	return(bpage);
}

/******************************************************************//**
Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@return	block, NULL if not found, or watch sentinel (if watch is true) */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_locked(
/*=====================*/
					/*!< out: pointer to the bpage,
					or NULL; if NULL, hash_lock
					is also NULL. */
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	ulint		space,		/*!< in: space id */
	ulint		offset,		/*!< in: page number */
	prio_rw_lock_t**	lock,	/*!< in/out: lock of the page
					hash acquired if bpage is
					found. NULL otherwise. If NULL
					is passed then the hash_lock
					is released by this function */
	ulint		lock_mode,	/*!< in: RW_LOCK_EX or
					RW_LOCK_SHARED. Ignored if
					lock == NULL */
	bool		watch)		/*!< in: if true, return watch
					sentinel also. */
{
	buf_page_t*	bpage = NULL;
	ulint		fold;
	prio_rw_lock_t*	hash_lock;
	ulint		mode = RW_LOCK_SHARED;

	if (lock != NULL) {
		*lock = NULL;
		ut_ad(lock_mode == RW_LOCK_EX
		      || lock_mode == RW_LOCK_SHARED);
		mode = lock_mode;
	}

	fold = buf_page_address_fold(space, offset);
	hash_lock = hash_get_lock(buf_pool->page_hash, fold);

#ifdef UNIV_SYNC_DEBUG
	ut_ad(!rw_lock_own(hash_lock, RW_LOCK_EX)
	      && !rw_lock_own(hash_lock, RW_LOCK_SHARED));
#endif /* UNIV_SYNC_DEBUG */

	if (mode == RW_LOCK_SHARED) {
		rw_lock_s_lock(hash_lock);
	} else {
		rw_lock_x_lock(hash_lock);
	}

	bpage = buf_page_hash_get_low(buf_pool, space, offset, fold);

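	/* A buffer pool watch sentinel is returned only if the caller
	explicitly asked for one; otherwise a sentinel is treated as
	"page not found". */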
	if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) {
		if (!watch) {
			bpage = NULL;
		}
		goto unlock_and_exit;
	}

	ut_ad(buf_page_in_file(bpage));
	ut_ad(offset == bpage->offset);
	ut_ad(space == bpage->space);

	if (lock == NULL) {
		/* The caller wants us to release the page_hash lock */
		goto unlock_and_exit;
	} else {
		/* To be released by the caller */
		*lock = hash_lock;
		goto exit;
	}

unlock_and_exit:
	if (mode == RW_LOCK_SHARED) {
		rw_lock_s_unlock(hash_lock);
	} else {
		rw_lock_x_unlock(hash_lock);
	}
exit:
	return(bpage);
}

/******************************************************************//**
Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@return	block, NULL if not found */
UNIV_INLINE
buf_block_t*
buf_block_hash_get_locked(
/*=====================*/
					/*!< out: pointer to the bpage,
					or NULL; if NULL, hash_lock
					is also NULL. */
	buf_pool_t*	buf_pool,	/*!< buffer pool instance */
	ulint		space,		/*!< in: space id */
	ulint		offset,		/*!< in: page number */
	prio_rw_lock_t**	lock,	/*!< in/out: lock of the page
					hash acquired if bpage is
					found. NULL otherwise. If NULL
					is passed then the hash_lock
					is released by this function */
	ulint		lock_mode)	/*!< in: RW_LOCK_EX or
					RW_LOCK_SHARED. Ignored if
					lock == NULL */
{
	buf_page_t*	bpage = buf_page_hash_get_locked(buf_pool,
							 space,
							 offset,
							 lock,
							 lock_mode);
	buf_block_t*	block = buf_page_get_block(bpage);

	if (block) {
		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
#ifdef UNIV_SYNC_DEBUG
		ut_ad(!lock || rw_lock_own(*lock, lock_mode));
#endif /* UNIV_SYNC_DEBUG */
		return(block);
	} else if (bpage) {
		/* It is not a block. Just a bpage */
		ut_ad(buf_page_in_file(bpage));

		if (lock) {
			if (lock_mode == RW_LOCK_SHARED) {
				rw_lock_s_unlock(*lock);
			} else {
				rw_lock_x_unlock(*lock);
			}
		}
		*lock = NULL;
		return(NULL);
	}

	ut_ad(!bpage);
	ut_ad(lock == NULL || *lock == NULL);
	return(NULL);
}

/********************************************************************//**
Returns TRUE if the page can be found in the buffer pool hash table.

NOTE that it is possible that the page is not yet read from disk,
though.

@return	TRUE if found in the page hash table */
UNIV_INLINE
ibool
buf_page_peek(
/*==========*/
	ulint	space,	/*!< in: space id */
	ulint	offset)	/*!< in: page number */
{
	buf_pool_t*		buf_pool = buf_pool_get(space, offset);

	return(buf_page_hash_get(buf_pool, space, offset) != NULL);
}

/********************************************************************//**
Releases a compressed-only page acquired with buf_page_get_zip(). */
UNIV_INLINE
void
buf_page_release_zip(
/*=================*/
	buf_page_t*	bpage)		/*!< in: buffer block */
{
	buf_block_t*	block;

	block = (buf_block_t*) bpage;

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_FILE_PAGE:
#ifdef UNIV_SYNC_DEBUG
		rw_lock_s_unlock(&block->debug_latch);
#endif /* UNIV_SYNC_DEBUG */
		/* Fall through */
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		buf_block_unfix(block);
		return;

	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	ut_error;
}

/********************************************************************//**
Decrements the bufferfix count of a buffer control block and releases
a latch, if specified. */
UNIV_INLINE
void
buf_page_release(
/*=============*/
	buf_block_t*	block,		/*!< in: buffer block */
	ulint		rw_latch)	/*!< in: RW_S_LATCH, RW_X_LATCH,
					RW_NO_LATCH */
{
	ut_a(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);

#ifdef UNIV_SYNC_DEBUG
	rw_lock_s_unlock(&(block->debug_latch));
#endif
	if (rw_latch == RW_S_LATCH) {
		rw_lock_s_unlock(&(block->lock));
	} else if (rw_latch == RW_X_LATCH) {
		rw_lock_x_unlock(&(block->lock));
	}

	buf_block_unfix(block);
}

#ifdef UNIV_SYNC_DEBUG
/*********************************************************************//**
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
page if we know the latching order level of the acquired latch. */
UNIV_INLINE
void
buf_block_dbg_add_level(
/*====================*/
	buf_block_t*	block,	/*!< in: buffer page
				where we have acquired latch */
	ulint		level)	/*!< in: latching order level */
{
	sync_thread_add_level(&block->lock, level, FALSE);
}

#endif /* UNIV_SYNC_DEBUG */
/*********************************************************************//**
Get the nth chunk's buffer block in the specified buffer pool.
@return the nth chunk's buffer block. */
UNIV_INLINE
buf_block_t*
buf_get_nth_chunk_block(
/*====================*/
	const buf_pool_t* buf_pool,	/*!< in: buffer pool instance */
	ulint		n,		/*!< in: nth chunk in the buffer pool */
	ulint*		chunk_size)	/*!< out: chunk size */
{
	const buf_chunk_t*	chunk;

	chunk = buf_pool->chunks + n;
	*chunk_size = chunk->size;
	return(chunk->blocks);
}

#ifdef UNIV_DEBUG
/********************************************************************//**
Checks whether buf_pool->zip_mutex is owned and is serving as the block
mutex for the given page.
@return true if buf_pool->zip_mutex is owned. */
UNIV_INLINE
bool
buf_own_zip_mutex_for_page(
/*=======================*/
	const buf_page_t*	bpage)
{
	buf_pool_t*	buf_pool	= buf_pool_from_bpage(bpage);

	ut_ad(buf_page_get_state(bpage) == BUF_BLOCK_ZIP_PAGE
	      || buf_page_get_state(bpage) == BUF_BLOCK_ZIP_DIRTY);
	ut_ad(buf_page_get_mutex(bpage) == &buf_pool->zip_mutex);

	return(mutex_own(&buf_pool->zip_mutex));
}
#endif /* UNIV_DEBUG */

#endif /* !UNIV_HOTBACKUP */
