/*****************************************************************************

Copyright (c) 1995, 2021, Oracle and/or its affiliates.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.

This program is also distributed with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation.  The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have included with MySQL.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License, version 2.0, for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/buf0lru.h
The database buffer pool LRU replacement algorithm

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#ifndef buf0lru_h
#define buf0lru_h

#include "univ.i"
#ifndef UNIV_HOTBACKUP
#include "ut0byte.h"
#include "buf0types.h"

// Forward declaration
struct trx_t;

/******************************************************************//**
Returns TRUE if less than 25% of the buffer pool is available. This can be
used in heuristics to prevent huge transactions from eating up the whole
buffer pool with their locks.
@return TRUE if less than 25% of the buffer pool is left */
ibool
buf_LRU_buf_pool_running_out(void);
/*==============================*/
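/* Hedged usage sketch (not part of this header): a caller can consult
this heuristic before letting a transaction grow its lock footprint
further. The surrounding error handling is hypothetical and only
illustrates the intent.

	if (buf_LRU_buf_pool_running_out()) {
		// Refuse to let this transaction consume more of the
		// buffer pool; the exact error code is up to the caller.
		return(DB_LOCK_TABLE_FULL);
	}
*/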

/*#######################################################################
These are low-level functions
#########################################################################*/

/** Minimum LRU list length for which the LRU_old pointer is defined */
#define BUF_LRU_OLD_MIN_LEN	512	/* 8 megabytes of 16k pages */

/******************************************************************//**
Flushes all dirty pages or removes all pages belonging
to a given tablespace. A PROBLEM: if readahead is being started, what
guarantees that it will not try to read in pages after this operation
has completed? */
void
buf_LRU_flush_or_remove_pages(
/*==========================*/
	ulint		id,		/*!< in: space id */
	buf_remove_t	buf_remove,	/*!< in: remove or flush strategy */
	const trx_t*	trx);		/*!< to check if the operation must
					be interrupted */
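
/* Hedged usage sketch (not part of this header): a tablespace drop
typically removes its pages without writing them out. The strategy
value comes from buf_remove_t in buf0types.h; the NULL trx is only a
placeholder here (pass the caller's trx when interruption checks are
wanted).

	buf_LRU_flush_or_remove_pages(
		space_id, BUF_REMOVE_ALL_NO_WRITE, NULL);
*/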

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
void
buf_LRU_insert_zip_clean(
/*=====================*/
	buf_page_t*	bpage);	/*!< in: pointer to the block in question */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

/******************************************************************//**
Try to free a block.  If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.

NOTE: this function may temporarily release and relock the
buf_page_get_mutex(). Furthermore, the page frame will no longer be
accessible via bpage. If this function returns true, it will also release
the LRU list mutex.

The caller must hold the LRU list and buf_page_get_mutex() mutexes.

@return true if freed, false otherwise. */
bool
buf_LRU_free_page(
/*==============*/
	buf_page_t*	bpage,	/*!< in: block to be freed */
	bool		zip)	/*!< in: true if should remove also the
				compressed page of an uncompressed page */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
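/* Hedged caller-side sketch of the locking contract described above
(not part of this header). buf_page_get_mutex() is real; the LRU list
mutex is assumed here to be buf_pool->LRU_list_mutex, and the block
mutex handling after a successful free is implementation-specific and
deliberately omitted.

	mutex_enter(&buf_pool->LRU_list_mutex);
	BPageMutex*	block_mutex = buf_page_get_mutex(bpage);
	mutex_enter(block_mutex);

	if (buf_LRU_free_page(bpage, true)) {
		// Freed: per the note above, the LRU list mutex has
		// already been released and bpage must no longer be
		// dereferenced.
	} else {
		mutex_exit(block_mutex);
		mutex_exit(&buf_pool->LRU_list_mutex);
	}
*/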
/******************************************************************//**
Try to free a replaceable block.
@return true if found and freed */
bool
buf_LRU_scan_and_free_block(
/*========================*/
	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	bool		scan_all)	/*!< in: scan the whole LRU list
					if true, otherwise scan only
					'old' blocks. */
	MY_ATTRIBUTE((nonnull,warn_unused_result));
/******************************************************************//**
Returns a free block from the buf_pool.  The block is taken off the
free list.  If the free list is empty, returns NULL.
@return a free control block, or NULL if the buf_pool->free list is empty */
buf_block_t*
buf_LRU_get_free_only(
/*==================*/
	buf_pool_t*	buf_pool);	/*!< buffer pool instance */
/******************************************************************//**
Returns a free block from the buf_pool. The block is taken off the
free list. If the free list is empty, blocks are moved from the end of
the LRU list to the free list.
This function is called from a user thread when it needs a clean
block to read in a page. Note that we only ever get a block from
the free list. Even when we flush a page or find a page in the LRU scan,
we put it on the free list to be used.
* iteration 0:
  * get a block from the free list, success: done
  * if buf_pool->try_LRU_scan is set
    * scan LRU up to srv_LRU_scan_depth to find a clean block
    * the above will put the block on the free list
    * success: retry the free list
  * flush one dirty page from the tail of the LRU to disk
    * the above will put the block on the free list
    * success: retry the free list
* iteration 1:
  * same as iteration 0 except:
    * scan the whole LRU list
    * scan the LRU list even if buf_pool->try_LRU_scan is not set
* iteration > 1:
  * same as iteration 1 but sleep 10 ms
(A simplified sketch of this loop follows the declaration below.)
@return the free control block, in state BUF_BLOCK_READY_FOR_USE */
buf_block_t*
buf_LRU_get_free_block(
/*===================*/
	buf_pool_t*	buf_pool)	/*!< in/out: buffer pool instance */
	MY_ATTRIBUTE((nonnull,warn_unused_result));
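/* Hedged, simplified sketch of the loop described above (caller-free
pseudocode in comment form, not the actual implementation; the
single-page flush step is elided and os_thread_sleep() takes
microseconds).

	ulint	n_iterations = 0;

	for (;;) {
		buf_block_t*	block = buf_LRU_get_free_only(buf_pool);

		if (block != NULL) {
			return(block);	// state BUF_BLOCK_READY_FOR_USE
		}

		// Try to free a block from the tail of the LRU list;
		// scan the whole list from the second iteration onwards.
		buf_LRU_scan_and_free_block(buf_pool, n_iterations > 0);

		// (If the scan found nothing clean, a single dirty page
		// would be flushed from the LRU tail here.)

		if (n_iterations > 1) {
			os_thread_sleep(10000);	// 10 ms back-off
		}

		++n_iterations;
	}
*/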
/******************************************************************//**
Determines if the unzip_LRU list should be used for evicting a victim
instead of the general LRU list.
@return TRUE if should use unzip_LRU */
ibool
buf_LRU_evict_from_unzip_LRU(
/*=========================*/
	buf_pool_t*	buf_pool);
/******************************************************************//**
Puts a block back to the free list. */
void
buf_LRU_block_free_non_file_page(
/*=============================*/
	buf_block_t*	block);	/*!< in: block, must not contain a file page */
/******************************************************************//**
Adds a block to the LRU list. The caller must make sure that the
page_size of the buffer page is already set when this function is
invoked, so that the correct page_size can be read from the page while
it is being added to the LRU list. */
void
buf_LRU_add_block(
/*==============*/
	buf_page_t*	bpage,	/*!< in: control block */
	ibool		old);	/*!< in: TRUE if the block should be put
				among the old blocks in the LRU list,
				else put to the start; if the LRU list
				is very short, it is added to the start
				regardless of this parameter */
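/* Hedged usage sketch (not part of this header): a freshly read page
is normally inserted at the "old" end so that a single large scan does
not evict the hot working set; block is assumed to be a buf_block_t*.

	buf_LRU_add_block(&block->page, TRUE);	// TRUE: add to old blocks
*/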
/******************************************************************//**
Adds a block to the LRU list of decompressed zip pages. */
void
buf_unzip_LRU_add_block(
/*====================*/
	buf_block_t*	block,	/*!< in: control block */
	ibool		old);	/*!< in: TRUE if should be put to the end
				of the list, else put to the start */
/******************************************************************//**
Moves a block to the start of the LRU list. */
void
buf_LRU_make_block_young(
/*=====================*/
	buf_page_t*	bpage);	/*!< in: control block */
/******************************************************************//**
Moves a block to the end of the LRU list. */
void
buf_LRU_make_block_old(
/*===================*/
	buf_page_t*	bpage);	/*!< in: control block */
/**********************************************************************//**
Updates buf_pool->LRU_old_ratio.
@return updated old_pct */
uint
buf_LRU_old_ratio_update(
/*=====================*/
	uint	old_pct,/*!< in: Reserve this percentage of
			the buffer pool for "old" blocks. */
	ibool	adjust);/*!< in: TRUE=adjust the LRU list;
			FALSE=just assign buf_pool->LRU_old_ratio
			during the initialization of InnoDB */
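/* Worked example (hedged; the conversion factor is
BUF_LRU_OLD_RATIO_DIV, defined below): an old_pct of 37, e.g. the
default innodb_old_blocks_pct, maps to an internal ratio of
37 * 1024 / 100 = 378, i.e. roughly 37% of the LRU list is kept in the
"old" section once the list is longer than BUF_LRU_OLD_MIN_LEN.

	// Hypothetical call, e.g. when the setting is changed at runtime:
	uint	pct = buf_LRU_old_ratio_update(37, TRUE);
*/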
/********************************************************************//**
Update the historical stats that we are collecting for the LRU eviction
policy at the end of each interval. */
void
buf_LRU_stat_update(void);
/*=====================*/

/******************************************************************//**
Remove one page from the LRU list and put it on the free list. The caller
must hold the LRU list and block mutexes and hold the page hash latch in
X (exclusive) mode. The latch and the block mutexes will be released. */
void
buf_LRU_free_one_page(
/*==================*/
	buf_page_t*	bpage,	/*!< in/out: block, must contain a file page and
				be in a state where it can be freed; there
				may or may not be a hash index to the page */
	bool		zip = true)/*!< in: true if should remove also the
				compressed page of an uncompressed page */
	MY_ATTRIBUTE((nonnull));
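
/* Hedged caller-side sketch of the contract above (pseudocode only;
the latch accessors are deliberately left abstract because their names
are version-specific):

	// Acquire the LRU list mutex, the block mutex and the page hash
	// latch in X mode, following the usual latching order ...
	buf_LRU_free_one_page(bpage);
	// ... per the note above, the hash latch and the block mutexes
	// have been released inside the call.
*/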

/******************************************************************//**
Adjust LRU hazard pointers if needed. */
void
buf_LRU_adjust_hp(
/*==============*/
	buf_pool_t*		buf_pool,/*!< in: buffer pool instance */
	const buf_page_t*	bpage);	/*!< in: control block */

#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Validates the LRU list.
@return TRUE */
ibool
buf_LRU_validate(void);
/*==================*/
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
#if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/**********************************************************************//**
Prints the LRU list. */
void
buf_LRU_print(void);
/*===============*/
#endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */

/** @name Heuristics for detecting index scan @{ */
/** The denominator of buf_pool->LRU_old_ratio. */
#define BUF_LRU_OLD_RATIO_DIV	1024
/** Maximum value of buf_pool->LRU_old_ratio.
@see buf_LRU_old_adjust_len
@see buf_LRU_old_ratio_update */
#define BUF_LRU_OLD_RATIO_MAX	BUF_LRU_OLD_RATIO_DIV
/** Minimum value of buf_pool->LRU_old_ratio.
@see buf_LRU_old_adjust_len
@see buf_LRU_old_ratio_update
The minimum must exceed
(BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
#define BUF_LRU_OLD_RATIO_MIN	51
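
/* Worked check of the constraint above, assuming BUF_LRU_OLD_TOLERANCE
is 20 (its value in buf0lru.cc): (20 + 5) * 1024 / 512 = 50, so the
minimum ratio of 51 just exceeds it. At this lowest allowed setting
roughly 51/1024, i.e. about 5%, of the LRU list is reserved for old
blocks. */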

#if BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX
# error "BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX"
#endif
#if BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV
# error "BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV"
#endif

/** Move blocks to "new" LRU list only if the first access was at
least this many milliseconds ago.  Not protected by any mutex or latch. */
extern uint	buf_LRU_old_threshold_ms;
/* @} */

/** @brief Statistics for selecting the LRU list for eviction.

These statistics are not 'of' LRU but 'for' LRU.  We keep count of I/O
and page_zip_decompress() operations.  Based on the statistics we decide
if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
struct buf_LRU_stat_t
{
	ulint	io;	/**< Counter of buffer pool I/O operations. */
	ulint	unzip;	/**< Counter of page_zip_decompress operations. */
};

/** Current operation counters.  Not protected by any mutex.
Cleared by buf_LRU_stat_update(). */
extern buf_LRU_stat_t	buf_LRU_stat_cur;

/** Running sum of past values of buf_LRU_stat_cur.
Updated by buf_LRU_stat_update(). Accesses protected by memory barriers. */
extern buf_LRU_stat_t	buf_LRU_stat_sum;

/********************************************************************//**
Increments the I/O counter in buf_LRU_stat_cur. */
#define buf_LRU_stat_inc_io() buf_LRU_stat_cur.io++
/********************************************************************//**
Increments the page_zip_decompress() counter in buf_LRU_stat_cur. */
#define buf_LRU_stat_inc_unzip() buf_LRU_stat_cur.unzip++
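
/* Hedged sketch of how these counters are meant to be consumed (the
exact averaging and the weighting factor, BUF_LRU_IO_TO_UNZIP_FACTOR in
buf0lru.cc, are simplified here; the real decision is made by
buf_LRU_evict_from_unzip_LRU()):

	ulint	io_avg    = buf_LRU_stat_sum.io + buf_LRU_stat_cur.io;
	ulint	unzip_avg = buf_LRU_stat_sum.unzip + buf_LRU_stat_cur.unzip;

	// If the workload looks I/O bound (few decompressions relative
	// to weighted I/O), uncompressed frames are cheap to rebuild, so
	// evict from buf_pool->unzip_LRU and keep the compressed copies.
	// Otherwise the workload is CPU bound and we evict from the
	// regular buf_pool->LRU instead.
	bool	use_unzip_LRU = (unzip_avg <= io_avg * WEIGHT);
*/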

#ifndef UNIV_NONINL
#include "buf0lru.ic"
#endif

#endif /* !UNIV_HOTBACKUP */

#endif