1 /*****************************************************************************
2 
3 Copyright (c) 1995, 2020, Oracle and/or its affiliates. All Rights Reserved.
4 
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License, version 2.0, as published by the
7 Free Software Foundation.
8 
9 This program is also distributed with certain software (including but not
10 limited to OpenSSL) that is licensed under separate terms, as designated in a
11 particular file or component or in included license documentation. The authors
12 of MySQL hereby grant you an additional permission to link the program and
13 your derivative works with the separately licensed software that they have
14 included with MySQL.
15 
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
19 for more details.
20 
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA
24 
25 *****************************************************************************/
26 
27 #include <sys/types.h>
28 
29 /** @file include/buf0lru.h
30  The database buffer pool LRU replacement algorithm
31 
32  Created 11/5/1995 Heikki Tuuri
33  *******************************************************/
34 
35 #ifndef buf0lru_h
36 #define buf0lru_h
37 
38 #include "buf0types.h"
39 #include "univ.i"
40 #ifndef UNIV_HOTBACKUP
41 #include "ut0byte.h"
42 
43 // Forward declaration
44 struct trx_t;
45 
/** Returns TRUE if less than 25 % of the buffer pool is available. This can be
 used in heuristics to prevent huge transactions eating up the whole buffer
 pool for their locks.
 @return TRUE if less than 25 % of buffer pool left */
ibool buf_LRU_buf_pool_running_out(void);
51 
52 /*#######################################################################
53 These are low-level functions
54 #########################################################################*/
55 
56 /** Minimum LRU list length for which the LRU_old pointer is defined */
57 #define BUF_LRU_OLD_MIN_LEN 512 /* 8 megabytes of 16k pages */
58 #endif                          /* !UNIV_HOTBACKUP */
59 
/** Flushes all dirty pages or removes all pages belonging to a given
tablespace. A PROBLEM: if readahead is being started, what guarantees
that it will not try to read in pages after this operation has completed?
NOTE: intentionally declared outside the !UNIV_HOTBACKUP section above,
so it is visible to hot backup builds as well.
@param[in]  id          tablespace ID
@param[in]  buf_remove  remove or flush strategy
@param[in]  trx         to check if the operation must be interrupted
@param[in]  strict      true, if no page from tablespace can be in
                        buffer pool just after flush */
void buf_LRU_flush_or_remove_pages(space_id_t id, buf_remove_t buf_remove,
                                   const trx_t *trx, bool strict = true);
70 
71 #ifndef UNIV_HOTBACKUP
72 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Insert a compressed block into buf_pool->zip_clean in the LRU order.
Available in debug builds only (UNIV_DEBUG || UNIV_BUF_DEBUG).
@param[in]	bpage	pointer to the block in question */
void buf_LRU_insert_zip_clean(buf_page_t *bpage);
76 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
77 
/** Try to free a block.  If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.
NOTE: this function may temporarily release and relock the
buf_page_get_mutex(). Furthermore, the page frame will no longer be
accessible via bpage. If this function returns true, it will also release
the LRU list mutex.
The caller must hold the LRU list and buf_page_get_mutex() mutexes.
@param[in]	bpage	block to be freed
@param[in]	zip	true if should remove also the compressed page of
                        an uncompressed page
@return true if freed, false otherwise. */
bool buf_LRU_free_page(buf_page_t *bpage, bool zip)
    MY_ATTRIBUTE((warn_unused_result));
91 
/** Try to free a replaceable block.
@param[in,out]	buf_pool	buffer pool instance
@param[in]	scan_all	scan whole LRU list if true, otherwise scan
                                only BUF_LRU_SEARCH_SCAN_THRESHOLD blocks
@return true if found and freed */
bool buf_LRU_scan_and_free_block(buf_pool_t *buf_pool, bool scan_all)
    MY_ATTRIBUTE((warn_unused_result));
99 
/** Returns a free block from the buf_pool.  The block is taken off the
free list.  If it is empty, returns NULL.  Unlike buf_LRU_get_free_block(),
no LRU scanning or flushing is attempted when the free list is empty.
@param[in]	buf_pool	buffer pool instance
@return a free control block, or NULL if the buf_block->free list is empty */
buf_block_t *buf_LRU_get_free_only(buf_pool_t *buf_pool);
105 
/** Returns a free block from the buf_pool. The block is taken off the
free list. If free list is empty, blocks are moved from the end of the
LRU list to the free list.
This function is called from a user thread when it needs a clean
block to read in a page. Note that we only ever get a block from
the free list. Even when we flush a page or find a page in LRU scan
we put it to free list to be used.
* iteration 0:
  * get a block from free list, success:done
  * if buf_pool->try_LRU_scan is set
    * scan LRU up to srv_LRU_scan_depth to find a clean block
    * the above will put the block on free list
    * success:retry the free list
  * flush one dirty page from tail of LRU to disk
    * the above will put the block on free list
    * success: retry the free list
* iteration 1:
  * same as iteration 0 except:
    * scan whole LRU list
    * scan LRU list even if buf_pool->try_LRU_scan is not set
* iteration > 1:
  * same as iteration 1 but sleep 10ms
@param[in,out]	buf_pool	buffer pool instance
@return the free control block, in state BUF_BLOCK_READY_FOR_USE
@see buf_LRU_get_free_only */
buf_block_t *buf_LRU_get_free_block(buf_pool_t *buf_pool)
    MY_ATTRIBUTE((warn_unused_result));
132 
/** Determines if the unzip_LRU list should be used for evicting a victim
instead of the general LRU list.  The decision is based on the collected
I/O vs. page_zip_decompress() statistics (see buf_LRU_stat_t below).
@param[in,out]	buf_pool	buffer pool instance
@return true if should use unzip_LRU */
ibool buf_LRU_evict_from_unzip_LRU(buf_pool_t *buf_pool);
138 
/** Puts a block back to the free list.
@param[in]	block	block must not contain a file page
@see buf_LRU_get_free_only */
void buf_LRU_block_free_non_file_page(buf_block_t *block);
142 
/** Adds a block to the LRU list. Please make sure that the page_size is
 already set when invoking the function, so that we can get correct
 page_size from the buffer page when adding a block into LRU.
 @see buf_unzip_LRU_add_block for the unzip_LRU variant */
void buf_LRU_add_block(buf_page_t *bpage, /*!< in: control block */
                       ibool old); /*!< in: TRUE if should be put to the old
                                   blocks in the LRU list, else put to the
                                   start; if the LRU list is very short, added
                                   to the start regardless of this parameter */
151 
/** Adds a block to the LRU list of decompressed zip pages.
@param[in]	block	control block
@param[in]	old	TRUE if should be put to the end of the list,
                        else put to the start
@see buf_LRU_add_block */
void buf_unzip_LRU_add_block(buf_block_t *block, ibool old);
157 
/** Moves a block to the start of the LRU list.
@param[in]	bpage	control block
@see buf_LRU_add_block */
void buf_LRU_make_block_young(buf_page_t *bpage);
161 
/** Updates buf_pool->LRU_old_ratio.
 @see BUF_LRU_OLD_RATIO_MIN
 @see BUF_LRU_OLD_RATIO_MAX
 @return updated old_pct */
uint buf_LRU_old_ratio_update(
    uint old_pct,  /*!< in: Reserve this percentage of
                   the buffer pool for "old" blocks. */
    ibool adjust); /*!< in: TRUE=adjust the LRU list;
                   FALSE=just assign buf_pool->LRU_old_ratio
                   during the initialization of InnoDB */
/** Update the historical stats that we are collecting for LRU eviction
 policy at the end of each interval: folds buf_LRU_stat_cur into
 buf_LRU_stat_sum and clears buf_LRU_stat_cur (see their declarations
 below). */
void buf_LRU_stat_update(void);
173 
/** Remove one page from LRU list and put it to free list. The caller must hold
the LRU list and block mutexes and have page hash latched in X. The latch and
the block mutexes will be released.
@param[in,out]	bpage		block, must contain a file page and
                                be in a state where it can be freed; there
                                may or may not be a hash index to the page
@param[in]	zip		true if should remove also the compressed page
                                of an uncompressed page
@param[in]	ignore_content	true if should ignore page content, since it
                                could be not initialized
@see buf_LRU_free_page */
void buf_LRU_free_one_page(buf_page_t *bpage, bool zip, bool ignore_content);
185 
/** Adjust LRU hazard pointers if needed.
NOTE(review): presumably called before bpage is removed from or moved
within the LRU list so that in-progress LRU scans do not hold a dangling
hazard pointer — confirm against the implementation in buf0lru.cc. */
void buf_LRU_adjust_hp(buf_pool_t *buf_pool, /*!< in: buffer pool instance */
                       const buf_page_t *bpage); /*!< in: control block */
189 
190 #if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Validates the LRU list.  Available in debug builds only
 (UNIV_DEBUG || UNIV_BUF_DEBUG).
 @return true */
ibool buf_LRU_validate(void);
194 #endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
195 #if defined UNIV_DEBUG_PRINT || defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/** Prints the LRU list.  Available in debug builds only
(UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG). */
void buf_LRU_print(void);
198 #endif /* UNIV_DEBUG_PRINT || UNIV_DEBUG || UNIV_BUF_DEBUG */
199 
200 /** @name Heuristics for detecting index scan @{ */
201 /** The denominator of buf_pool->LRU_old_ratio. */
202 #define BUF_LRU_OLD_RATIO_DIV 1024
203 /** Maximum value of buf_pool->LRU_old_ratio.
204 @see buf_LRU_old_adjust_len
@see buf_LRU_old_ratio_update */
206 #define BUF_LRU_OLD_RATIO_MAX BUF_LRU_OLD_RATIO_DIV
207 /** Minimum value of buf_pool->LRU_old_ratio.
208 @see buf_LRU_old_adjust_len
@see buf_LRU_old_ratio_update
210 The minimum must exceed
211 (BUF_LRU_OLD_TOLERANCE + 5) * BUF_LRU_OLD_RATIO_DIV / BUF_LRU_OLD_MIN_LEN. */
212 #define BUF_LRU_OLD_RATIO_MIN 51
213 
214 #if BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX
215 #error "BUF_LRU_OLD_RATIO_MIN >= BUF_LRU_OLD_RATIO_MAX"
216 #endif
217 #if BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV
218 #error "BUF_LRU_OLD_RATIO_MAX > BUF_LRU_OLD_RATIO_DIV"
219 #endif
220 
221 /** Move blocks to "new" LRU list only if the first access was at
222 least this many milliseconds ago.  Not protected by any mutex or latch. */
223 extern uint buf_LRU_old_threshold_ms;
224 /* @} */
225 
226 /** @brief Statistics for selecting the LRU list for eviction.
227 
228 These statistics are not 'of' LRU but 'for' LRU.  We keep count of I/O
229 and page_zip_decompress() operations.  Based on the statistics we decide
230 if we want to evict from buf_pool->unzip_LRU or buf_pool->LRU. */
struct buf_LRU_stat_t {
  ulint io;    /**< Counter of buffer pool I/O operations. */
  ulint unzip; /**< Counter of page_zip_decompress operations.
               Instances: buf_LRU_stat_cur (current interval) and
               buf_LRU_stat_sum (running sum), maintained by
               buf_LRU_stat_update(). */
};
235 
236 /** Current operation counters.  Not protected by any mutex.
237 Cleared by buf_LRU_stat_update(). */
238 extern buf_LRU_stat_t buf_LRU_stat_cur;
239 
240 /** Running sum of past values of buf_LRU_stat_cur.
241 Updated by buf_LRU_stat_update(). Accesses protected by memory barriers. */
242 extern buf_LRU_stat_t buf_LRU_stat_sum;
243 
244 /** Increments the I/O counter in buf_LRU_stat_cur. */
245 #define buf_LRU_stat_inc_io() buf_LRU_stat_cur.io++
246 /** Increments the page_zip_decompress() counter in buf_LRU_stat_cur. */
247 #define buf_LRU_stat_inc_unzip() buf_LRU_stat_cur.unzip++
248 
249 #endif /* !UNIV_HOTBACKUP */
250 
251 #endif
252