/*****************************************************************************

Copyright (c) 1995, 2020, Oracle and/or its affiliates.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA

*****************************************************************************/

/**************************************************/ /**
 @file include/log0log.ic

 Redo log - definition of inlined functions.

 Created 12/9/1995 Heikki Tuuri
 *******************************************************/

#include <cstring>

#include "mach0data.h"
#include "os0file.h"
#include "srv0mon.h"
#include "srv0srv.h"
#include "ut0crc32.h"

#ifdef UNIV_LOG_LSN_DEBUG
#include "mtr0types.h"
#endif /* UNIV_LOG_LSN_DEBUG */

/** @name Log blocks */

/* @{ */
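
/* For orientation, a sketch of the layout of a single redo log block
(an illustrative summary; the authoritative values are the LOG_BLOCK_*
constants in log0log.h, with OS_FILE_LOG_BLOCK_SIZE = 512 assumed here):

  offset  size  field
       0     4  hdr_no          block number; its most significant bit is
                                reused as the flush bit
       4     2  data_len        number of bytes used; its most significant
                                bit is reused as the encrypt bit
       6     2  first_rec_group offset of the first mtr log record group
                                starting in this block, 0 if none
       8     4  checkpoint_no   low 32 bits of the checkpoint number
      12   496  data            LOG_BLOCK_DATA_SIZE bytes of log records
     508     4  checksum        LOG_BLOCK_TRL_SIZE trailer bytes */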

inline bool log_block_get_flush_bit(const byte *log_block) {
  if (LOG_BLOCK_FLUSH_BIT_MASK &
      mach_read_from_4(log_block + LOG_BLOCK_HDR_NO)) {
    return (true);
  }

  return (false);
}

inline void log_block_set_flush_bit(byte *log_block, bool value) {
  uint32_t field = mach_read_from_4(log_block + LOG_BLOCK_HDR_NO);

  ut_a(field != 0);

  if (value) {
    field = field | LOG_BLOCK_FLUSH_BIT_MASK;
  } else {
    field = field & ~LOG_BLOCK_FLUSH_BIT_MASK;
  }

  mach_write_to_4(log_block + LOG_BLOCK_HDR_NO, field);
}

inline bool log_block_get_encrypt_bit(const byte *log_block) {
  if (LOG_BLOCK_ENCRYPT_BIT_MASK &
      mach_read_from_2(log_block + LOG_BLOCK_HDR_DATA_LEN)) {
    return (true);
  }

  return (false);
}

inline void log_block_set_encrypt_bit(byte *log_block, bool value) {
  uint32_t field = mach_read_from_2(log_block + LOG_BLOCK_HDR_DATA_LEN);

  if (value) {
    field = field | LOG_BLOCK_ENCRYPT_BIT_MASK;
  } else {
    field = field & ~LOG_BLOCK_ENCRYPT_BIT_MASK;
  }

  mach_write_to_2(log_block + LOG_BLOCK_HDR_DATA_LEN, field);
}
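
/* Note: the encrypt bit shares its 2-byte field with data_len, occupying
the most significant bit (LOG_BLOCK_ENCRYPT_BIT_MASK). Since the data length
never exceeds OS_FILE_LOG_BLOCK_SIZE (512), the two values cannot collide. */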

inline uint32_t log_block_get_hdr_no(const byte *log_block) {
  return (~LOG_BLOCK_FLUSH_BIT_MASK &
          mach_read_from_4(log_block + LOG_BLOCK_HDR_NO));
}

inline void log_block_set_hdr_no(byte *log_block, uint32_t n) {
  ut_a(n > 0);
  ut_a(n < LOG_BLOCK_FLUSH_BIT_MASK);
  ut_a(n <= LOG_BLOCK_MAX_NO);

  mach_write_to_4(log_block + LOG_BLOCK_HDR_NO, n);
}

inline uint32_t log_block_get_data_len(const byte *log_block) {
  return (mach_read_from_2(log_block + LOG_BLOCK_HDR_DATA_LEN));
}

inline void log_block_set_data_len(byte *log_block, ulint len) {
  mach_write_to_2(log_block + LOG_BLOCK_HDR_DATA_LEN, len);
}

inline uint32_t log_block_get_first_rec_group(const byte *log_block) {
  return (mach_read_from_2(log_block + LOG_BLOCK_FIRST_REC_GROUP));
}

inline void log_block_set_first_rec_group(byte *log_block, uint32_t offset) {
  mach_write_to_2(log_block + LOG_BLOCK_FIRST_REC_GROUP, offset);
}

inline uint32_t log_block_get_checkpoint_no(const byte *log_block) {
  return (mach_read_from_4(log_block + LOG_BLOCK_CHECKPOINT_NO));
}

inline void log_block_set_checkpoint_no(byte *log_block, uint64_t no) {
  mach_write_to_4(log_block + LOG_BLOCK_CHECKPOINT_NO, (uint32_t)no);
}

inline uint32_t log_block_convert_lsn_to_no(lsn_t lsn) {
  return ((uint32_t)(lsn / OS_FILE_LOG_BLOCK_SIZE) % LOG_BLOCK_MAX_NO + 1);
}
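
/* Worked example (illustrative, with OS_FILE_LOG_BLOCK_SIZE = 512): for
lsn = 1044 the containing block has index 1044 / 512 = 2, so the 1-based
block number is 2 % LOG_BLOCK_MAX_NO + 1 = 3. Block numbers wrap around at
LOG_BLOCK_MAX_NO and never take the value 0. */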

inline uint32_t log_block_calc_checksum(const byte *log_block) {
  return (log_checksum_algorithm_ptr.load()(log_block));
}

inline uint32_t log_block_calc_checksum_crc32(const byte *log_block) {
  return (ut_crc32(log_block, OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE));
}
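
/* The checksum covers the whole block except the trailer that stores it:
with OS_FILE_LOG_BLOCK_SIZE = 512 and LOG_BLOCK_TRL_SIZE = 4, the CRC-32
is calculated over the first 508 bytes. */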

inline uint32_t log_block_calc_checksum_none(const byte *log_block) {
  return (LOG_NO_CHECKSUM_MAGIC);
}

inline uint32_t log_block_get_checksum(const byte *log_block) {
  return (mach_read_from_4(log_block + OS_FILE_LOG_BLOCK_SIZE -
                           LOG_BLOCK_CHECKSUM));
}

inline void log_block_set_checksum(byte *log_block, uint32_t checksum) {
  mach_write_to_4(log_block + OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_CHECKSUM,
                  checksum);
}

inline void log_block_store_checksum(byte *log_block) {
  log_block_set_checksum(log_block, log_block_calc_checksum(log_block));
}
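
/* Illustrative sketch (not part of this file's API): a reader that has just
fetched a block from disk could validate it as follows, where `block` is a
hypothetical buffer of OS_FILE_LOG_BLOCK_SIZE bytes:

     if (log_block_get_checksum(block) != log_block_calc_checksum(block)) {
       ... treat the block as incompletely written or corrupted ...
     }
*/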

/* @} */

#ifndef UNIV_HOTBACKUP

inline bool log_needs_free_check(const log_t &log) {
  const sn_t sn = log.sn.load();
  return (sn > log.free_check_limit_sn.load());
}
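
/* free_check_limit_sn is a precomputed threshold: as long as log.sn stays
below it, writers may proceed without further checks, which keeps this fast
path down to two atomic loads. The threshold itself is advanced elsewhere as
checkpointing reclaims space in the redo log. */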

inline bool log_needs_free_check() { return (log_needs_free_check(*log_sys)); }

/** Call this function before starting a mini-transaction. It checks for
free space in the redo log, ensuring that at least
concurrency_safe_free_margin is available. If the space is not available,
it waits until it is. Therefore it is important that the caller holds no
latch that the page cleaner or the log flush process might need. This
includes any page block or file space latch. */
inline void log_free_check() {
  log_t &log = *log_sys;

#ifdef UNIV_DEBUG
  /* This function may be called while holding some latches. This is OK,
  as long as we are not holding any latches on buffer blocks or file spaces.
  The following latches are not held by any thread that frees up redo log
  space. */
  static const latch_level_t latches[] = {
      SYNC_NO_ORDER_CHECK, /* used for non-labeled latches */
      SYNC_RSEGS,          /* rsegs->x_lock in trx_rseg_create() */
      SYNC_UNDO_DDL,       /* undo::ddl_mutex */
      SYNC_UNDO_SPACES,    /* undo::spaces::m_latch */
      SYNC_FTS_CACHE,      /* fts_cache_t::lock */
      SYNC_DICT,           /* dict_sys->mutex in commit_try_rebuild() */
      SYNC_DICT_OPERATION, /* X-latch in commit_try_rebuild() */
      SYNC_INDEX_TREE      /* index->lock */
  };

  sync_allowed_latches check(latches,
                             latches + sizeof(latches) / sizeof(*latches));

  if (sync_check_iterate(check)) {
#ifndef UNIV_NO_ERR_MSGS
    ib::error(ER_IB_MSG_1381)
#else
    ib::error()
#endif
        << "log_free_check() was called while holding an un-listed latch.";
    ut_error;
  }
#endif /* UNIV_DEBUG */

  /* We prefer to wait for the free space now, because at this point we
  are not holding any latches on dirty pages. */

  if (log_needs_free_check(log)) {
    /* We need to wait, because the concurrency margin could be violated
    if we let all threads go forward after making this check now.

    The waiting procedure is unlikely to be needed with a properly
    configured my.cnf. Therefore we extracted the code to a separate
    function, to keep the inlined log_free_check() small. */

    log_free_check_wait(log);
  }
}
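
/* Typical call pattern (an illustrative sketch, not a prescribed API):

     log_free_check();  // called while holding no page or file-space latches
     mtr_t mtr;
     mtr.start();
     ... acquire page latches and apply modifications ...
     mtr.commit();
*/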

constexpr inline lsn_t log_translate_sn_to_lsn(lsn_t sn) {
  return (sn / LOG_BLOCK_DATA_SIZE * OS_FILE_LOG_BLOCK_SIZE +
          sn % LOG_BLOCK_DATA_SIZE + LOG_BLOCK_HDR_SIZE);
}
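
/* Worked example (with LOG_BLOCK_DATA_SIZE = 512 - 12 - 4 = 496): sn = 1000
data bytes span 1000 / 496 = 2 full blocks plus 8 bytes. The lsn counts the
full blocks at 512 bytes each and adds the 12-byte header of the current
block: lsn = 2 * 512 + 8 + 12 = 1044. */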

inline lsn_t log_translate_lsn_to_sn(lsn_t lsn) {
  /* Calculate the sn of the beginning of the log block which contains
  the provided lsn value. */
  const sn_t sn = lsn / OS_FILE_LOG_BLOCK_SIZE * LOG_BLOCK_DATA_SIZE;

  /* Calculate the offset of the provided lsn within the log block.
  The offset includes the LOG_BLOCK_HDR_SIZE bytes of the block's header. */
  const uint32_t diff = lsn % OS_FILE_LOG_BLOCK_SIZE;

  if (diff < LOG_BLOCK_HDR_SIZE) {
    /* The lsn points to some bytes inside the block's header.
    Return the sn for the beginning of the block. Note that sn
    values don't enumerate bytes of block headers, so the value
    of diff does not matter at all. */
    return (sn);
  }

  if (diff > OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {
    /* The lsn points to some bytes inside the block's footer.
    Return the sn for the beginning of the next block. Note that
    sn values don't enumerate bytes of block footers, so the value
    of diff does not matter at all. */
    return (sn + LOG_BLOCK_DATA_SIZE);
  }

  /* Add the offset, but skip the bytes of the block's header. */
  return (sn + diff - LOG_BLOCK_HDR_SIZE);
}
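
/* Worked examples for the inverse mapping (same constants as above):
- lsn = 1044: diff = 20 lies in the data area [12, 508), so
  sn = 2 * 496 + 20 - 12 = 1000 (the round trip of the example above);
- lsn = 1030: diff = 6 points into the header, so the sn of the block's
  beginning, 2 * 496 = 992, is returned;
- lsn = 1535: diff = 511 points into the footer, so the sn of the next
  block's beginning, 2 * 496 + 496 = 1488, is returned. */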

#endif /* !UNIV_HOTBACKUP */

inline bool log_lsn_validate(lsn_t lsn) {
  const uint32_t offset = lsn % OS_FILE_LOG_BLOCK_SIZE;

  return (lsn >= LOG_START_LSN && offset >= LOG_BLOCK_HDR_SIZE &&
          offset < OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE);
}
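
/* Examples (with the constants above and lsn values safely above
LOG_START_LSN): lsn = 10260 is valid, because offset 10260 % 512 = 20 lies
in the data area [12, 508); lsn = 10242 is invalid (offset 2 points into the
block header); lsn = 10751 is invalid (offset 511 points into the trailer). */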

#ifndef UNIV_HOTBACKUP

/** @return total capacity of log files in bytes. */
inline uint64_t log_get_file_capacity(const log_t &log) {
  return (log.files_real_capacity);
}

inline lsn_t log_get_lsn(const log_t &log) {
  return (log_translate_sn_to_lsn(log.sn.load()));
}

inline lsn_t log_get_checkpoint_lsn(const log_t &log) {
  return (log.last_checkpoint_lsn.load());
}

inline lsn_t log_get_checkpoint_age(const log_t &log) {
  const lsn_t last_checkpoint_lsn = log.last_checkpoint_lsn.load();

  const lsn_t current_lsn = log_get_lsn(log);

  if (current_lsn <= last_checkpoint_lsn) {
    /* Writes or reads have been somehow reordered. Note that this
    function neither acquires any latch nor assumes that one is held,
    so the calculated result may already be outdated when the function
    returns. Hence we may as well report age = 0 here: if such a race
    happened, the checkpoint lsn must be close to the current lsn
    anyway. */
    return (0);
  }

  return (current_lsn - last_checkpoint_lsn);
}
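
/* Example: with current_lsn = 10,000,000 and last_checkpoint_lsn = 9,000,000
the age is 1,000,000 bytes of redo that recovery would have to scan if the
server crashed now. Callers typically compare this age against margins
derived from the redo capacity, e.g. when deciding whether to write a new
checkpoint. */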

inline void log_buffer_flush_to_disk(bool sync) {
  log_buffer_flush_to_disk(*log_sys, sync);
}

#endif /* !UNIV_HOTBACKUP */

#if defined(UNIV_HOTBACKUP) && defined(UNIV_DEBUG)
/** Print a log file header.
@param[in]	block	pointer to the log buffer */
inline void meb_log_print_file_hdr(byte *block) {
  ib::info(ER_IB_MSG_626) << "Log file header:"
                          << " format "
                          << mach_read_from_4(block + LOG_HEADER_FORMAT)
                          << " pad1 "
                          << mach_read_from_4(block + LOG_HEADER_PAD1)
                          << " start_lsn "
                          << mach_read_from_8(block + LOG_HEADER_START_LSN)
                          << " creator '" << block + LOG_HEADER_CREATOR << "'"
                          << " checksum " << log_block_get_checksum(block);
}
#endif /* UNIV_HOTBACKUP && UNIV_DEBUG */

#ifndef UNIV_HOTBACKUP

inline lsn_t log_buffer_ready_for_write_lsn(const log_t &log) {
  return (log.recent_written.tail());
}

inline lsn_t log_buffer_dirty_pages_added_up_to_lsn(const log_t &log) {
  return (log.recent_closed.tail());
}

inline lsn_t log_buffer_flush_order_lag(const log_t &log) {
  return (log.recent_closed.capacity());
}

inline bool log_write_to_file_requests_are_frequent(uint64_t interval) {
  /* The interval is expressed in microseconds: anything below 1000us
  (i.e. 1ms) between consecutive write requests counts as frequent. */
  return (interval < 1000);
}

inline bool log_write_to_file_requests_are_frequent(const log_t &log) {
  return (log_write_to_file_requests_are_frequent(
      log.write_to_file_requests_interval.load(std::memory_order_relaxed)));
}

inline bool log_writer_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_writer));
}

inline bool log_write_notifier_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_write_notifier));
}

inline bool log_flusher_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_flusher));
}

inline bool log_flush_notifier_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_flush_notifier));
}

inline bool log_closer_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_closer));
}

inline bool log_checkpointer_is_active() {
  return (srv_thread_is_active(srv_threads.m_log_checkpointer));
}

#endif /* !UNIV_HOTBACKUP */