/*****************************************************************************

Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License, version 2.0, as published by the
Free Software Foundation.

This program is also distributed with certain software (including but not
limited to OpenSSL) that is licensed under separate terms, as designated in a
particular file or component or in included license documentation. The authors
of MySQL hereby grant you an additional permission to link the program and
your derivative works with the separately licensed software that they have
included with MySQL.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA

*****************************************************************************/

/**************************************************/ /**
 @file include/log0types.h

 Redo log types

 Created 2013-03-15 Sunny Bains
 *******************************************************/

#ifndef log0types_h
#define log0types_h

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include <string>

#include "my_compiler.h"
#include "os0event.h"
#include "os0file.h"
#include "sync0sharded_rw.h"
#include "univ.i"
#include "ut0link_buf.h"
#include "ut0mutex.h"

/** Type used for all log sequence number storage and arithmetic. */
typedef uint64_t lsn_t;

/** Print format for lsn_t values, used in functions like printf. */
#define LSN_PF UINT64PF

/** Alias for atomic based on lsn_t. */
using atomic_lsn_t = std::atomic<lsn_t>;
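
/* Illustrative use of LSN_PF (a minimal sketch showing only the printf-style
format macro; elsewhere InnoDB usually logs through its own logging classes):

  lsn_t lsn = 8716;
  printf("current lsn = " LSN_PF "\n", lsn);
*/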

/** Type used for sn values, which enumerate bytes of data stored in the log.
Note that these values skip bytes of headers and footers of log blocks. */
typedef uint64_t sn_t;

/** Alias for atomic based on sn_t. */
using atomic_sn_t = std::atomic<sn_t>;
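
/* How sn maps to lsn (an illustrative sketch, not the authoritative
implementation; the real conversions live in the redo log code, and the
LOG_BLOCK_HDR_SIZE / LOG_BLOCK_TRL_SIZE constants are defined elsewhere).
Assuming each log block of OS_FILE_LOG_BLOCK_SIZE bytes carries a header and
a trailer, only the remaining bytes count as sn (data) bytes:

  const sn_t payload =
      OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE - LOG_BLOCK_TRL_SIZE;

  // sn counts only payload bytes; lsn also counts headers and trailers:
  lsn_t lsn = LOG_BLOCK_HDR_SIZE + (sn / payload) * OS_FILE_LOG_BLOCK_SIZE +
              sn % payload;
*/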

/** Type used for checkpoint numbers (consecutive checkpoints receive
a number which is increased by one). */
typedef uint64_t checkpoint_no_t;

/** Type used for counters in log_t: flushes_requested and flushes_expected.
They represent the number of requests to flush the redo log to disk. */
typedef std::atomic<int64_t> log_flushes_t;

/** Function used to calculate checksums of log blocks. */
typedef std::atomic<uint32_t (*)(const byte *log_block)> log_checksum_func_t;

/** Clock used to measure time spent in redo log (e.g. when flushing). */
using Log_clock = std::chrono::high_resolution_clock;

/** Time point defined by the Log_clock. */
using Log_clock_point = std::chrono::time_point<Log_clock>;
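
/* Measuring an interval with Log_clock (a minimal sketch using the standard
<chrono> facilities; the variable names are only illustrative):

  const Log_clock_point start = Log_clock::now();
  // ... perform the fsync / write ...
  const Log_clock_point end = Log_clock::now();

  const auto elapsed_us =
      std::chrono::duration_cast<std::chrono::microseconds>(end - start)
          .count();
*/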

/** Supported redo log formats. Stored in LOG_HEADER_FORMAT. */
enum log_header_format_t {
  /** The MySQL 5.7.9 redo log format identifier. We can support recovery
  from this format if the redo log is clean (logically empty). */
  LOG_HEADER_FORMAT_5_7_9 = 1,

  /** Remove MLOG_FILE_NAME and MLOG_CHECKPOINT, introduce MLOG_FILE_OPEN
  redo log record. */
  LOG_HEADER_FORMAT_8_0_1 = 2,

  /** Allow checkpoint_lsn to point to any data byte within redo log (before
  it had to point to the beginning of a group of log records). */
  LOG_HEADER_FORMAT_8_0_3 = 3,

  /** Expand ulint compressed form. */
  LOG_HEADER_FORMAT_8_0_19 = 4,

  /** The redo log format identifier
  corresponding to the current format version. */
  LOG_HEADER_FORMAT_CURRENT = LOG_HEADER_FORMAT_8_0_19
};
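
/* Checking a format value read from the log header (an illustrative sketch,
assuming the caller has already parsed the LOG_HEADER_FORMAT field; the real
validation is performed by the recovery code in log0recv):

  bool is_supported_format(uint32_t format) {
    return format == LOG_HEADER_FORMAT_CURRENT ||
           format == LOG_HEADER_FORMAT_8_0_3 ||
           format == LOG_HEADER_FORMAT_8_0_1 ||
           format == LOG_HEADER_FORMAT_5_7_9;  // only if the log is clean
  }
*/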

/** The state of a log group */
enum class log_state_t {
  /** No corruption detected */
  OK,
  /** Corrupted */
  CORRUPTED
};

/** The recovery implementation. */
struct redo_recover_t;

typedef size_t log_lock_no_t;

/** Handle describing a single reserved range of the redo log (one write
to the log buffer). */
struct Log_handle {
  /** Number of the shard of sn_lock which was s-locked when the range
  was reserved. */
  log_lock_no_t lock_no;

  /** Lsn at which the reserved range starts. */
  lsn_t start_lsn;

  /** Lsn at which the reserved range ends. */
  lsn_t end_lsn;
};
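
/* Typical life cycle of a Log_handle (an illustrative sketch only; the
actual API and its exact signatures are declared in log0log.h and used by the
mtr commit code - treat the function names below as assumptions):

  // Reserve space for `len` data bytes; s-locks one shard of log.sn_lock.
  Log_handle handle = log_buffer_reserve(*log_sys, len);

  // ... copy the mtr log records into the log buffer for
  //     [handle.start_lsn, handle.end_lsn) and report the finished range
  //     through log.recent_written / log.recent_closed ...

  // Finally the handle is closed, releasing the s-latch.
  log_buffer_close(*log_sys, handle);
*/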

/** Redo log - single data structure with state of the redo log system.
In the future, one could consider splitting this into multiple data
structures. */
struct alignas(ut::INNODB_CACHE_LINE_SIZE) log_t {
  /**************************************************/ /**

   @name Users writing to log buffer

   *******************************************************/

  /** @{ */

#ifndef UNIV_HOTBACKUP
  /** Sharded rw-lock which can be used to freeze the redo log lsn.
  When a user thread reserves space in the log, the s-lock is acquired.
  The log archiver (Clone plugin) acquires the x-lock. */
  mutable Sharded_rw_lock sn_lock;

  /** Current sn value. Used to reserve space in the redo log,
  and to acquire exclusive access to the log buffer.
  Represents the number of data bytes that have ever been reserved.
  Bytes of headers and footers of log blocks are not included.
  Protected by: sn_lock. */
  MY_COMPILER_DIAGNOSTIC_PUSH()
  MY_COMPILER_CLANG_WORKAROUND_REF_DOCBUG()
  /**
  @see @ref subsect_redo_log_sn */
  MY_COMPILER_DIAGNOSTIC_POP()
  alignas(ut::INNODB_CACHE_LINE_SIZE) atomic_sn_t sn;

  /** Padding after the sn to avoid false sharing issues for
  constants below (due to changes of sn). */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Pointer to the log buffer, aligned up to OS_FILE_LOG_BLOCK_SIZE.
      The alignment is to ensure that buffer parts specified for file IO write
      operations will be aligned to sector size, which is required e.g. on
      Windows when doing unbuffered file access.
      Protected by: sn_lock. */
      aligned_array_pointer<byte, OS_FILE_LOG_BLOCK_SIZE> buf;

  /** Size of the log buffer expressed in number of data bytes,
  that is excluding bytes for headers and footers of log blocks. */
  atomic_sn_t buf_size_sn;

  /** Size of the log buffer expressed in number of total bytes,
  that is including bytes for headers and footers of log blocks. */
  size_t buf_size;
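
  /* Relation between buf_size and buf_size_sn (an illustrative sketch; the
  authoritative computation is in the log buffer initialization/resize code,
  and LOG_BLOCK_HDR_SIZE / LOG_BLOCK_TRL_SIZE are defined elsewhere):

    // Every OS_FILE_LOG_BLOCK_SIZE bytes of the buffer hold a header and a
    // trailer, so only the remaining bytes count as data (sn) bytes:
    const sn_t data_bytes_per_block =
        OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_HDR_SIZE - LOG_BLOCK_TRL_SIZE;

    const sn_t buf_size_sn =
        buf_size / OS_FILE_LOG_BLOCK_SIZE * data_bytes_per_block;
  */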

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** The recent written buffer.
      Protected by: sn_lock or writer_mutex. */
      Link_buf<lsn_t> recent_written;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** The recent closed buffer.
      Protected by: sn_lock or closer_mutex. */
      Link_buf<lsn_t> recent_closed;
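
  /* How recent_written is used (an illustrative sketch; Link_buf is defined
  in ut0link_buf.h and the method names below reflect that header - treat
  exact signatures as assumptions). After a user thread finishes copying its
  records to the log buffer for [start_lsn, end_lsn), it reports the range,
  and the log writer advances the tail over reported, contiguous ranges:

    // user thread, after copying data into the log buffer:
    log.recent_written.add_link(start_lsn, end_lsn);

    // log writer thread:
    log.recent_written.advance_tail();
    const lsn_t ready_lsn = log.recent_written.tail();  // write up to here
  */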

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Users <=> writer

       *******************************************************/

      /** @{ */

      /** Maximum sn up to which there is free space in both the log buffer
      and the log files. This is a limitation for the end of any write to the
      log buffer. Threads which hit this limit need to wait, and they possibly
      hold latches of dirty pages, making a deadlock possible.
      Protected by: writer_mutex (writes). */
      atomic_sn_t buf_limit_sn;

  /** Up to this lsn, data has been written to disk (fsync not required).
  Protected by: writer_mutex (writes). */
  MY_COMPILER_DIAGNOSTIC_PUSH()
  MY_COMPILER_CLANG_WORKAROUND_REF_DOCBUG()
  /**
  @see @ref subsect_redo_log_write_lsn */
  MY_COMPILER_DIAGNOSTIC_POP()
  alignas(ut::INNODB_CACHE_LINE_SIZE) atomic_lsn_t write_lsn;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Unaligned pointer to array with events, which are used for
      notifications sent from the log write notifier thread to user threads.
      The notifications are sent when write_lsn is advanced. User threads
      wait for write_lsn >= lsn, for some lsn. Log writer advances the
      write_lsn and notifies the log write notifier, which notifies all users
      interested in nearby lsn values (lsn belonging to the same log block).
      Note that false wake-ups are possible, in which case user threads
      simply retry waiting. */
      os_event_t *write_events;

  /** Number of entries in the write_events array. */
  size_t write_events_size;
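
  /* How user threads wait on write_events (an illustrative sketch of the
  protocol described above; the slot mapping is an assumption and the event
  reset / signal-count handling of the real implementation is omitted):

    // Wait until write_lsn >= lsn, using the event assigned to lsn's block:
    const size_t slot =
        (lsn / OS_FILE_LOG_BLOCK_SIZE) % log.write_events_size;

    while (log.write_lsn.load() < lsn) {
      // May wake up spuriously; the condition is simply re-checked.
      os_event_wait(log.write_events[slot]);
    }
  */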

  /** Approx. number of requests to write/flush redo since startup. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)
      std::atomic<uint64_t> write_to_file_requests_total;

  /** How often redo write/flush is requested on average,
  measured in microseconds. Log threads do not spin when
  the write/flush requests are not frequent. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)
      std::atomic<uint64_t> write_to_file_requests_interval;

  /** This padding is probably not needed, left for convenience. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Users <=> flusher

       *******************************************************/

      /** @{ */

      /** Unaligned pointer to array with events, which are used for
      notifications sent from the log flush notifier thread to user threads.
      The notifications are sent when flushed_to_disk_lsn is advanced.
      User threads wait for flushed_to_disk_lsn >= lsn, for some lsn.
      Log flusher advances the flushed_to_disk_lsn and notifies the
      log flush notifier, which notifies all users interested in nearby lsn
      values (lsn belonging to the same log block). Note that false
      wake-ups are possible, in which case user threads simply retry
      waiting. */
      os_event_t *flush_events;

  /** Number of entries in the flush_events array. */
  size_t flush_events_size;

  /** Padding before the frequently updated flushed_to_disk_lsn. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Up to this lsn, data has been flushed to disk (fsynced). */
      atomic_lsn_t flushed_to_disk_lsn;

  /** Padding after the frequently updated flushed_to_disk_lsn. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log flusher thread

       *******************************************************/

      /** @{ */

      /** Last flush start time. Updated just before fsync starts. */
      Log_clock_point last_flush_start_time;

  /** Last flush end time. Updated just after fsync is finished.
  If it is smaller than the start time, then a flush operation is pending. */
  Log_clock_point last_flush_end_time;

  /** Flushing average time (in microseconds). */
  double flush_avg_time;
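
  /* How the flusher timing fields above can be combined (an illustrative
  sketch only; the smoothing factor is an assumption and the actual
  bookkeeping is done by the log flusher thread):

    // A flush is pending if it has started but not yet finished:
    const bool flush_pending = last_flush_end_time < last_flush_start_time;

    // Update the running average of flush time (in microseconds):
    const auto last_us = std::chrono::duration_cast<std::chrono::microseconds>(
                             last_flush_end_time - last_flush_start_time)
                             .count();
    flush_avg_time = 0.9 * flush_avg_time + 0.1 * last_us;
  */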

  /** Mutex which can be used to pause log flusher thread. */
  mutable ib_mutex_t flusher_mutex;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Event used by the log flusher thread to wait for tasks. */
      os_event_t flusher_event;

  /** Padding to avoid any dependency between the log flusher
  and the log writer threads. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log writer thread

       *******************************************************/

      /** @{ */

      /** Space id for pages with log blocks. */
      space_id_t files_space_id;

  /** Size of buffer used for the write-ahead (in bytes). */
  uint32_t write_ahead_buf_size;

  /** Aligned pointer to buffer used for the write-ahead. It is aligned to
  system page size (why?) and is currently limited by constant 64KB. */
  aligned_array_pointer<byte, 64 * 1024> write_ahead_buf;

  /** Up to this file offset in the log files, the write-ahead
  has been done or is not required (for any other reason). */
  uint64_t write_ahead_end_offset;

  /** Aligned buffers for file headers. */
  aligned_array_pointer<byte, OS_FILE_LOG_BLOCK_SIZE> *file_header_bufs;
#endif /* !UNIV_HOTBACKUP */

  /** Some lsn value within the current log file. */
  lsn_t current_file_lsn;

  /** File offset for the current_file_lsn. */
  uint64_t current_file_real_offset;

  /** Up to this file offset we are within the same current log file. */
  uint64_t current_file_end_offset;
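
  /* Mapping an lsn to a real file offset within the current log file (an
  illustrative sketch derived from the three fields above; the authoritative
  conversion, including crossing file boundaries, is implemented in the log
  writer code):

    // Valid as long as the resulting offset stays below
    // current_file_end_offset (i.e. we stay within the current file):
    const uint64_t real_offset =
        current_file_real_offset + (some_lsn - current_file_lsn);
  */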

  /** Number of performed IO operations (only for printing stats). */
  uint64_t n_log_ios;

  /** Size of each single log file (expressed in bytes, including
  file header). */
  uint64_t file_size;

  /** Number of log files. */
  uint32_t n_files;

  /** Total capacity of all the log files (file_size * n_files),
  including headers of the log files. */
  uint64_t files_real_capacity;
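
  /* How the capacity fields relate (an illustrative sketch; the use of
  LOG_FILE_HDR_SIZE, defined elsewhere, is an assumption - the authoritative
  computation is where the capacities are initialized):

    const uint64_t files_real_capacity = file_size * n_files;

    // Capacity available for log data, excluding the per-file headers
    // (compare with lsn_real_capacity declared further below):
    const lsn_t lsn_real_capacity =
        files_real_capacity - n_files * LOG_FILE_HDR_SIZE;
  */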

  /** Capacity of redo log files for log writer thread. The log writer
  does not exceed this value. If space is not reclaimed after waiting for
  1 sec, it writes only as much as can fit in the free space, or crashes if
  there is no free space at all (the checkpoint did not advance for 1 sec). */
  lsn_t lsn_capacity_for_writer;

  /** When this margin is being used, the log writer decides to increase
  the concurrency_margin to stop new incoming mini-transactions earlier,
  on a bigger margin. This is used to provide adaptive concurrency margin
  calculation, which we need because we might have an unlimited thread
  concurrency setting or we could miss some log_free_check() calls.
  It is just a best-effort mechanism to help get out of trouble. */
  lsn_t extra_margin;

  /** True if we haven't increased the concurrency_margin since we entered
  the (lsn_capacity_for_margin_inc..lsn_capacity_for_writer] range. This
  allows us to increase the margin only once per issue and wait until the
  issue becomes resolved, while still having an option to increase the margin
  even more if a new issue comes later. */
  bool concurrency_margin_ok;

  /** Maximum allowed concurrency_margin. We never set it higher, even when
  we increase the concurrency_margin in the adaptive solution. */
  lsn_t max_concurrency_margin;
#ifndef UNIV_HOTBACKUP
  /** Mutex which can be used to pause log writer thread. */
  mutable ib_mutex_t writer_mutex;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Event used by the log writer thread to wait for tasks. */
      os_event_t writer_event;

  /** Padding after section for the log writer thread, to avoid any
  dependency between the log writer and the log closer threads. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log closer thread

       *******************************************************/

      /** @{ */

      /** Event used by the log closer thread to wait for tasks. */
      os_event_t closer_event;

  /** Mutex which can be used to pause log closer thread. */
  mutable ib_mutex_t closer_mutex;

  /** Padding after the log closer thread and before the memory used
  for communication between the log flusher and notifier threads. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log flusher <=> flush_notifier

       *******************************************************/

      /** @{ */

      /** Event used by the log flusher thread to notify the log flush
      notifier thread that it should proceed with notifying user threads
      waiting for the advanced flushed_to_disk_lsn (because it has been
      advanced). */
      os_event_t flush_notifier_event;

  /** Mutex which can be used to pause log flush notifier thread. */
  mutable ib_mutex_t flush_notifier_mutex;

  /** Padding. */
  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log writer <=> write_notifier

       *******************************************************/

      /** @{ */

      /** Mutex which can be used to pause log write notifier thread. */
      mutable ib_mutex_t write_notifier_mutex;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** Event used by the log writer thread to notify the log write
      notifier thread that it should proceed with notifying user threads
      waiting for the advanced write_lsn (because it has been advanced). */
      os_event_t write_notifier_event;

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Maintenance

       *******************************************************/

      /** @{ */

      /** Used for stopping the log background threads. */
      std::atomic_bool should_stop_threads;

  /** Number of total I/O operations performed when we printed
  the statistics last time. */
  mutable uint64_t n_log_ios_old;

  /** Wall time when we printed the statistics last time. */
  mutable time_t last_printout_time;

  /** @} */

  /**************************************************/ /**

   @name Recovery

   *******************************************************/

  /** @{ */

  /** Lsn from which recovery has been started. */
  lsn_t recovered_lsn;

  /** Format of the redo log: e.g., LOG_HEADER_FORMAT_CURRENT. */
  uint32_t format;

  /** Corruption status. */
  log_state_t state;

  /** Used only in recovery: recovery scan succeeded up to this lsn. */
  lsn_t scanned_lsn;

#ifdef UNIV_DEBUG

  /** When this is set, writing to the redo log should be disabled.
  We check for this in functions that write to the redo log. */
  bool disable_redo_writes;

  /** DEBUG only - if we copied or initialized the first block in the buffer,
  this is set to the lsn for which we did that. We later ensure that we start
  the redo log at the same lsn. Otherwise it is zero and we would crash when
  trying to start redo then. */
  lsn_t first_block_is_correct_for_lsn;

#endif /* UNIV_DEBUG */

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Fields protected by the log_limits mutex.
             Related to free space in the redo log.

       *******************************************************/

      /** @{ */

      /** Mutex which protects fields: available_for_checkpoint_lsn,
      requested_checkpoint_lsn. It also synchronizes updates of:
      free_check_limit_sn, concurrency_margin and dict_persist_margin.
      It also protects the srv_checkpoint_disabled (together with the
      checkpointer_mutex). */
      mutable ib_mutex_t limits_mutex;

  /** A new checkpoint could be written for this lsn value.
  Up to this lsn value, all dirty pages have been added to flush
  lists and flushed. Updated in the log checkpointer thread by
  taking minimum oldest_modification out of the last dirty pages
  from each flush list. However it will not be bigger than the
  current value of log.buf_dirty_pages_added_up_to_lsn.
  Read by: user threads when requesting fuzzy checkpoint
  Read by: log_print() (printing status of redo)
  Updated by: log_checkpointer
  Protected by: limits_mutex. */
  MY_COMPILER_DIAGNOSTIC_PUSH()
  MY_COMPILER_CLANG_WORKAROUND_REF_DOCBUG()
  /**
  @see @ref subsect_redo_log_available_for_checkpoint_lsn */
  MY_COMPILER_DIAGNOSTIC_POP()
  lsn_t available_for_checkpoint_lsn;

  /** When this is larger than the latest checkpoint, the log checkpointer
  thread will be forced to write a new checkpoint (unless the new latest
  checkpoint lsn would still be smaller than this value).
  Read by: log_checkpointer
  Updated by: user threads (log_free_check() or for sharp checkpoint)
  Protected by: limits_mutex. */
  lsn_t requested_checkpoint_lsn;

  /** Maximum lsn allowed for checkpoint by dict_persist or zero.
  This will be set by dict_persist_to_dd_table_buffer(), which should
  always be called before really making a checkpoint.
  If non-zero, up to this lsn value, dynamic metadata changes have been
  written back to mysql.innodb_dynamic_metadata under dict_persist->mutex
  protection. All dynamic metadata changes after this lsn have to
  be kept in the redo log and must not be discarded. If zero, just ignore it.
  Updated by: DD (when persisting dynamic meta data)
  Updated by: log_checkpointer (reset when checkpoint is written)
  Protected by: limits_mutex. */
  lsn_t dict_max_allowed_checkpoint_lsn;

  /** Whether we should perform checkpoints every
  innodb_log_checkpoint_every ms. Disabled during startup / shutdown.
  Enabled in srv_start_threads.
  Updated by: starting thread (srv_start_threads)
  Read by: log_checkpointer */
  bool periodical_checkpoints_enabled;

  /** Maximum sn up to which there is free space in the redo log.
  Threads check this limit and compare to current log.sn, when they
  are outside mini-transactions and hold no latches. The formula used
  to compute the limitation takes into account maximum size of mtr and
  thread concurrency to include proper margins and avoid issues with
  race condition (in which all threads check the limitation and then
  all proceed with their mini-transactions). Also an extra margin is
  reserved for the DD table buffer cache (dict_persist_margin).
  Read by: user threads (log_free_check())
  Updated by: log_checkpointer (after update of checkpoint_lsn)
  Updated by: log_writer (after increasing concurrency_margin)
  Updated by: DD (after update of dict_persist_margin)
  Protected by (updates only): limits_mutex. */
  atomic_sn_t free_check_limit_sn;

  /** Margin used in calculation of @see free_check_limit_sn.
  Read by: page_cleaners, log_checkpointer
  Updated by: log_writer
  Protected by (updates only): limits_mutex. */
  atomic_sn_t concurrency_margin;

  /** Margin used in calculation of @see free_check_limit_sn.
  Read by: page_cleaners, log_checkpointer
  Updated by: DD
  Protected by (updates only): limits_mutex. */
  atomic_sn_t dict_persist_margin;
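
  /* How free_check_limit_sn relates to the margins above (an illustrative
  sketch only; the authoritative formula lives in the limits-recalculation
  code, and lsn_capacity_for_free_check is declared further below in this
  struct):

    // Free space is counted from the last checkpoint, reduced by margins:
    const lsn_t limit_lsn = last_checkpoint_lsn + lsn_capacity_for_free_check -
                            concurrency_margin - dict_persist_margin;

    free_check_limit_sn = log_translate_lsn_to_sn(limit_lsn);

    // log_free_check()-style check performed by user threads:
    const bool need_wait = log.sn.load() > log.free_check_limit_sn.load();
  */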

  alignas(ut::INNODB_CACHE_LINE_SIZE)

      /** @} */

      /**************************************************/ /**

       @name Log checkpointer thread

       *******************************************************/

      /** @{ */

      /** Event used by the log checkpointer thread to wait for requests. */
      os_event_t checkpointer_event;

  /** Mutex which can be used to pause log checkpointer thread.
  This is used by log_position_lock() together with log_buffer_x_lock(),
  to pause any changes to current_lsn or last_checkpoint_lsn. */
  mutable ib_mutex_t checkpointer_mutex;

  /** Latest checkpoint lsn.
  Read by: user threads, log_print (no protection)
  Read by: log_writer (under writer_mutex)
  Updated by: log_checkpointer (under both mutexes)
  Protected by (updates only): checkpointer_mutex + writer_mutex. */
  MY_COMPILER_DIAGNOSTIC_PUSH()
  MY_COMPILER_CLANG_WORKAROUND_REF_DOCBUG()
  /**
  @see @ref subsect_redo_log_last_checkpoint_lsn */
  MY_COMPILER_DIAGNOSTIC_POP()
  atomic_lsn_t last_checkpoint_lsn;
  /** Next checkpoint number.
  Read by: log_get_last_block (no protection)
  Read by: log_writer (under writer_mutex)
  Updated by: log_checkpointer (under both mutexes)
  Protected by: checkpointer_mutex + writer_mutex. */
  std::atomic<checkpoint_no_t> next_checkpoint_no;

  /** Latest checkpoint wall time.
  Used by (private): log_checkpointer. */
  Log_clock_point last_checkpoint_time;

  /** Aligned buffer used for writing a checkpoint header. It is aligned
  similarly to log.buf.
  Used by (private): log_checkpointer, recovery code */
  aligned_array_pointer<byte, OS_FILE_LOG_BLOCK_SIZE> checkpoint_buf;

  /** @} */

  /**************************************************/ /**

   @name Fields considered constant, updated when log system
         is initialized (log_sys_init()) and not assigned to
         a particular log thread.

   *******************************************************/

  /** @{ */

  /** Capacity of the log files available for log_free_check(). */
  lsn_t lsn_capacity_for_free_check;

  /** Capacity of log files excluding headers of the log files.
  If the checkpoint age exceeds this, it is a serious error,
  because in such a case we have already overwritten the redo log. */
  lsn_t lsn_real_capacity;

  /** When the oldest dirty page age exceeds this value, we start
  an asynchronous preflush of dirty pages. */
  lsn_t max_modified_age_async;

  /** When the oldest dirty page age exceeds this value, we start
  a synchronous flush of dirty pages. */
  lsn_t max_modified_age_sync;

  /** When the checkpoint age exceeds this value, we write checkpoints
  if the lag between oldest_lsn and checkpoint_lsn exceeds
  max_checkpoint_lag. */
  lsn_t max_checkpoint_age_async;

  /** @} */

  /** true if redo logging is disabled. Read and written under
  writer_mutex. */
  bool m_disable;

  /** true if the server is not recoverable. Read and written under
  writer_mutex. */
  bool m_crash_unsafe;

  /** Start LSN of the first redo log file. */
  lsn_t m_first_file_lsn;

#endif /* !UNIV_HOTBACKUP */
};

#endif /* !log0types_h */