1 /*****************************************************************************
2
3 Copyright (c) 1996, 2020, Oracle and/or its affiliates.
4
5 This program is free software; you can redistribute it and/or modify it under
6 the terms of the GNU General Public License, version 2.0, as published by the
7 Free Software Foundation.
8
9 This program is also distributed with certain software (including but not
10 limited to OpenSSL) that is licensed under separate terms, as designated in a
11 particular file or component or in included license documentation. The authors
12 of MySQL hereby grant you an additional permission to link the program and
13 your derivative works with the separately licensed software that they have
14 included with MySQL.
15
16 This program is distributed in the hope that it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
18 FOR A PARTICULAR PURPOSE. See the GNU General Public License, version 2.0,
19 for more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
24
25 *****************************************************************************/
26
27 /** @file trx/trx0undo.cc
28 Transaction undo log
29
30 Created 3/26/1996 Heikki Tuuri
31 *******************************************************/
32
33 #include <stddef.h>
34
35 #include <sql_thd_internal_api.h>
36
37 #include "fsp0fsp.h"
38 #include "ha_prototypes.h"
39 #include "trx0undo.h"
40
41 #include "my_dbug.h"
42
43 #ifndef UNIV_HOTBACKUP
44 #include "clone0clone.h"
45 #include "current_thd.h"
46 #include "dict0dd.h"
47 #include "mach0data.h"
48 #include "mtr0log.h"
49 #include "srv0mon.h"
50 #include "srv0srv.h"
51 #include "srv0start.h"
52 #include "trx0purge.h"
53 #include "trx0rec.h"
54 #include "trx0rseg.h"
55 #include "trx0trx.h"
56
57 /* How should the old versions in the history list be managed?
58 ----------------------------------------------------------
59 If each transaction is given a whole page for its update undo log, file
60 space consumption can be 10 times higher than necessary. Therefore,
61 partly filled update undo log pages should be reusable. But then there
62 is no way individual pages can be ordered so that the ordering agrees
63 with the serialization numbers of the transactions on the pages. Thus,
64 the history list must be formed of undo logs, not their header pages as
65 it was in the old implementation.
66 However, on a single header page the transactions are placed in
67 the order of their serialization numbers. As old versions are purged, we
68 may free the page when the last transaction on the page has been purged.
69 A problem is that the purge has to go through the transactions
70 in the serialization order. This means that we have to look through all
71 rollback segments for the one that has the smallest transaction number
72 in its history list.
73 When should we do a purge? A purge is necessary when space is
74 running out in any of the rollback segments. Then we may have to purge
75 also old version which might be needed by some consistent read. How do
76 we trigger the start of a purge? When a transaction writes to an undo log,
77 it may notice that the space is running out. When a read view is closed,
it may make some history superfluous. The server can have a utility which
79 periodically checks if it can purge some history.
        In a parallelized purge we have the problem that a query thread
81 can remove a delete marked clustered index record before another query
82 thread has processed an earlier version of the record, which cannot then
83 be done because the row cannot be constructed from the clustered index
84 record. To avoid this problem, we will store in the update and delete mark
85 undo record also the columns necessary to construct the secondary index
86 entries which are modified.
87 We can latch the stack of versions of a single clustered index record
88 by taking a latch on the clustered index page. As long as the latch is held,
89 no new versions can be added and no versions removed by undo. But, a purge
90 can still remove old versions from the bottom of the stack. */
91
92 /* How to protect rollback segments, undo logs, and history lists with
93 -------------------------------------------------------------------
94 latches?
95 -------
96 The contention of the trx_sys_t::mutex should be minimized. When a transaction
97 does its first insert or modify in an index, an undo log is assigned for it.
98 Then we must have an x-latch to the rollback segment header.
        When the transaction performs more modifications or rolls back, the undo log is
100 protected with undo_mutex in the transaction.
101 When the transaction commits, its insert undo log is either reset and
102 cached for a fast reuse, or freed. In these cases we must have an x-latch on
103 the rollback segment page. The update undo log is put to the history list. If
104 it is not suitable for reuse, its slot in the rollback segment is reset. In
105 both cases, an x-latch must be acquired on the rollback segment.
106 The purge operation steps through the history list without modifying
107 it until a truncate operation occurs, which can remove undo logs from the end
108 of the list and release undo log segments. In stepping through the list,
109 s-latches on the undo log pages are enough, but in a truncate, x-latches must
110 be obtained on the rollback segment and individual pages. */
111 #endif /* !UNIV_HOTBACKUP */
112
/** Initializes the fields in an undo log segment page.
Forward declaration; the definition appears below, after the redo parser
trx_undo_parse_page_init() which calls it during recovery. */
static void trx_undo_page_init(
    page_t *undo_page, /*!< in: undo log segment page */
    ulint type,        /*!< in: undo log segment type */
    mtr_t *mtr);       /*!< in: mtr */
118
119 #ifndef UNIV_HOTBACKUP
/** Creates and initializes an undo log memory object.
Forward declaration; definition appears later in this file.
@param[in]   rseg     rollback segment memory object
@param[in]   id       slot index within rseg
@param[in]   type     type of the log: TRX_UNDO_INSERT or TRX_UNDO_UPDATE
@param[in]   trx_id   id of the trx for which the undo log is created
@param[in]   xid      X/Open XA transaction identification
@param[in]   page_no  undo log header page number
@param[in]   offset   undo log header byte offset on page
@return own: the undo log memory object */
static trx_undo_t *trx_undo_mem_create(trx_rseg_t *rseg, ulint id, ulint type,
                                       trx_id_t trx_id, const XID *xid,
                                       page_no_t page_no, ulint offset);
132 #endif /* !UNIV_HOTBACKUP */
/** Initializes a cached insert undo log header page for new use. NOTE that this
function has its own log record type MLOG_UNDO_HDR_REUSE. You must NOT change
the operation of this function! (Forward declaration; the definition, which
the redo parser trx_undo_parse_page_header() calls, appears below.)
@return undo log header byte offset on page */
static ulint trx_undo_insert_header_reuse(
    page_t *undo_page, /*!< in/out: insert undo log segment
                       header page, x-latched */
    trx_id_t trx_id,   /*!< in: transaction id */
    mtr_t *mtr);       /*!< in: mtr */
142
143 #ifndef UNIV_HOTBACKUP
144 /** Gets the previous record in an undo log from the previous page.
145 @return undo log record, the page s-latched, NULL if none */
trx_undo_get_prev_rec_from_prev_page(trx_undo_rec_t * rec,page_no_t page_no,ulint offset,bool shared,mtr_t * mtr)146 static trx_undo_rec_t *trx_undo_get_prev_rec_from_prev_page(
147 trx_undo_rec_t *rec, /*!< in: undo record */
148 page_no_t page_no, /*!< in: undo log header page number */
149 ulint offset, /*!< in: undo log header offset on page */
150 bool shared, /*!< in: true=S-latch, false=X-latch */
151 mtr_t *mtr) /*!< in: mtr */
152 {
153 space_id_t space;
154 page_no_t prev_page_no;
155 page_t *prev_page;
156 page_t *undo_page;
157
158 undo_page = page_align(rec);
159
160 prev_page_no = flst_get_prev_addr(
161 undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr)
162 .page;
163
164 if (prev_page_no == FIL_NULL) {
165 return (nullptr);
166 }
167
168 space = page_get_space_id(undo_page);
169
170 bool found;
171 const page_size_t &page_size = fil_space_get_page_size(space, &found);
172
173 ut_ad(found);
174
175 buf_block_t *block = buf_page_get(page_id_t(space, prev_page_no), page_size,
176 shared ? RW_S_LATCH : RW_X_LATCH, mtr);
177
178 buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
179
180 prev_page = buf_block_get_frame(block);
181
182 return (trx_undo_page_get_last_rec(prev_page, page_no, offset));
183 }
184
185 /** Gets the previous record in an undo log.
186 @return undo log record, the page s-latched, NULL if none */
trx_undo_get_prev_rec(trx_undo_rec_t * rec,page_no_t page_no,ulint offset,bool shared,mtr_t * mtr)187 trx_undo_rec_t *trx_undo_get_prev_rec(
188 trx_undo_rec_t *rec, /*!< in: undo record */
189 page_no_t page_no, /*!< in: undo log header page number */
190 ulint offset, /*!< in: undo log header offset on page */
191 bool shared, /*!< in: true=S-latch, false=X-latch */
192 mtr_t *mtr) /*!< in: mtr */
193 {
194 trx_undo_rec_t *prev_rec;
195
196 prev_rec = trx_undo_page_get_prev_rec(rec, page_no, offset);
197
198 if (prev_rec) {
199 return (prev_rec);
200 }
201
202 /* We have to go to the previous undo log page to look for the
203 previous record */
204
205 return (
206 trx_undo_get_prev_rec_from_prev_page(rec, page_no, offset, shared, mtr));
207 }
208
209 /** Gets the next record in an undo log from the next page.
210 @param[in] space undo log header space
211 @param[in] page_size page size
212 @param[in] undo_page undo log page
213 @param[in] page_no undo log header page number
214 @param[in] offset undo log header offset on page
215 @param[in] mode latch mode: RW_S_LATCH or RW_X_LATCH
216 @param[in,out] mtr mini-transaction
217 @return undo log record, the page latched, NULL if none */
trx_undo_get_next_rec_from_next_page(space_id_t space,const page_size_t & page_size,const page_t * undo_page,page_no_t page_no,ulint offset,ulint mode,mtr_t * mtr)218 static trx_undo_rec_t *trx_undo_get_next_rec_from_next_page(
219 space_id_t space, const page_size_t &page_size, const page_t *undo_page,
220 page_no_t page_no, ulint offset, ulint mode, mtr_t *mtr) {
221 const trx_ulogf_t *log_hdr;
222 page_no_t next_page_no;
223 page_t *next_page;
224 ulint next;
225
226 if (page_no == page_get_page_no(undo_page)) {
227 log_hdr = undo_page + offset;
228 next = mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG);
229
230 if (next != 0) {
231 return (nullptr);
232 }
233 }
234
235 next_page_no = flst_get_next_addr(
236 undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr)
237 .page;
238 if (next_page_no == FIL_NULL) {
239 return (nullptr);
240 }
241
242 const page_id_t next_page_id(space, next_page_no);
243
244 if (mode == RW_S_LATCH) {
245 next_page = trx_undo_page_get_s_latched(next_page_id, page_size, mtr);
246 } else {
247 ut_ad(mode == RW_X_LATCH);
248 next_page = trx_undo_page_get(next_page_id, page_size, mtr);
249 }
250
251 return (trx_undo_page_get_first_rec(next_page, page_no, offset));
252 }
253
254 /** Gets the next record in an undo log.
255 @return undo log record, the page s-latched, NULL if none */
trx_undo_get_next_rec(trx_undo_rec_t * rec,page_no_t page_no,ulint offset,mtr_t * mtr)256 trx_undo_rec_t *trx_undo_get_next_rec(
257 trx_undo_rec_t *rec, /*!< in: undo record */
258 page_no_t page_no, /*!< in: undo log header page number */
259 ulint offset, /*!< in: undo log header offset on page */
260 mtr_t *mtr) /*!< in: mtr */
261 {
262 space_id_t space;
263 trx_undo_rec_t *next_rec;
264
265 next_rec = trx_undo_page_get_next_rec(rec, page_no, offset);
266
267 if (next_rec) {
268 return (next_rec);
269 }
270
271 space = page_get_space_id(page_align(rec));
272
273 bool found;
274 const page_size_t &page_size = fil_space_get_page_size(space, &found);
275
276 ut_ad(found);
277
278 return (trx_undo_get_next_rec_from_next_page(
279 space, page_size, page_align(rec), page_no, offset, RW_S_LATCH, mtr));
280 }
281
282 /** Gets the first record in an undo log.
283 @param[out] modifier_trx_id the modifier trx identifier.
284 @param[in] space undo log header space
285 @param[in] page_size page size
286 @param[in] page_no undo log header page number
287 @param[in] offset undo log header offset on page
288 @param[in] mode latching mode: RW_S_LATCH or RW_X_LATCH
289 @param[in,out] mtr mini-transaction
290 @return undo log record, the page latched, NULL if none */
trx_undo_get_first_rec(trx_id_t * modifier_trx_id,space_id_t space,const page_size_t & page_size,page_no_t page_no,ulint offset,ulint mode,mtr_t * mtr)291 trx_undo_rec_t *trx_undo_get_first_rec(trx_id_t *modifier_trx_id,
292 space_id_t space,
293 const page_size_t &page_size,
294 page_no_t page_no, ulint offset,
295 ulint mode, mtr_t *mtr) {
296 page_t *undo_page;
297 trx_undo_rec_t *rec;
298
299 const page_id_t page_id(space, page_no);
300
301 if (mode == RW_S_LATCH) {
302 undo_page = trx_undo_page_get_s_latched(page_id, page_size, mtr);
303 } else {
304 undo_page = trx_undo_page_get(page_id, page_size, mtr);
305 }
306
307 if (modifier_trx_id != nullptr) {
308 trx_ulogf_t *undo_header = undo_page + offset;
309 *modifier_trx_id = mach_read_from_8(undo_header + TRX_UNDO_TRX_ID);
310 }
311
312 rec = trx_undo_page_get_first_rec(undo_page, page_no, offset);
313
314 if (rec) {
315 return (rec);
316 }
317
318 return (trx_undo_get_next_rec_from_next_page(space, page_size, undo_page,
319 page_no, offset, mode, mtr));
320 }
321
322 /*============== UNDO LOG FILE COPY CREATION AND FREEING ==================*/
323
/** Writes the mtr log entry of an undo log page initialization.
Emits a single MLOG_UNDO_INIT record followed by the compressed segment
type; trx_undo_parse_page_init() replays it during recovery, so the
record layout here must stay in sync with that parser. */
UNIV_INLINE
void trx_undo_page_init_log(page_t *undo_page, /*!< in: undo log page */
                            ulint type,        /*!< in: undo log type */
                            mtr_t *mtr)        /*!< in: mtr */
{
  mlog_write_initial_log_record(undo_page, MLOG_UNDO_INIT, mtr);

  mlog_catenate_ulint_compressed(mtr, type);
}
334 #else /* !UNIV_HOTBACKUP */
335 #define trx_undo_page_init_log(undo_page, type, mtr) ((void)0)
336 #endif /* !UNIV_HOTBACKUP */
337
338 /** Parses the redo log entry of an undo log page initialization.
339 @return end of log record or NULL */
trx_undo_parse_page_init(const byte * ptr,const byte * end_ptr,page_t * page,mtr_t * mtr)340 byte *trx_undo_parse_page_init(const byte *ptr, /*!< in: buffer */
341 const byte *end_ptr, /*!< in: buffer end */
342 page_t *page, /*!< in: page or NULL */
343 mtr_t *mtr) /*!< in: mtr or NULL */
344 {
345 ulint type;
346
347 type = mach_parse_compressed(&ptr, end_ptr);
348
349 if (ptr == nullptr) {
350 return (nullptr);
351 }
352
353 if (page) {
354 trx_undo_page_init(page, type, mtr);
355 }
356
357 return (const_cast<byte *>(ptr));
358 }
359
360 /** Initializes the fields in an undo log segment page. */
trx_undo_page_init(page_t * undo_page,ulint type,mtr_t * mtr)361 static void trx_undo_page_init(
362 page_t *undo_page, /*!< in: undo log segment page */
363 ulint type, /*!< in: undo log segment type */
364 mtr_t *mtr) /*!< in: mtr */
365 {
366 trx_upagef_t *page_hdr;
367
368 page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
369
370 mach_write_to_2(page_hdr + TRX_UNDO_PAGE_TYPE, type);
371
372 mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START,
373 TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
374 mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE,
375 TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
376
377 fil_page_set_type(undo_page, FIL_PAGE_UNDO_LOG);
378
379 trx_undo_page_init_log(undo_page, type, mtr);
380 }
381
382 #ifndef UNIV_HOTBACKUP
/** Creates a new undo log segment in file.
Reserves file space, creates the file segment, initializes the segment
header page and registers the page in a free slot of the rollback
segment header. Note: 'rseg' is only used for the mutex debug assertion.
@return DB_SUCCESS if page creation OK possible error codes are:
DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE */
static MY_ATTRIBUTE((warn_unused_result)) dberr_t trx_undo_seg_create(
    trx_rseg_t *rseg MY_ATTRIBUTE((unused)), /*!< in: rollback segment */
    trx_rsegf_t *rseg_hdr, /*!< in: rollback segment header, page
                           x-latched */
    ulint type,            /*!< in: type of the segment: TRX_UNDO_INSERT or
                           TRX_UNDO_UPDATE */
    ulint *id,             /*!< out: slot index within rseg header */
    page_t **undo_page,
    /*!< out: segment header page x-latched, NULL
    if there was an error */
    mtr_t *mtr) /*!< in: mtr */
{
  ulint slot_no = ULINT_UNDEFINED;
  space_id_t space;
  buf_block_t *block;
  trx_upagef_t *page_hdr;
  trx_usegf_t *seg_hdr;
  ulint n_reserved;
  bool success;
  dberr_t err = DB_SUCCESS;

  ut_ad(mtr != nullptr);
  ut_ad(id != nullptr);
  ut_ad(rseg_hdr != nullptr);
  ut_ad(mutex_own(&(rseg->mutex)));

  /* Debug fault injection: skip the slot search so that slot_no stays
  ULINT_UNDEFINED and the too-many-transactions path is exercised. */
#ifdef UNIV_DEBUG
  if (!srv_inject_too_many_concurrent_trxs)
#endif
  {
    slot_no = trx_rsegf_undo_find_free(rseg_hdr, mtr);
  }
  if (slot_no == ULINT_UNDEFINED) {
    ib::error(ER_IB_MSG_1212)
        << "Cannot find a free slot for an undo log."
           " You may have too many active transactions running concurrently."
           " Please add more rollback segments or undo tablespaces.";

    return (DB_TOO_MANY_CONCURRENT_TRXS);
  }

  space = page_get_space_id(page_align(rseg_hdr));

  /* Reserve 2 extents up front so the segment creation below cannot
  run out of space half-way; released again after the allocation. */
  success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO, mtr);
  if (!success) {
    return (DB_OUT_OF_FILE_SPACE);
  }

  /* Allocate a new file segment for the undo log */
  block = fseg_create_general(space, 0, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
                              TRUE, mtr);

  fil_space_release_free_extents(space, n_reserved);

  if (block == nullptr) {
    /* No space left */

    return (DB_OUT_OF_FILE_SPACE);
  }

  buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);

  *undo_page = buf_block_get_frame(block);

  page_hdr = *undo_page + TRX_UNDO_PAGE_HDR;
  seg_hdr = *undo_page + TRX_UNDO_SEG_HDR;

  /* Initialize the page header fields (covered by MLOG_UNDO_INIT). */
  trx_undo_page_init(*undo_page, type, mtr);

  /* The segment header follows the page header: move the free offset
  past it. */
  mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE,
                   TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE, MLOG_2BYTES, mtr);

  /* No undo log header exists on the page yet. */
  mlog_write_ulint(seg_hdr + TRX_UNDO_LAST_LOG, 0, MLOG_2BYTES, mtr);

  /* Start the page list of the segment with this header page. */
  flst_init(seg_hdr + TRX_UNDO_PAGE_LIST, mtr);

  flst_add_last(seg_hdr + TRX_UNDO_PAGE_LIST, page_hdr + TRX_UNDO_PAGE_NODE,
                mtr);

  /* Publish the new segment in the rollback segment slot. */
  trx_rsegf_set_nth_undo(rseg_hdr, slot_no, page_get_page_no(*undo_page), mtr);
  *id = slot_no;

  MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);

  return (err);
}
472
/** Writes the mtr log entry of an undo log header initialization.
Emits MLOG_UNDO_HDR_CREATE followed by the compressed transaction id;
trx_undo_parse_page_header() replays it during recovery, so the record
layout here must stay in sync with that parser. */
UNIV_INLINE
void trx_undo_header_create_log(
    const page_t *undo_page, /*!< in: undo log header page */
    trx_id_t trx_id,         /*!< in: transaction id */
    mtr_t *mtr)              /*!< in: mtr */
{
  mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_CREATE, mtr);

  mlog_catenate_ull_compressed(mtr, trx_id);
}
484 #else /* !UNIV_HOTBACKUP */
485 #define trx_undo_header_create_log(undo_page, trx_id, mtr) ((void)0)
486 #endif /* !UNIV_HOTBACKUP */
487
/** Creates a new undo log header in file. NOTE that this function has its own
log record type MLOG_UNDO_HDR_CREATE. You must NOT change the operation of
this function! The page writes below deliberately use the unlogged
mach_write_* routines: the single MLOG_UNDO_HDR_CREATE record written at
the end replays this entire function during recovery.
@return header byte offset on page */
static ulint trx_undo_header_create(
    page_t *undo_page, /*!< in/out: undo log segment
                       header page, x-latched; it is
                       assumed that there is
                       TRX_UNDO_LOG_HDR_SIZE bytes
                       free space on it */
    trx_id_t trx_id,   /*!< in: transaction id */
    mtr_t *mtr)        /*!< in: mtr */
{
  trx_upagef_t *page_hdr;
  trx_usegf_t *seg_hdr;
  trx_ulogf_t *log_hdr;
  ulint prev_log;
  ulint free;
  ulint new_free;

  ut_ad(mtr && undo_page);

  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;

  /* The new header starts at the current free offset of the page. */
  free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);

  log_hdr = undo_page + free;

  /* Only the old-style header size is reserved here; space for the
  XA XID / GTID fields is added later by
  trx_undo_header_add_space_for_xid(). */
  new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;

  /* The full header must still fit on the page with some margin. */
  ut_a(free + TRX_UNDO_LOG_HDR_SIZE < UNIV_PAGE_SIZE - 100);

  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);

  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);

  mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);

  /* Chain the new header after the previous last log on this page. */
  prev_log = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);

  if (prev_log != 0) {
    trx_ulogf_t *prev_log_hdr;

    prev_log_hdr = undo_page + prev_log;

    mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, free);
  }

  mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, free);

  /* NOTE(review): redundant reassignment; log_hdr already has this
  value from above. Kept, since this function must not be changed. */
  log_hdr = undo_page + free;

  mach_write_to_2(log_hdr + TRX_UNDO_DEL_MARKS, TRUE);

  mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
  mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);

  mach_write_to_1(log_hdr + TRX_UNDO_FLAGS, 0);
  mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);

  mach_write_to_2(log_hdr + TRX_UNDO_NEXT_LOG, 0);
  mach_write_to_2(log_hdr + TRX_UNDO_PREV_LOG, prev_log);

  /* Write the log record about the header creation */
  trx_undo_header_create_log(undo_page, trx_id, mtr);

  return (free);
}
557
558 #ifndef UNIV_HOTBACKUP
/** Write X/Open XA Transaction Identification (XID) to undo log header.
Unlike trx_undo_header_create(), these writes go through mlog_write_*
and are therefore individually redo-logged via the mtr. The counterpart
reader is trx_undo_read_xid(). */
static void trx_undo_write_xid(
    trx_ulogf_t *log_hdr, /*!< in: undo log header */
    const XID *xid,       /*!< in: X/Open XA Transaction Identification */
    mtr_t *mtr)           /*!< in: mtr */
{
  /* 4-byte format id, gtrid length and bqual length. */
  mlog_write_ulint(log_hdr + TRX_UNDO_XA_FORMAT,
                   static_cast<ulint>(xid->get_format_id()), MLOG_4BYTES, mtr);

  mlog_write_ulint(log_hdr + TRX_UNDO_XA_TRID_LEN,
                   static_cast<ulint>(xid->get_gtrid_length()), MLOG_4BYTES,
                   mtr);

  mlog_write_ulint(log_hdr + TRX_UNDO_XA_BQUAL_LEN,
                   static_cast<ulint>(xid->get_bqual_length()), MLOG_4BYTES,
                   mtr);

  /* Fixed-size XID data payload. */
  mlog_write_string(log_hdr + TRX_UNDO_XA_XID,
                    reinterpret_cast<const byte *>(xid->get_data()),
                    XIDDATASIZE, mtr);
}
580
trx_undo_gtid_flush_prepare(trx_t * trx)581 void trx_undo_gtid_flush_prepare(trx_t *trx) {
582 /* Only relevant for prepared transaction. */
583 if (!trx_state_eq(trx, TRX_STATE_PREPARED)) {
584 return;
585 }
586 /* Only external transactions have GTID for XA PREPARE. */
587 if (trx_is_mysql_xa(trx)) {
588 return;
589 }
590 /* Wait for XA Prepare GTID to flush. */
591 auto >id_persistor = clone_sys->get_gtid_persistor();
592 gtid_persistor.wait_flush(true, false, false, nullptr);
593 }
594
/** Ensure the transaction has an update undo segment when its GTID must
be persisted, allocating one for insert-only transactions if needed.
@param[in,out] trx       transaction
@param[in]     prepare   true when called during (XA) prepare
@param[in]     rollback  true when called during rollback
@return DB_SUCCESS, or the error from undo segment assignment */
dberr_t trx_undo_gtid_add_update_undo(trx_t *trx, bool prepare, bool rollback) {
  /* Prepare and rollback are mutually exclusive callers. */
  ut_ad(!(prepare && rollback));
  /* Check if GTID persistence is needed. */
  auto &gtid_persistor = clone_sys->get_gtid_persistor();
  bool alloc = gtid_persistor.trx_check_set(trx, prepare, rollback);

  if (!alloc) {
    return (DB_SUCCESS);
  }

  /* For GTID persistence we need update undo segment. Allocate update
  undo segment here if it is insert only transaction. If no undo segment
  is allocated yet, then transaction didn't do any modification and
  no GTID would be allotted to it. */
  auto undo_ptr = &trx->rsegs.m_redo;
  dberr_t db_err = DB_SUCCESS;
  if (undo_ptr->is_insert_only()) {
    ut_ad(!rollback);
    /* The undo mutex protects the assignment of the undo log. */
    mutex_enter(&trx->undo_mutex);
    db_err = trx_undo_assign_undo(trx, undo_ptr, TRX_UNDO_UPDATE);
    mutex_exit(&trx->undo_mutex);
  }
  /* In rare cases we might find no available update undo segment for insert
  only transactions. It is still fine to return error at prepare stage.
  Cannot do it earlier as GTID information is not known before. Keep the
  debug assert to know if it really happens ever. */
  if (db_err != DB_SUCCESS) {
    ut_ad(false);
    trx->persists_gtid = false;
    ib::error(ER_IB_CLONE_GTID_PERSIST)
        << "Could not allocate undo segment"
        << " slot for persisting GTID. DB Error: " << db_err;
  }
  return (db_err);
}
630
trx_undo_gtid_set(trx_t * trx,trx_undo_t * undo)631 void trx_undo_gtid_set(trx_t *trx, trx_undo_t *undo) {
632 /* Reset GTID flag */
633 undo->flag &= ~TRX_UNDO_FLAG_GTID;
634
635 if (!trx->persists_gtid) {
636 return;
637 }
638
639 /* Verify that we have allocated for GTID */
640 if (!undo->gtid_allocated) {
641 ut_ad(false);
642 ib::error(ER_IB_CLONE_GTID_PERSIST)
643 << "Could not persist GTID as space for GTID is not allocated.";
644 return;
645 }
646 undo->flag |= TRX_UNDO_FLAG_GTID;
647 }
648
/** During recovery, read the GTID (if any) stored in an undo log header
and hand it over to the GTID persister. No-op when the header's
TRX_UNDO_FLAG_GTID bit is not set. */
void trx_undo_gtid_read_and_persist(trx_ulogf_t *undo_header) {
  /* Check if undo log has GTID. */
  auto flag = mach_read_ulint(undo_header + TRX_UNDO_FLAGS, MLOG_1BYTE);
  if ((flag & TRX_UNDO_FLAG_GTID) == 0) {
    return;
  }
  /* Extract and add GTID information of the transaction to the persister. */
  Gtid_desc gtid_desc;

  /* Get GTID format version. */
  gtid_desc.m_version = static_cast<uint32_t>(
      mach_read_from_1(undo_header + TRX_UNDO_LOG_GTID_VERSION));
  /* Get GTID information string (fixed length TRX_UNDO_LOG_GTID_LEN,
  written by trx_undo_gtid_write()). */
  memcpy(&gtid_desc.m_info[0], undo_header + TRX_UNDO_LOG_GTID,
         TRX_UNDO_LOG_GTID_LEN);
  /* Mark GTID valid. */
  gtid_desc.m_is_set = true;

  /* Get GTID persister */
  auto &gtid_persistor = clone_sys->get_gtid_persistor();

  /* No concurrency is involved during recovery but satisfy
  the interface requirement. */
  trx_sys_mutex_enter();
  gtid_persistor.add(gtid_desc);
  trx_sys_mutex_exit();
}
676
/** Write the transaction's GTID (if available) into the undo log header
and persist the updated TRX_UNDO_FLAGS byte. No-op unless the undo
object's TRX_UNDO_FLAG_GTID bit was set by trx_undo_gtid_set(). */
void trx_undo_gtid_write(trx_t *trx, trx_ulogf_t *undo_header, trx_undo_t *undo,
                         mtr_t *mtr) {
  if ((undo->flag & TRX_UNDO_FLAG_GTID) == 0) {
    return;
  }

  /* Reset GTID flag; it is set again below only if a GTID is
  actually written. */
  undo->flag &= ~TRX_UNDO_FLAG_GTID;

  /* We must have allocated for GTID but add a safe check. */
  if (!undo->gtid_allocated) {
    ut_ad(false);
    return;
  }

  Gtid_desc gtid_desc;
  auto &gtid_persistor = clone_sys->get_gtid_persistor();

  gtid_persistor.get_gtid_info(trx, gtid_desc);

  if (gtid_desc.m_is_set) {
    /* Persist GTID version */
    mlog_write_ulint(undo_header + TRX_UNDO_LOG_GTID_VERSION,
                     gtid_desc.m_version, MLOG_1BYTE, mtr);
    /* Persist fixed length GTID */
    ut_ad(TRX_UNDO_LOG_GTID_LEN == GTID_INFO_SIZE);
    mlog_write_string(undo_header + TRX_UNDO_LOG_GTID, &gtid_desc.m_info[0],
                      TRX_UNDO_LOG_GTID_LEN, mtr);
    undo->flag |= TRX_UNDO_FLAG_GTID;
  }
  /* Persist the (possibly cleared) flag byte, read back by
  trx_undo_gtid_read_and_persist() during recovery. */
  mlog_write_ulint(undo_header + TRX_UNDO_FLAGS, undo->flag, MLOG_1BYTE, mtr);
}
709
710 /** Read X/Open XA Transaction Identification (XID) from undo log header */
trx_undo_read_xid(trx_ulogf_t * log_hdr,XID * xid)711 static void trx_undo_read_xid(
712 trx_ulogf_t *log_hdr, /*!< in: undo log header */
713 XID *xid) /*!< out: X/Open XA Transaction Identification */
714 {
715 xid->set_format_id(
716 static_cast<long>(mach_read_from_4(log_hdr + TRX_UNDO_XA_FORMAT)));
717
718 xid->set_gtrid_length(
719 static_cast<long>(mach_read_from_4(log_hdr + TRX_UNDO_XA_TRID_LEN)));
720
721 xid->set_bqual_length(
722 static_cast<long>(mach_read_from_4(log_hdr + TRX_UNDO_XA_BQUAL_LEN)));
723
724 xid->set_data(log_hdr + TRX_UNDO_XA_XID, XIDDATASIZE);
725 }
726
/** Adds space for the XA XID after an undo log old-style header.
The page free/start offsets and the log start offset are pushed forward
so the XID (and optionally GTID) fields fit between the old-style header
end and the first undo record.
@param[in,out]	undo_page	undo log segment header page
@param[in,out]	log_hdr		undo log header
@param[in,out]	mtr		mini transaction
@param[in]	add_gtid	add space for GTID */
static void trx_undo_header_add_space_for_xid(page_t *undo_page,
                                              trx_ulogf_t *log_hdr, mtr_t *mtr,
                                              bool add_gtid) {
  trx_upagef_t *page_hdr;
  ulint free;
  ulint new_free;

  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;

  free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);

  /* free is now the end offset of the old style undo log header */
  ut_a(free == (ulint)(log_hdr - undo_page) + TRX_UNDO_LOG_OLD_HDR_SIZE);

  /* With GTID the header extends to the full TRX_UNDO_LOG_HDR_SIZE;
  otherwise only up to the XA fields (TRX_UNDO_LOG_XA_HDR_SIZE). */
  ulint new_limit = add_gtid ? TRX_UNDO_LOG_HDR_SIZE : TRX_UNDO_LOG_XA_HDR_SIZE;

  new_free = free + (new_limit - TRX_UNDO_LOG_OLD_HDR_SIZE);

  /* Add space for a XID after the header, update the free offset
  fields on the undo log page and in the undo log header */

  mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_START, new_free, MLOG_2BYTES, mtr);

  mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, new_free, MLOG_2BYTES, mtr);

  mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, new_free, MLOG_2BYTES, mtr);
}
759
/** Writes the mtr log entry of an undo log header reuse.
Emits MLOG_UNDO_HDR_REUSE followed by the compressed transaction id;
trx_undo_parse_page_header() replays it during recovery, so the record
layout here must stay in sync with that parser. */
UNIV_INLINE
void trx_undo_insert_header_reuse_log(
    const page_t *undo_page, /*!< in: undo log header page */
    trx_id_t trx_id,         /*!< in: transaction id */
    mtr_t *mtr)              /*!< in: mtr */
{
  mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_REUSE, mtr);

  mlog_catenate_ull_compressed(mtr, trx_id);
}
771 #else /* !UNIV_HOTBACKUP */
772 #define trx_undo_insert_header_reuse_log(undo_page, trx_id, mtr) ((void)0)
773 #endif /* !UNIV_HOTBACKUP */
774
775 /** Parse the redo log entry of an undo log page header create or reuse.
776 @param[in] type MLOG_UNDO_HDR_CREATE or MLOG_UNDO_HDR_REUSE
777 @param[in] ptr redo log record
778 @param[in] end_ptr end of log buffer
779 @param[in,out] page page frame or NULL
780 @param[in,out] mtr mini-transaction or NULL
781 @return end of log record or NULL */
trx_undo_parse_page_header(mlog_id_t type,const byte * ptr,const byte * end_ptr,page_t * page,mtr_t * mtr)782 byte *trx_undo_parse_page_header(mlog_id_t type, const byte *ptr,
783 const byte *end_ptr, page_t *page,
784 mtr_t *mtr) {
785 trx_id_t trx_id = mach_u64_parse_compressed(&ptr, end_ptr);
786
787 if (ptr != nullptr && page != nullptr) {
788 switch (type) {
789 case MLOG_UNDO_HDR_CREATE:
790 trx_undo_header_create(page, trx_id, mtr);
791 return (const_cast<byte *>(ptr));
792 case MLOG_UNDO_HDR_REUSE:
793 trx_undo_insert_header_reuse(page, trx_id, mtr);
794 return (const_cast<byte *>(ptr));
795 default:
796 break;
797 }
798 ut_ad(0);
799 }
800
801 return (const_cast<byte *>(ptr));
802 }
803
/** Initializes a cached insert undo log header page for new use. NOTE that this
function has its own log record type MLOG_UNDO_HDR_REUSE. You must NOT change
the operation of this function! The page writes below deliberately use the
unlogged mach_write_* routines: the single MLOG_UNDO_HDR_REUSE record
written at the end replays this entire function during recovery.
@return undo log header byte offset on page */
static ulint trx_undo_insert_header_reuse(
    page_t *undo_page, /*!< in/out: insert undo log segment
                       header page, x-latched */
    trx_id_t trx_id,   /*!< in: transaction id */
    mtr_t *mtr)        /*!< in: mtr */
{
  trx_upagef_t *page_hdr;
  trx_usegf_t *seg_hdr;
  trx_ulogf_t *log_hdr;
  ulint free;
  ulint new_free;

  ut_ad(mtr && undo_page);

  page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
  seg_hdr = undo_page + TRX_UNDO_SEG_HDR;

  /* The reused header is placed right after the segment header,
  discarding all old contents of the page. */
  free = TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE;

  ut_a(free + TRX_UNDO_LOG_HDR_SIZE < UNIV_PAGE_SIZE - 100);

  log_hdr = undo_page + free;

  new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;

  /* Insert undo data is not needed after commit: we may free all
  the space on the page */

  ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE) ==
       TRX_UNDO_INSERT);

  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);

  mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);

  mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);

  /* NOTE(review): redundant reassignment; log_hdr already has this
  value from above. Kept, since this function must not be changed. */
  log_hdr = undo_page + free;

  mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
  mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);

  mach_write_to_1(log_hdr + TRX_UNDO_FLAGS, 0);
  mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);

  /* Write the log record MLOG_UNDO_HDR_REUSE */
  trx_undo_insert_header_reuse_log(undo_page, trx_id, mtr);

  return (free);
}
858
859 #ifndef UNIV_HOTBACKUP
860 /** Tries to add a page to the undo log segment where the undo log is placed.
861 @return X-latched block if success, else NULL */
trx_undo_add_page(trx_t * trx,trx_undo_t * undo,trx_undo_ptr_t * undo_ptr,mtr_t * mtr)862 buf_block_t *trx_undo_add_page(
863 trx_t *trx, /*!< in: transaction */
864 trx_undo_t *undo, /*!< in: undo log memory object */
865 trx_undo_ptr_t *undo_ptr, /*!< in: assign undo log from
866 referred rollback segment. */
867 mtr_t *mtr) /*!< in: mtr which does not have
868 a latch to any undo log page;
869 the caller must have reserved
870 the rollback segment mutex */
871 {
872 page_t *header_page;
873 buf_block_t *new_block;
874 page_t *new_page;
875 trx_rseg_t *rseg;
876 ulint n_reserved;
877
878 ut_ad(mutex_own(&(trx->undo_mutex)));
879 ut_ad(mutex_own(&(undo_ptr->rseg->mutex)));
880
881 rseg = undo_ptr->rseg;
882
883 if (rseg->get_curr_size() == rseg->max_size) {
884 return (nullptr);
885 }
886
887 header_page = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
888 undo->page_size, mtr);
889
890 if (!fsp_reserve_free_extents(&n_reserved, undo->space, 1, FSP_UNDO, mtr)) {
891 return (nullptr);
892 }
893
894 new_block = fseg_alloc_free_page_general(
895 TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + header_page,
896 undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
897
898 fil_space_release_free_extents(undo->space, n_reserved);
899
900 if (new_block == nullptr) {
901 /* No space left */
902
903 return (nullptr);
904 }
905
906 ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1);
907 buf_block_dbg_add_level(new_block, SYNC_TRX_UNDO_PAGE);
908 undo->last_page_no = new_block->page.id.page_no();
909
910 new_page = buf_block_get_frame(new_block);
911
912 trx_undo_page_init(new_page, undo->type, mtr);
913
914 flst_add_last(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
915 new_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
916 undo->size++;
917 rseg->incr_curr_size();
918
919 return (new_block);
920 }
921
922 /** Frees an undo log page that is not the header page.
923 @return last page number in remaining log */
trx_undo_free_page(trx_rseg_t * rseg,ibool in_history,space_id_t space,page_no_t hdr_page_no,page_no_t page_no,mtr_t * mtr)924 static page_no_t trx_undo_free_page(
925 trx_rseg_t *rseg, /*!< in: rollback segment */
926 ibool in_history, /*!< in: TRUE if the undo log is in the history
927 list */
928 space_id_t space, /*!< in: space */
929 page_no_t hdr_page_no, /*!< in: header page number */
930 page_no_t page_no, /*!< in: page number to free: must not be the
931 header page */
932 mtr_t *mtr) /*!< in: mtr which does not have a latch to any
933 undo log page; the caller must have reserved
934 the rollback segment mutex */
935 {
936 page_t *header_page;
937 page_t *undo_page;
938 fil_addr_t last_addr;
939 trx_rsegf_t *rseg_header;
940 ulint hist_size;
941
942 ut_a(hdr_page_no != page_no);
943 ut_ad(mutex_own(&(rseg->mutex)));
944
945 undo_page =
946 trx_undo_page_get(page_id_t(space, page_no), rseg->page_size, mtr);
947
948 header_page =
949 trx_undo_page_get(page_id_t(space, hdr_page_no), rseg->page_size, mtr);
950
951 flst_remove(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
952 undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
953
954 fseg_free_page(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, space,
955 page_no, false, mtr);
956
957 last_addr =
958 flst_get_last(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST, mtr);
959
960 rseg->decr_curr_size();
961
962 if (in_history) {
963 rseg_header = trx_rsegf_get(space, rseg->page_no, rseg->page_size, mtr);
964
965 hist_size =
966 mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, MLOG_4BYTES, mtr);
967 ut_ad(hist_size > 0);
968 mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, hist_size - 1,
969 MLOG_4BYTES, mtr);
970 }
971
972 return (last_addr.page);
973 }
974
/** Frees the last undo log page.
The caller must hold the rollback segment mutex.
@param[in] trx transaction (debug builds only)
@param[in,out] undo undo log memory copy
@param[in,out] mtr mini-transaction which does not have a latch to any
undo log page or which has allocated the undo log page */
void trx_undo_free_last_page_func(
#ifdef UNIV_DEBUG
    const trx_t *trx, /*!< in: transaction */
#endif /* UNIV_DEBUG */
    trx_undo_t *undo, /*!< in/out: undo log memory copy */
    mtr_t *mtr)       /*!< in/out: mini-transaction which does not
                      have a latch to any undo log page or which
                      has allocated the undo log page */
{
  ut_ad(mutex_own(&trx->undo_mutex));
  /* The header page is never freed by this function; there must be at
  least one trailing page to remove. */
  ut_ad(undo->hdr_page_no != undo->last_page_no);
  ut_ad(undo->size > 0);

  /* trx_undo_free_page() returns the page number of the new last page
  in the remaining log. */
  undo->last_page_no =
      trx_undo_free_page(undo->rseg, FALSE, undo->space, undo->hdr_page_no,
                         undo->last_page_no, mtr);

  undo->size--;
}
996
997 /** Empties an undo log header page of undo records for that undo log.
998 Other undo logs may still have records on that page, if it is an update
999 undo log.
1000 @param[in] space_id Tablespace ID
1001 @param[in] page_size page size
1002 @param[in] hdr_page_no header page number
1003 @param[in] hdr_offset header offset
1004 @param[in,out] mtr mini-transaction */
trx_undo_empty_header_page(space_id_t space_id,const page_size_t & page_size,page_no_t hdr_page_no,ulint hdr_offset,mtr_t * mtr)1005 static void trx_undo_empty_header_page(space_id_t space_id,
1006 const page_size_t &page_size,
1007 page_no_t hdr_page_no, ulint hdr_offset,
1008 mtr_t *mtr) {
1009 page_t *header_page;
1010 trx_ulogf_t *log_hdr;
1011 ulint end;
1012
1013 header_page =
1014 trx_undo_page_get(page_id_t(space_id, hdr_page_no), page_size, mtr);
1015
1016 log_hdr = header_page + hdr_offset;
1017
1018 end = trx_undo_page_get_end(header_page, hdr_page_no, hdr_offset);
1019
1020 mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, end, MLOG_2BYTES, mtr);
1021 }
1022
1023 /** Get page offset up to which undo logs can be truncated.
1024 There are three possibilities.
1025 1. Truncate nothing on this page. Return -1
1026 2. Truncate part of the page. Return the offset
1027 3. Truncate the whole page. Return 0
1028 @param[in] undo undo log to truncate
1029 @param[in] undo_page undo log page to check
1030 @param[in] limit limit up to which undo logs to be truncated
1031 @return page offset to truncate to, 0 for whole page, -1 for nothing. */
trx_undo_page_truncate_offset(trx_undo_t * undo,page_t * undo_page,undo_no_t limit)1032 int trx_undo_page_truncate_offset(trx_undo_t *undo, page_t *undo_page,
1033 undo_no_t limit) {
1034 auto rec = trx_undo_page_get_last_rec(undo_page, undo->hdr_page_no,
1035 undo->hdr_offset);
1036 trx_undo_rec_t *trunc_rec = nullptr;
1037
1038 while (rec != nullptr) {
1039 /* Check if current record has gone below the limit. */
1040 if (trx_undo_rec_get_undo_no(rec) < limit) {
1041 /* If this is the first record on the page, don't truncate anything */
1042 if (trunc_rec == nullptr) {
1043 return (-1);
1044 }
1045
1046 /* Return an offset within the page. */
1047 return (trunc_rec - undo_page);
1048 }
1049
1050 /* Truncate at least up to this record, maybe more */
1051 trunc_rec = rec;
1052 rec = trx_undo_page_get_prev_rec(rec, undo->hdr_page_no, undo->hdr_offset);
1053 }
1054
1055 /* All records on the page are >= limit */
1056 if (undo->last_page_no == undo->hdr_page_no) {
1057 /* This is the header page. Return an offset
1058 if there are any records on the page. */
1059 if (trunc_rec != nullptr) {
1060 return (trunc_rec - undo_page);
1061 }
1062
1063 /* Header page is empty. Do not truncate anything. */
1064 return (-1);
1065 }
1066
1067 /* Truncate the whole page. */
1068 return (0);
1069 }
1070
/** Truncates an undo log from the end. This function is used during a rollback
to free space from an undo log. */
#ifdef UNIV_DEBUG
/**
@param[in] trx transaction for this undo log */
#endif /* UNIV_DEBUG */
/**
@param[in] undo undo log
@param[in] limit all undo records with undo number >= this value
are truncated away. */
void trx_undo_truncate_end_func(
#ifdef UNIV_DEBUG
    const trx_t *trx,
#endif /* UNIV_DEBUG */
    trx_undo_t *undo, undo_no_t limit) {
  ut_ad(mutex_own(&trx->undo_mutex));
  ut_ad(mutex_own(&undo->rseg->mutex));

  mtr_t mtr;

  /* Process one page (the current last page of the log) per
  mini-transaction, starting from the end of the log. */
  for (;;) {
    mtr.start();

    /* Set NO_REDO for temporary undo logs. */
    if (fsp_is_system_temporary(undo->rseg->space_id)) {
      ut_ad(trx->rsegs.m_noredo.rseg == undo->rseg);
      mtr.set_log_mode(MTR_LOG_NO_REDO);
    } else {
      ut_ad(trx->rsegs.m_redo.rseg == undo->rseg);
    }

    const page_id_t page_id(undo->space, undo->last_page_no);

    auto undo_page = trx_undo_page_get(page_id, undo->page_size, &mtr);

    int trunc_offset = trx_undo_page_truncate_offset(undo, undo_page, limit);

    /* If offset is within the page, truncate part of the page and quit.*/
    if (trunc_offset > 0) {
      mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE,
                       trunc_offset, MLOG_2BYTES, &mtr);
      break;
    }

    /* If all recs are < limit, don't truncate anything. */
    if (trunc_offset < 0) {
      break;
    }

    /* Free the last page and move on to the next. */
    ut_ad(undo->last_page_no != undo->hdr_page_no);
    trx_undo_free_last_page(trx, undo, &mtr);

    mtr.commit();
  }

  /* Commit the mini-transaction of the iteration that broke out of
  the loop above (the 'break' paths leave it open). */
  mtr.commit();
}
1129
1130 /** Truncate the head of an undo log.
1131 NOTE that only whole pages are freed; the header page is not
1132 freed, but emptied, if all the records there are below the limit.
1133 @param[in,out] rseg rollback segment
1134 @param[in] hdr_page_no header page number
1135 @param[in] hdr_offset header offset on the page
1136 @param[in] limit first undo number to preserve
1137 (everything below the limit will be truncated) */
trx_undo_truncate_start(trx_rseg_t * rseg,page_no_t hdr_page_no,ulint hdr_offset,undo_no_t limit)1138 void trx_undo_truncate_start(trx_rseg_t *rseg, page_no_t hdr_page_no,
1139 ulint hdr_offset, undo_no_t limit) {
1140 page_t *undo_page;
1141 trx_undo_rec_t *rec;
1142 trx_undo_rec_t *last_rec;
1143 page_no_t page_no;
1144 mtr_t mtr;
1145
1146 ut_ad(mutex_own(&(rseg->mutex)));
1147
1148 if (!limit) {
1149 return;
1150 }
1151 loop:
1152 mtr.start();
1153
1154 if (fsp_is_system_temporary(rseg->space_id)) {
1155 mtr.set_log_mode(MTR_LOG_NO_REDO);
1156 }
1157
1158 rec = trx_undo_get_first_rec(nullptr, rseg->space_id, rseg->page_size,
1159 hdr_page_no, hdr_offset, RW_X_LATCH, &mtr);
1160 if (rec == nullptr) {
1161 /* Already empty */
1162
1163 mtr.commit();
1164
1165 return;
1166 }
1167
1168 undo_page = page_align(rec);
1169
1170 last_rec = trx_undo_page_get_last_rec(undo_page, hdr_page_no, hdr_offset);
1171 if (trx_undo_rec_get_undo_no(last_rec) >= limit) {
1172 mtr.commit();
1173
1174 return;
1175 }
1176
1177 page_no = page_get_page_no(undo_page);
1178
1179 if (page_no == hdr_page_no) {
1180 trx_undo_empty_header_page(rseg->space_id, rseg->page_size, hdr_page_no,
1181 hdr_offset, &mtr);
1182 } else {
1183 trx_undo_free_page(rseg, TRUE, rseg->space_id, hdr_page_no, page_no, &mtr);
1184 }
1185
1186 mtr.commit();
1187
1188 goto loop;
1189 }
1190
1191 /** Frees an undo log segment which is not in the history list.
1192 @param[in] undo undo log
1193 @param[in] noredo whether the undo tablespace is redo logged */
trx_undo_seg_free(const trx_undo_t * undo,bool noredo)1194 static void trx_undo_seg_free(const trx_undo_t *undo, bool noredo) {
1195 trx_rseg_t *rseg;
1196 fseg_header_t *file_seg;
1197 trx_rsegf_t *rseg_header;
1198 trx_usegf_t *seg_header;
1199 ibool finished;
1200 mtr_t mtr;
1201
1202 rseg = undo->rseg;
1203
1204 do {
1205 mtr.start();
1206
1207 if (noredo) {
1208 mtr.set_log_mode(MTR_LOG_NO_REDO);
1209 }
1210
1211 rseg->latch();
1212
1213 seg_header = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
1214 undo->page_size, &mtr) +
1215 TRX_UNDO_SEG_HDR;
1216
1217 file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
1218
1219 finished = fseg_free_step(file_seg, false, &mtr);
1220
1221 if (finished) {
1222 /* Update the rseg header */
1223 rseg_header =
1224 trx_rsegf_get(rseg->space_id, rseg->page_no, rseg->page_size, &mtr);
1225 trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL, &mtr);
1226
1227 rseg->decr_curr_size(undo->size);
1228 MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
1229 }
1230
1231 rseg->unlatch();
1232 mtr.commit();
1233 } while (!finished);
1234 }
1235
1236 /*========== UNDO LOG MEMORY COPY INITIALIZATION =====================*/
1237
/** Creates and initializes an undo log memory object for a newly created
rseg. The memory object is inserted in the appropriate list in the rseg.
@return own: the undo log memory object */
static trx_undo_t *trx_undo_mem_init(
    trx_rseg_t *rseg, /*!< in: rollback segment memory object */
    ulint id,         /*!< in: slot index within rseg */
    page_no_t page_no, /*!< in: undo log segment page number */
    mtr_t *mtr)       /*!< in: mtr */
{
  page_t *undo_page;
  trx_upagef_t *page_header;
  trx_usegf_t *seg_header;
  trx_ulogf_t *undo_header;
  trx_undo_t *undo;
  ulint type;
  ulint state;
  trx_id_t trx_id;
  ulint offset;
  fil_addr_t last_addr;
  page_t *last_page;
  trx_undo_rec_t *rec;
  XID xid;

  ut_a(id < TRX_RSEG_N_SLOTS);

  undo_page = trx_undo_page_get(page_id_t(rseg->space_id, page_no),
                                rseg->page_size, mtr);

  page_header = undo_page + TRX_UNDO_PAGE_HDR;

  /* Read back the on-disk state of the segment: its type (insert or
  update undo), state, and the offset of the last log header. */
  type = mtr_read_ulint(page_header + TRX_UNDO_PAGE_TYPE, MLOG_2BYTES, mtr);
  seg_header = undo_page + TRX_UNDO_SEG_HDR;

  state = mach_read_from_2(seg_header + TRX_UNDO_STATE);

  offset = mach_read_from_2(seg_header + TRX_UNDO_LAST_LOG);

  undo_header = undo_page + offset;

  trx_id = mach_read_from_8(undo_header + TRX_UNDO_TRX_ID);

  auto flag = mtr_read_ulint(undo_header + TRX_UNDO_FLAGS, MLOG_1BYTE, mtr);

  bool xid_exists = ((flag & TRX_UNDO_FLAG_XID) != 0);

  bool gtid_exists = ((flag & TRX_UNDO_FLAG_GTID) != 0);

  /* Read X/Open XA transaction identification if it exists, or
  set it to NULL. */
  xid.reset();

  if (xid_exists) {
    trx_undo_read_xid(undo_header, &xid);
  }

  undo = trx_undo_mem_create(rseg, id, type, trx_id, &xid, page_no, offset);

  undo->dict_operation =
      mtr_read_ulint(undo_header + TRX_UNDO_DICT_TRANS, MLOG_1BYTE, mtr);

  undo->flag = flag;
  undo->gtid_allocated = gtid_exists;

  undo->state = state;
  undo->size = flst_get_len(seg_header + TRX_UNDO_PAGE_LIST);

  /* If the log segment is being freed, the page list is inconsistent! */
  if (state == TRX_UNDO_TO_FREE) {
    goto add_to_list;
  }

  last_addr = flst_get_last(seg_header + TRX_UNDO_PAGE_LIST, mtr);

  undo->last_page_no = last_addr.page;
  undo->top_page_no = last_addr.page;

  /* Locate the last record of the log to restore the in-memory
  top-of-log cursor (top_offset / top_undo_no). */
  last_page = trx_undo_page_get(page_id_t(rseg->space_id, undo->last_page_no),
                                rseg->page_size, mtr);

  rec = trx_undo_page_get_last_rec(last_page, page_no, offset);

  if (rec == nullptr) {
    undo->empty = TRUE;
  } else {
    undo->empty = FALSE;
    undo->top_offset = rec - last_page;
    undo->top_undo_no = trx_undo_rec_get_undo_no(rec);
  }
add_to_list:
  /* File the object in the rseg list matching its type and state. */
  if (type == TRX_UNDO_INSERT) {
    if (state != TRX_UNDO_CACHED) {
      UT_LIST_ADD_LAST(rseg->insert_undo_list, undo);
    } else {
      UT_LIST_ADD_LAST(rseg->insert_undo_cached, undo);

      MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
    }
  } else {
    ut_ad(type == TRX_UNDO_UPDATE);
    if (state != TRX_UNDO_CACHED) {
      UT_LIST_ADD_LAST(rseg->update_undo_list, undo);
      /* For XA prepared transaction and XA rolled back transaction, we
      could have GTID to be persisted. */
      if (state == TRX_UNDO_PREPARED || state == TRX_UNDO_ACTIVE) {
        trx_undo_gtid_read_and_persist(undo_header);
      }
    } else {
      UT_LIST_ADD_LAST(rseg->update_undo_cached, undo);

      MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
    }
  }

  return (undo);
}
1353
1354 /** Initializes the undo log lists for a rollback segment memory copy. This
1355 function is only called when the database is started or a new rollback
1356 segment is created.
1357 @return the combined size of undo log segments in pages */
trx_undo_lists_init(trx_rseg_t * rseg)1358 ulint trx_undo_lists_init(
1359 trx_rseg_t *rseg) /*!< in: rollback segment memory object */
1360 {
1361 ulint size = 0;
1362 trx_rsegf_t *rseg_header;
1363 ulint i;
1364 mtr_t mtr;
1365
1366 mtr.start();
1367
1368 rseg_header =
1369 trx_rsegf_get_new(rseg->space_id, rseg->page_no, rseg->page_size, &mtr);
1370
1371 for (i = 0; i < TRX_RSEG_N_SLOTS; i++) {
1372 page_no_t page_no;
1373
1374 page_no = trx_rsegf_get_nth_undo(rseg_header, i, &mtr);
1375
1376 /* In forced recovery: try to avoid operations which look
1377 at database pages; undo logs are rapidly changing data, and
1378 the probability that they are in an inconsistent state is
1379 high */
1380
1381 if (page_no != FIL_NULL &&
1382 srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
1383 trx_undo_t *undo;
1384
1385 undo = trx_undo_mem_init(rseg, i, page_no, &mtr);
1386
1387 size += undo->size;
1388
1389 mtr.commit();
1390
1391 mtr.start();
1392
1393 rseg_header =
1394 trx_rsegf_get(rseg->space_id, rseg->page_no, rseg->page_size, &mtr);
1395
1396 /* Found a used slot */
1397 MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
1398 }
1399 }
1400
1401 mtr.commit();
1402
1403 return (size);
1404 }
1405
1406 /** Creates and initializes an undo log memory object.
1407 @param[in] rseg rollback segment memory object
1408 @param[in] id slot index within rseg
1409 @param[in] type type of the log: TRX_UNDO_INSERT or TRX_UNDO_UPDATE
1410 @param[in] trx_id id of the trx for which the undo log is created
1411 @param[in] xid X/Open XA transaction identification
1412 @param[in] page_no undo log header page number
1413 @param[in] offset undo log header byte offset on page
1414 @return own: the undo log memory object */
trx_undo_mem_create(trx_rseg_t * rseg,ulint id,ulint type,trx_id_t trx_id,const XID * xid,page_no_t page_no,ulint offset)1415 static trx_undo_t *trx_undo_mem_create(trx_rseg_t *rseg, ulint id, ulint type,
1416 trx_id_t trx_id, const XID *xid,
1417 page_no_t page_no, ulint offset) {
1418 trx_undo_t *undo;
1419
1420 ut_a(id < TRX_RSEG_N_SLOTS);
1421
1422 undo = static_cast<trx_undo_t *>(ut_malloc_nokey(sizeof(*undo)));
1423
1424 if (undo == nullptr) {
1425 return (nullptr);
1426 }
1427
1428 undo->id = id;
1429 undo->type = type;
1430 undo->state = TRX_UNDO_ACTIVE;
1431 undo->del_marks = FALSE;
1432 undo->trx_id = trx_id;
1433 undo->xid = *xid;
1434
1435 undo->dict_operation = FALSE;
1436 undo->flag = 0;
1437 undo->gtid_allocated = false;
1438
1439 undo->rseg = rseg;
1440
1441 undo->space = rseg->space_id;
1442 undo->page_size.copy_from(rseg->page_size);
1443 undo->hdr_page_no = page_no;
1444 undo->hdr_offset = offset;
1445 undo->last_page_no = page_no;
1446 undo->size = 1;
1447
1448 undo->empty = TRUE;
1449 undo->top_page_no = page_no;
1450 undo->guess_block = nullptr;
1451 undo->withdraw_clock = 0;
1452
1453 return (undo);
1454 }
1455
1456 /** Initializes a cached undo log object for new use. */
trx_undo_mem_init_for_reuse(trx_undo_t * undo,trx_id_t trx_id,const XID * xid,ulint offset)1457 static void trx_undo_mem_init_for_reuse(
1458 trx_undo_t *undo, /*!< in: undo log to init */
1459 trx_id_t trx_id, /*!< in: id of the trx for which the undo log
1460 is created */
1461 const XID *xid, /*!< in: X/Open XA transaction identification*/
1462 ulint offset) /*!< in: undo log header byte offset on page */
1463 {
1464 ut_ad(mutex_own(&((undo->rseg)->mutex)));
1465
1466 ut_a(undo->id < TRX_RSEG_N_SLOTS);
1467
1468 undo->state = TRX_UNDO_ACTIVE;
1469 undo->del_marks = FALSE;
1470 undo->trx_id = trx_id;
1471 undo->xid = *xid;
1472
1473 undo->dict_operation = FALSE;
1474 undo->flag = 0;
1475 undo->gtid_allocated = false;
1476
1477 undo->hdr_offset = offset;
1478 undo->empty = TRUE;
1479 }
1480
/** Frees an undo log memory copy. Only the in-memory object is released;
the on-disk undo log segment is not touched. */
void trx_undo_mem_free(trx_undo_t *undo) /*!< in: the undo object to be freed */
{
  /* Sanity check: a valid object always carries a slot id within range. */
  ut_a(undo->id < TRX_RSEG_N_SLOTS);

  ut_free(undo);
}
1488
1489 /** Create a new undo log in the given rollback segment.
1490 @param[in] trx transaction
1491 @param[in] rseg rollback segment memory copy
1492 @param[in] type type of the log: TRX_UNDO_INSERT or TRX_UNDO_UPDATE
1493 @param[in] trx_id id of the trx for which the undo log is created
1494 @param[in] xid X/Open transaction identification
1495 @param[in] is_gtid if transaction has GTID
1496 @param[out] undo the new undo log object, undefined if did not succeed
1497 @param[in] mtr mini-transation
1498 @retval DB_SUCCESS if successful in creating the new undo lob object,
1499 @retval DB_TOO_MANY_CONCURRENT_TRXS
1500 @retval DB_OUT_OF_FILE_SPACE
1501 @retval DB_OUT_OF_MEMORY */
1502 static MY_ATTRIBUTE((warn_unused_result)) dberr_t
trx_undo_create(trx_t * trx,trx_rseg_t * rseg,ulint type,trx_id_t trx_id,const XID * xid,bool is_gtid,trx_undo_t ** undo,mtr_t * mtr)1503 trx_undo_create(trx_t *trx, trx_rseg_t *rseg, ulint type, trx_id_t trx_id,
1504 const XID *xid, bool is_gtid, trx_undo_t **undo,
1505 mtr_t *mtr) {
1506 trx_rsegf_t *rseg_header;
1507 page_no_t page_no;
1508 ulint offset;
1509 ulint id;
1510 page_t *undo_page;
1511 dberr_t err;
1512
1513 ut_ad(mutex_own(&(rseg->mutex)));
1514
1515 if (rseg->get_curr_size() == rseg->max_size) {
1516 return (DB_OUT_OF_FILE_SPACE);
1517 }
1518
1519 rseg->incr_curr_size();
1520
1521 rseg_header =
1522 trx_rsegf_get(rseg->space_id, rseg->page_no, rseg->page_size, mtr);
1523
1524 err = trx_undo_seg_create(rseg, rseg_header, type, &id, &undo_page, mtr);
1525
1526 if (err != DB_SUCCESS) {
1527 /* Did not succeed */
1528
1529 rseg->decr_curr_size();
1530
1531 return (err);
1532 }
1533
1534 page_no = page_get_page_no(undo_page);
1535
1536 offset = trx_undo_header_create(undo_page, trx_id, mtr);
1537
1538 bool add_space_gtid = (is_gtid && type == TRX_UNDO_UPDATE);
1539 trx_undo_header_add_space_for_xid(undo_page, undo_page + offset, mtr,
1540 add_space_gtid);
1541
1542 *undo = trx_undo_mem_create(rseg, id, type, trx_id, xid, page_no, offset);
1543 if (*undo == nullptr) {
1544 err = DB_OUT_OF_MEMORY;
1545 } else {
1546 (*undo)->gtid_allocated = add_space_gtid;
1547 }
1548
1549 return (err);
1550 }
1551
1552 /*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/
1553
1554 /** Reuses a cached undo log.
1555 @param[in,out] trx transaction
1556 @param[in,out] rseg rollback segment memory object
1557 @param[in] type type of the log: TRX_UNDO_INSERT or TRX_UNDO_UPDATE
1558 @param[in] trx_id id of the trx for which the undo log is used
1559 @param[in] xid X/Open XA transaction identification
1560 @param[in] is_gtid if transaction has GTID
1561 @param[in,out] mtr mini transaction
1562 @return the undo log memory object, NULL if none cached */
trx_undo_reuse_cached(trx_t * trx,trx_rseg_t * rseg,ulint type,trx_id_t trx_id,const XID * xid,bool is_gtid,mtr_t * mtr)1563 static trx_undo_t *trx_undo_reuse_cached(trx_t *trx, trx_rseg_t *rseg,
1564 ulint type, trx_id_t trx_id,
1565 const XID *xid, bool is_gtid,
1566 mtr_t *mtr) {
1567 trx_undo_t *undo;
1568
1569 ut_ad(mutex_own(&(rseg->mutex)));
1570
1571 if (type == TRX_UNDO_INSERT) {
1572 undo = UT_LIST_GET_FIRST(rseg->insert_undo_cached);
1573 if (undo == nullptr) {
1574 return (nullptr);
1575 }
1576
1577 UT_LIST_REMOVE(rseg->insert_undo_cached, undo);
1578
1579 MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1580 } else {
1581 ut_ad(type == TRX_UNDO_UPDATE);
1582
1583 undo = UT_LIST_GET_FIRST(rseg->update_undo_cached);
1584 if (undo == nullptr) {
1585 return (nullptr);
1586 }
1587
1588 UT_LIST_REMOVE(rseg->update_undo_cached, undo);
1589
1590 MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1591 }
1592
1593 ut_ad(undo->size == 1);
1594 ut_a(undo->id < TRX_RSEG_N_SLOTS);
1595
1596 auto undo_page = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
1597 undo->page_size, mtr);
1598
1599 bool add_space_gtid = false;
1600 ulint offset;
1601
1602 if (type == TRX_UNDO_INSERT) {
1603 offset = trx_undo_insert_header_reuse(undo_page, trx_id, mtr);
1604
1605 trx_undo_header_add_space_for_xid(undo_page, undo_page + offset, mtr,
1606 false);
1607 } else {
1608 ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE) ==
1609 TRX_UNDO_UPDATE);
1610
1611 offset = trx_undo_header_create(undo_page, trx_id, mtr);
1612
1613 trx_undo_header_add_space_for_xid(undo_page, undo_page + offset, mtr,
1614 is_gtid);
1615 add_space_gtid = is_gtid;
1616 }
1617
1618 trx_undo_mem_init_for_reuse(undo, trx_id, xid, offset);
1619 undo->gtid_allocated = add_space_gtid;
1620
1621 return (undo);
1622 }
1623
1624 /** Marks an undo log header as a header of a data dictionary operation
1625 transaction. */
trx_undo_mark_as_dict_operation(trx_t * trx,trx_undo_t * undo,mtr_t * mtr)1626 static void trx_undo_mark_as_dict_operation(
1627 trx_t *trx, /*!< in: dict op transaction */
1628 trx_undo_t *undo, /*!< in: assigned undo log */
1629 mtr_t *mtr) /*!< in: mtr */
1630 {
1631 page_t *hdr_page;
1632
1633 hdr_page = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
1634 undo->page_size, mtr);
1635
1636 mlog_write_ulint(hdr_page + undo->hdr_offset + TRX_UNDO_DICT_TRANS, TRUE,
1637 MLOG_1BYTE, mtr);
1638
1639 undo->dict_operation = TRUE;
1640 }
1641
/** Assigns an undo log for a transaction. A new undo log is created or a cached
undo log reused.
@return DB_SUCCESS if undo log assign successful, possible error codes
are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
DB_OUT_OF_MEMORY */
dberr_t trx_undo_assign_undo(
    trx_t *trx,               /*!< in: transaction */
    trx_undo_ptr_t *undo_ptr, /*!< in: assign undo log from
                              referred rollback segment. */
    ulint type)               /*!< in: TRX_UNDO_INSERT or
                              TRX_UNDO_UPDATE */
{
  trx_rseg_t *rseg;
  trx_undo_t *undo;
  mtr_t mtr;
  dberr_t err = DB_SUCCESS;

  ut_ad(trx);

  /* In case of read-only scenario trx->rsegs.m_redo.rseg can be NULL but
  still request for assigning undo logs is valid as temporary tables
  can be updated in read-only mode.
  If there is no rollback segment assigned to trx and still there is
  object being updated there is something wrong and so this condition
  check. */
  ut_ad(trx_is_rseg_assigned(trx));

  rseg = undo_ptr->rseg;

  ut_ad(mutex_own(&(trx->undo_mutex)));

  /* The no-redo pointer is used for temporary-tablespace undo logs. */
  bool no_redo = (&trx->rsegs.m_noredo == undo_ptr);

  /* If none of the undo pointers are assigned then this is
  first time transaction is allocating undo segment. */
  bool is_first = undo_ptr->is_empty();

  /* If any undo segment is assigned it is guaranteed that
  Innodb would persist GTID. Call it before any undo segment
  is assigned for transaction. We allocate space for GTID
  only if GTID is persisted. */
  bool is_gtid = false;
  if (!no_redo) {
    auto &gtid_persistor = clone_sys->get_gtid_persistor();
    if (is_first) {
      gtid_persistor.set_persist_gtid(trx, true);
    }
    /* Check if the undo segment needs to allocate for GTID. */
    is_gtid = gtid_persistor.persists_gtid(trx);
  }

  mtr.start();
  if (no_redo) {
    mtr.set_log_mode(MTR_LOG_NO_REDO);
  } else {
    ut_ad(&trx->rsegs.m_redo == undo_ptr);
  }

  rseg->latch();

  /* Debug injection point: simulate running out of undo slots. */
  DBUG_EXECUTE_IF("ib_create_table_fail_too_many_trx",
                  err = DB_TOO_MANY_CONCURRENT_TRXS;
                  goto func_exit;);
  /* Prefer a cached undo log; fall back to creating a new one. */
  undo =
#ifdef UNIV_DEBUG
      srv_inject_too_many_concurrent_trxs
          ? nullptr
          :
#endif
          trx_undo_reuse_cached(trx, rseg, type, trx->id, trx->xid, is_gtid,
                                &mtr);

  if (undo == nullptr) {
    err = trx_undo_create(trx, rseg, type, trx->id, trx->xid, is_gtid, &undo,
                          &mtr);
    if (err != DB_SUCCESS) {
      goto func_exit;
    }
  }

  /* Register the assigned undo log both in the rseg list and in the
  transaction's undo pointer. */
  if (type == TRX_UNDO_INSERT) {
    UT_LIST_ADD_FIRST(rseg->insert_undo_list, undo);
    ut_ad(undo_ptr->insert_undo == nullptr);
    undo_ptr->insert_undo = undo;
  } else {
    UT_LIST_ADD_FIRST(rseg->update_undo_list, undo);
    ut_ad(undo_ptr->update_undo == nullptr);
    undo_ptr->update_undo = undo;
  }

  if (trx->mysql_thd && !trx->ddl_operation &&
      thd_is_dd_update_stmt(trx->mysql_thd)) {
    trx->ddl_operation = true;
  }

  if (trx->ddl_operation || trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) {
    trx_undo_mark_as_dict_operation(trx, undo, &mtr);
  }

  /* For GTID persistence we might add undo segment to prepared transaction. If
  the transaction is in prepared state, we need to set XA properties. */
  if (trx_state_eq(trx, TRX_STATE_PREPARED)) {
    ut_ad(!is_first);
    undo->set_prepared(trx->xid);
  }

func_exit:
  rseg->unlatch();
  mtr.commit();

  return (err);
}
1754
1755 /** Sets the state of the undo log segment at a transaction finish.
1756 @return undo log segment header page, x-latched */
trx_undo_set_state_at_finish(trx_undo_t * undo,mtr_t * mtr)1757 page_t *trx_undo_set_state_at_finish(
1758 trx_undo_t *undo, /*!< in: undo log memory copy */
1759 mtr_t *mtr) /*!< in: mtr */
1760 {
1761 trx_usegf_t *seg_hdr;
1762 trx_upagef_t *page_hdr;
1763 page_t *undo_page;
1764 ulint state;
1765
1766 ut_a(undo->id < TRX_RSEG_N_SLOTS);
1767
1768 undo_page = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
1769 undo->page_size, mtr);
1770
1771 seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1772 page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
1773
1774 if (undo->size == 1 && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE) <
1775 TRX_UNDO_PAGE_REUSE_LIMIT) {
1776 state = TRX_UNDO_CACHED;
1777
1778 } else if (undo->type == TRX_UNDO_INSERT) {
1779 state = TRX_UNDO_TO_FREE;
1780 } else {
1781 state = TRX_UNDO_TO_PURGE;
1782 }
1783
1784 undo->state = state;
1785
1786 mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, state, MLOG_2BYTES, mtr);
1787
1788 return (undo_page);
1789 }
1790
1791 /** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK.
1792 @param[in,out] trx transaction
1793 @param[in,out] undo insert_undo or update_undo log
1794 @param[in] rollback false=XA PREPARE, true=XA ROLLBACK
1795 @param[in,out] mtr mini-transaction
1796 @return undo log segment header page, x-latched */
trx_undo_set_state_at_prepare(trx_t * trx,trx_undo_t * undo,bool rollback,mtr_t * mtr)1797 page_t *trx_undo_set_state_at_prepare(trx_t *trx, trx_undo_t *undo,
1798 bool rollback, mtr_t *mtr) {
1799 trx_usegf_t *seg_hdr;
1800 trx_ulogf_t *undo_header;
1801 page_t *undo_page;
1802 ulint offset;
1803
1804 ut_ad(trx && undo && mtr);
1805
1806 ut_a(undo->id < TRX_RSEG_N_SLOTS);
1807
1808 undo_page = trx_undo_page_get(page_id_t(undo->space, undo->hdr_page_no),
1809 undo->page_size, mtr);
1810
1811 seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1812
1813 offset = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
1814 undo_header = undo_page + offset;
1815
1816 /* Write GTID information if there. */
1817 trx_undo_gtid_write(trx, undo_header, undo, mtr);
1818
1819 if (rollback) {
1820 ut_ad(undo->state == TRX_UNDO_PREPARED);
1821 mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE, MLOG_2BYTES,
1822 mtr);
1823 return (undo_page);
1824 }
1825
1826 ut_ad(undo->state == TRX_UNDO_ACTIVE);
1827 undo->set_prepared(trx->xid);
1828
1829 mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, undo->state, MLOG_2BYTES, mtr);
1830
1831 mlog_write_ulint(undo_header + TRX_UNDO_FLAGS, undo->flag, MLOG_1BYTE, mtr);
1832
1833 trx_undo_write_xid(undo_header, &undo->xid, mtr);
1834
1835 return (undo_page);
1836 }
1837
1838 /** Adds the update undo log header as the first in the history list, and
1839 frees the memory object, or puts it to the list of cached update undo log
1840 segments. */
trx_undo_update_cleanup(trx_t * trx,trx_undo_ptr_t * undo_ptr,page_t * undo_page,bool update_rseg_history_len,ulint n_added_logs,mtr_t * mtr)1841 void trx_undo_update_cleanup(
1842 trx_t *trx, /*!< in: trx owning the update
1843 undo log */
1844 trx_undo_ptr_t *undo_ptr, /*!< in: update undo log. */
1845 page_t *undo_page, /*!< in: update undo log header page,
1846 x-latched */
1847 bool update_rseg_history_len,
1848 /*!< in: if true: update rseg history
1849 len else skip updating it. */
1850 ulint n_added_logs, /*!< in: number of logs added */
1851 mtr_t *mtr) /*!< in: mtr */
1852 {
1853 trx_rseg_t *rseg;
1854 trx_undo_t *undo;
1855
1856 undo = undo_ptr->update_undo;
1857 rseg = undo_ptr->rseg;
1858
1859 ut_ad(mutex_own(&(rseg->mutex)));
1860
1861 trx_purge_add_update_undo_to_history(
1862 trx, undo_ptr, undo_page, update_rseg_history_len, n_added_logs, mtr);
1863
1864 UT_LIST_REMOVE(rseg->update_undo_list, undo);
1865
1866 undo_ptr->update_undo = nullptr;
1867
1868 if (undo->state == TRX_UNDO_CACHED) {
1869 UT_LIST_ADD_FIRST(rseg->update_undo_cached, undo);
1870
1871 MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1872 } else {
1873 ut_ad(undo->state == TRX_UNDO_TO_PURGE);
1874
1875 trx_undo_mem_free(undo);
1876 }
1877 }
1878
1879 /** Frees an insert undo log after a transaction commit or rollback.
1880 Knowledge of inserts is not needed after a commit or rollback, therefore
1881 the data can be discarded.
1882 @param[in,out] undo_ptr undo log to clean up
1883 @param[in] noredo whether the undo tablespace is redo logged */
trx_undo_insert_cleanup(trx_undo_ptr_t * undo_ptr,bool noredo)1884 void trx_undo_insert_cleanup(trx_undo_ptr_t *undo_ptr, bool noredo) {
1885 trx_undo_t *undo;
1886 trx_rseg_t *rseg;
1887
1888 undo = undo_ptr->insert_undo;
1889 ut_ad(undo != nullptr);
1890
1891 rseg = undo_ptr->rseg;
1892
1893 ut_ad(noredo == fsp_is_system_temporary(rseg->space_id));
1894
1895 rseg->latch();
1896
1897 UT_LIST_REMOVE(rseg->insert_undo_list, undo);
1898 undo_ptr->insert_undo = nullptr;
1899
1900 if (undo->state == TRX_UNDO_CACHED) {
1901 UT_LIST_ADD_FIRST(rseg->insert_undo_cached, undo);
1902
1903 MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1904 } else {
1905 ut_ad(undo->state == TRX_UNDO_TO_FREE);
1906
1907 /* Delete first the undo log segment in the file */
1908
1909 rseg->unlatch();
1910
1911 trx_undo_seg_free(undo, noredo);
1912
1913 rseg->latch();
1914
1915 trx_undo_mem_free(undo);
1916 }
1917
1918 rseg->unlatch();
1919 }
1920
trx_undo_free_trx_with_prepared_or_active_logs(trx_t * trx,ulint expected_undo_state)1921 void trx_undo_free_trx_with_prepared_or_active_logs(trx_t *trx,
1922 ulint expected_undo_state) {
1923 ut_a(expected_undo_state == TRX_UNDO_ACTIVE ||
1924 expected_undo_state == TRX_UNDO_PREPARED);
1925
1926 ut_a(srv_shutdown_state.load() == SRV_SHUTDOWN_EXIT_THREADS);
1927
1928 if (trx->rsegs.m_redo.update_undo) {
1929 ut_a(trx->rsegs.m_redo.update_undo->state == expected_undo_state);
1930 UT_LIST_REMOVE(trx->rsegs.m_redo.rseg->update_undo_list,
1931 trx->rsegs.m_redo.update_undo);
1932 trx_undo_mem_free(trx->rsegs.m_redo.update_undo);
1933
1934 trx->rsegs.m_redo.update_undo = nullptr;
1935 }
1936
1937 if (trx->rsegs.m_redo.insert_undo) {
1938 ut_a(trx->rsegs.m_redo.insert_undo->state == expected_undo_state);
1939 UT_LIST_REMOVE(trx->rsegs.m_redo.rseg->insert_undo_list,
1940 trx->rsegs.m_redo.insert_undo);
1941 trx_undo_mem_free(trx->rsegs.m_redo.insert_undo);
1942
1943 trx->rsegs.m_redo.insert_undo = nullptr;
1944 }
1945
1946 if (trx->rsegs.m_noredo.update_undo) {
1947 ut_a(trx->rsegs.m_noredo.update_undo->state == expected_undo_state);
1948
1949 UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->update_undo_list,
1950 trx->rsegs.m_noredo.update_undo);
1951 trx_undo_mem_free(trx->rsegs.m_noredo.update_undo);
1952
1953 trx->rsegs.m_noredo.update_undo = nullptr;
1954 }
1955 if (trx->rsegs.m_noredo.insert_undo) {
1956 ut_a(trx->rsegs.m_noredo.insert_undo->state == expected_undo_state);
1957
1958 UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->insert_undo_list,
1959 trx->rsegs.m_noredo.insert_undo);
1960 trx_undo_mem_free(trx->rsegs.m_noredo.insert_undo);
1961
1962 trx->rsegs.m_noredo.insert_undo = nullptr;
1963 }
1964 }
1965
/** Truncate a marked undo tablespace by replacing its file with a freshly
created one under a new space_id, then rebuilding the tablespace header,
the RSEG_ARRAY page and every rollback segment header inside it. The old
space_id is retired and the marked space adopts the new one at the end.
NOTE(review): step ordering, latch ordering and the debug crash-injection
points are load-bearing for crash recovery — do not reorder.
@param[in,out] marked_space undo tablespace marked for truncation
@return true on success, false if the file replacement failed */
bool trx_undo_truncate_tablespace(undo::Tablespace *marked_space) {
#ifdef UNIV_DEBUG
  /* Test hook: force exactly one truncation attempt to fail. */
  static undo::Inject_failure_once injector("ib_undo_trunc_fail_truncate");
  if (injector.should_fail()) {
    return (false);
  };
#endif /* UNIV_DEBUG */

  bool success = true;

  auto old_space_id = marked_space->id();
  auto space_num = undo::id2num(old_space_id);
  auto marked_rsegs = marked_space->rsegs();

  /* Retire the old space_id and take the next id reserved for this
  undo space number. */
  undo::unuse_space_id(old_space_id);

  auto new_space_id = undo::use_next_space_id(space_num);

  const auto n_pages = SRV_UNDO_TABLESPACE_SIZE_IN_PAGES;

  /* Remember whether the old space was encrypted so the replacement
  can be encrypted the same way below. */
  fil_space_t *space = fil_space_get(old_space_id);
  bool is_encrypted = FSP_FLAGS_GET_ENCRYPTION(space->flags);

  /* Step-1: Truncate tablespace by replacement with a new space_id. */
  success = fil_replace_tablespace(old_space_id, new_space_id, n_pages);

  if (!success) {
    return (success);
  }

  ut_d(undo::inject_crash("ib_undo_trunc_empty_file"));

  /* This undo tablespace is unused. Lock the Rsegs before the
  file_space because SYNC_RSEGS > SYNC_FSP. */
  marked_rsegs->x_lock();

  /* Step-2: Re-initialize tablespace header. */
  log_free_check();

  mtr_t mtr;

  mtr.start();

  fsp_header_init(new_space_id, n_pages, &mtr, false);

  /* If tablespace is to be encrypted, encrypt it now */
  if (is_encrypted && srv_undo_log_encrypt) {
    ut_d(bool ret =) set_undo_tablespace_encryption(new_space_id, &mtr, false);
    /* Don't expect any error here (unless keyring plugin is uninstalled). In
    that case too, continue truncation processing of tablespace. */
    ut_ad(!ret);
  }

  /* Step-3: Add the RSEG_ARRAY page. */
  trx_rseg_array_create(new_space_id, &mtr);

  mtr.commit();

  /* Step-4: Re-initialize rollback segment header that resides
  in truncated tablespaces. */

  ut_d(undo::inject_crash("ib_undo_trunc_before_rsegs"));

  for (auto rseg : *marked_rsegs) {
    log_free_check();

    mtr.start();

    mtr_x_lock(fil_space_get_latch(new_space_id), &mtr);

    /* Re-point the in-memory rseg at the new file and create a fresh
    rollback segment header page for it. */
    rseg->space_id = new_space_id;

    rseg->page_no = trx_rseg_header_create(new_space_id, univ_page_size,
                                           PAGE_NO_MAX, rseg->id, &mtr);

    ut_a(rseg->page_no != FIL_NULL);

    auto rseg_header =
        trx_rsegf_get_new(new_space_id, rseg->page_no, rseg->page_size, &mtr);

    /* Before re-initialization ensure that we free the existing
    structure. There can't be any active transactions. */
    ut_a(UT_LIST_GET_LEN(rseg->update_undo_list) == 0);
    ut_a(UT_LIST_GET_LEN(rseg->insert_undo_list) == 0);

    trx_undo_t *next_undo;

    /* Free cached update undo memory objects; the file pages they
    referred to belonged to the replaced file. */
    for (trx_undo_t *undo = UT_LIST_GET_FIRST(rseg->update_undo_cached);
         undo != nullptr; undo = next_undo) {
      next_undo = UT_LIST_GET_NEXT(undo_list, undo);
      UT_LIST_REMOVE(rseg->update_undo_cached, undo);
      MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
      trx_undo_mem_free(undo);
    }

    /* Likewise for cached insert undo memory objects. */
    for (trx_undo_t *undo = UT_LIST_GET_FIRST(rseg->insert_undo_cached);
         undo != nullptr; undo = next_undo) {
      next_undo = UT_LIST_GET_NEXT(undo_list, undo);
      UT_LIST_REMOVE(rseg->insert_undo_cached, undo);
      MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
      trx_undo_mem_free(undo);
    }

    UT_LIST_INIT(rseg->update_undo_list, &trx_undo_t::undo_list);
    UT_LIST_INIT(rseg->update_undo_cached, &trx_undo_t::undo_list);
    UT_LIST_INIT(rseg->insert_undo_list, &trx_undo_t::undo_list);
    UT_LIST_INIT(rseg->insert_undo_cached, &trx_undo_t::undo_list);

    rseg->max_size =
        mtr_read_ulint(rseg_header + TRX_RSEG_MAX_SIZE, MLOG_4BYTES, &mtr);

    /* Initialize the undo log lists according to the rseg header */
    rseg->set_curr_size(
        mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE, MLOG_4BYTES, &mtr) +
        1);

    mtr.commit();

    ut_ad(rseg->get_curr_size() == 1);
    ut_ad(rseg->trx_ref_count == 0);

    /* The rebuilt segment has no history yet. */
    rseg->last_page_no = FIL_NULL;
    rseg->last_offset = 0;
    rseg->last_trx_no = 0;
    rseg->last_del_marks = FALSE;
  }

  marked_rsegs->x_unlock();

  /* Increment the space ID for this undo space now so that if anyone refers
  to this space, it is completely initialized. */
  marked_space->set_space_id(new_space_id);

  return (success);
}
2101
2102 #endif /* !UNIV_HOTBACKUP */
2103