1 /*****************************************************************************
2 
3 Copyright (c) 1996, 2021, Oracle and/or its affiliates.
4 
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License, version 2.0,
7 as published by the Free Software Foundation.
8 
9 This program is also distributed with certain software (including
10 but not limited to OpenSSL) that is licensed under separate terms,
11 as designated in a particular file or component or in included license
12 documentation.  The authors of MySQL hereby grant you an additional
13 permission to link the program and your derivative works with the
14 separately licensed software that they have included with MySQL.
15 
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19 GNU General Public License, version 2.0, for more details.
20 
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
24 
25 *****************************************************************************/
26 
27 /**************************************************//**
28 @file trx/trx0undo.cc
29 Transaction undo log
30 
31 Created 3/26/1996 Heikki Tuuri
32 *******************************************************/
33 
34 #include "ha_prototypes.h"
35 
36 #include "trx0undo.h"
37 
38 #ifdef UNIV_NONINL
39 #include "trx0undo.ic"
40 #endif
41 
42 #include "fsp0fsp.h"
43 #ifndef UNIV_HOTBACKUP
44 #include "mach0data.h"
45 #include "mtr0log.h"
46 #include "srv0mon.h"
47 #include "srv0srv.h"
48 #include "srv0start.h"
49 #include "trx0purge.h"
50 #include "trx0rec.h"
51 #include "trx0rseg.h"
52 #include "trx0trx.h"
53 
54 /* How should the old versions in the history list be managed?
55    ----------------------------------------------------------
56 If each transaction is given a whole page for its update undo log, file
57 space consumption can be 10 times higher than necessary. Therefore,
58 partly filled update undo log pages should be reusable. But then there
59 is no way individual pages can be ordered so that the ordering agrees
60 with the serialization numbers of the transactions on the pages. Thus,
61 the history list must be formed of undo logs, not their header pages as
62 it was in the old implementation.
63 	However, on a single header page the transactions are placed in
64 the order of their serialization numbers. As old versions are purged, we
65 may free the page when the last transaction on the page has been purged.
66 	A problem is that the purge has to go through the transactions
67 in the serialization order. This means that we have to look through all
68 rollback segments for the one that has the smallest transaction number
69 in its history list.
70 	When should we do a purge? A purge is necessary when space is
71 running out in any of the rollback segments. Then we may also have to purge
72 old versions which might still be needed by some consistent read. How do
73 we trigger the start of a purge? When a transaction writes to an undo log,
74 it may notice that the space is running out. When a read view is closed,
75 it may make some history superfluous. The server can have a utility which
76 periodically checks if it can purge some history.
77 	In a parallelized purge we have the problem that a query thread
78 can remove a delete marked clustered index record before another query
79 thread has processed an earlier version of the record, which cannot then
80 be done because the row cannot be constructed from the clustered index
81 record. To avoid this problem, we also store in the update and delete-mark
82 undo records the columns necessary to construct the secondary index
83 entries which are modified.
84 	We can latch the stack of versions of a single clustered index record
85 by taking a latch on the clustered index page. As long as the latch is held,
86 no new versions can be added and no versions removed by undo. But, a purge
87 can still remove old versions from the bottom of the stack. */
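
/* An illustrative sketch, not part of InnoDB: a minimal model of the purge
step described above, which must pick the rollback segment whose history
list starts with the smallest transaction serialization number. The type
and function names below are hypothetical and only make the idea concrete. */
#if 0
#include <cstddef>
#include <cstdint>
#include <vector>

struct hypothetical_rseg_t {
	std::uint64_t	oldest_trx_no;	/* trx no of the oldest undo log in
					this rollback segment's history
					list, or UINT64_MAX if empty */
};

/* Return the index of the rollback segment that purge should process
next, or -1 if every history list is empty. */
static int
pick_rseg_for_purge(const std::vector<hypothetical_rseg_t>& rsegs)
{
	int		best	= -1;
	std::uint64_t	best_no	= UINT64_MAX;

	for (std::size_t i = 0; i < rsegs.size(); i++) {
		if (rsegs[i].oldest_trx_no < best_no) {
			best_no	= rsegs[i].oldest_trx_no;
			best	= static_cast<int>(i);
		}
	}

	return(best);
}
#endif /* illustrative sketch */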
88 
89 /* How to protect rollback segments, undo logs, and history lists with
90    -------------------------------------------------------------------
91 latches?
92 -------
93 The contention of the trx_sys_t::mutex should be minimized. When a transaction
94 does its first insert or modification in an index, an undo log is assigned for it.
95 Then we must have an x-latch on the rollback segment header.
96 	When the transaction does more modifications or rolls back, the undo
97 log is protected by the undo_mutex in the transaction object.
98 	When the transaction commits, its insert undo log is either reset and
99 cached for a fast reuse, or freed. In these cases we must have an x-latch on
100 the rollback segment page. The update undo log is added to the history list. If
101 it is not suitable for reuse, its slot in the rollback segment is reset. In
102 both cases, an x-latch must be acquired on the rollback segment.
103 	The purge operation steps through the history list without modifying
104 it until a truncate operation occurs, which can remove undo logs from the end
105 of the list and release undo log segments. In stepping through the list,
106 s-latches on the undo log pages are enough, but in a truncate, x-latches must
107 be obtained on the rollback segment and individual pages. */
108 #endif /* !UNIV_HOTBACKUP */
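
/* An illustrative sketch, not part of InnoDB: the commit-time rule from the
comment above expressed as pseudo-code. Every name here is hypothetical; the
point is only that both branches run under an x-latch on the rollback
segment header page. */
#if 0
enum class undo_kind { insert_undo, update_undo };

static void
commit_time_cleanup_sketch(undo_kind kind, bool reusable)
{
	/* x-latch the rollback segment header page first */

	if (kind == undo_kind::insert_undo) {
		if (reusable) {
			/* reset the header page and move the undo log to
			the cached list for fast reuse */
		} else {
			/* free the undo log segment */
		}
	} else {
		/* add the update undo log to the history list; if it is
		not suitable for reuse, also reset its slot in the
		rollback segment */
	}

	/* the x-latch is released when the mini-transaction commits */
}
#endif /* illustrative sketch */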
109 
110 /********************************************************************//**
111 Initializes the fields in an undo log segment page. */
112 static
113 void
114 trx_undo_page_init(
115 /*===============*/
116 	page_t* undo_page,	/*!< in: undo log segment page */
117 	ulint	type,		/*!< in: undo log segment type */
118 	mtr_t*	mtr);		/*!< in: mtr */
119 
120 #ifndef UNIV_HOTBACKUP
121 /********************************************************************//**
122 Creates and initializes an undo log memory object.
123 @return own: the undo log memory object */
124 static
125 trx_undo_t*
126 trx_undo_mem_create(
127 /*================*/
128 	trx_rseg_t*	rseg,	/*!< in: rollback segment memory object */
129 	ulint		id,	/*!< in: slot index within rseg */
130 	ulint		type,	/*!< in: type of the log: TRX_UNDO_INSERT or
131 				TRX_UNDO_UPDATE */
132 	trx_id_t	trx_id,	/*!< in: id of the trx for which the undo log
133 				is created */
134 	const XID*	xid,	/*!< in: X/Open XA transaction identification*/
135 	ulint		page_no,/*!< in: undo log header page number */
136 	ulint		offset);/*!< in: undo log header byte offset on page */
137 #endif /* !UNIV_HOTBACKUP */
138 /***************************************************************//**
139 Initializes a cached insert undo log header page for new use. NOTE that this
140 function has its own log record type MLOG_UNDO_HDR_REUSE. You must NOT change
141 the operation of this function!
142 @return undo log header byte offset on page */
143 static
144 ulint
145 trx_undo_insert_header_reuse(
146 /*=========================*/
147 	page_t*		undo_page,	/*!< in/out: insert undo log segment
148 					header page, x-latched */
149 	trx_id_t	trx_id,		/*!< in: transaction id */
150 	mtr_t*		mtr);		/*!< in: mtr */
151 /**********************************************************************//**
152 If an update undo log can be discarded immediately, this function frees the
153 space, resetting the page to the proper state for caching. */
154 static
155 void
156 trx_undo_discard_latest_update_undo(
157 /*================================*/
158 	page_t*	undo_page,	/*!< in: header page of an undo log of size 1 */
159 	mtr_t*	mtr);		/*!< in: mtr */
160 
161 #ifndef UNIV_HOTBACKUP
162 /***********************************************************************//**
163 Gets the previous record in an undo log from the previous page.
164 @return undo log record, the page s-latched, NULL if none */
165 static
166 trx_undo_rec_t*
167 trx_undo_get_prev_rec_from_prev_page(
168 /*=================================*/
169 	trx_undo_rec_t*	rec,	/*!< in: undo record */
170 	ulint		page_no,/*!< in: undo log header page number */
171 	ulint		offset,	/*!< in: undo log header offset on page */
172 	bool		shared,	/*!< in: true=S-latch, false=X-latch */
173 	mtr_t*		mtr)	/*!< in: mtr */
174 {
175 	ulint	space;
176 	ulint	prev_page_no;
177 	page_t* prev_page;
178 	page_t*	undo_page;
179 
180 	undo_page = page_align(rec);
181 
182 	prev_page_no = flst_get_prev_addr(undo_page + TRX_UNDO_PAGE_HDR
183 					  + TRX_UNDO_PAGE_NODE, mtr)
184 		.page;
185 
186 	if (prev_page_no == FIL_NULL) {
187 
188 		return(NULL);
189 	}
190 
191 	space = page_get_space_id(undo_page);
192 
193 	bool			found;
194 	const page_size_t&	page_size = fil_space_get_page_size(space,
195 								    &found);
196 
197 	ut_ad(found);
198 
199 	buf_block_t*	block = buf_page_get(
200 		page_id_t(space, prev_page_no), page_size,
201 		shared ? RW_S_LATCH : RW_X_LATCH, mtr);
202 
203 	buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
204 
205 	prev_page = buf_block_get_frame(block);
206 
207 	return(trx_undo_page_get_last_rec(prev_page, page_no, offset));
208 }
209 
210 /***********************************************************************//**
211 Gets the previous record in an undo log.
212 @return undo log record, the page s-latched, NULL if none */
213 trx_undo_rec_t*
214 trx_undo_get_prev_rec(
215 /*==================*/
216 	trx_undo_rec_t*	rec,	/*!< in: undo record */
217 	ulint		page_no,/*!< in: undo log header page number */
218 	ulint		offset,	/*!< in: undo log header offset on page */
219 	bool		shared,	/*!< in: true=S-latch, false=X-latch */
220 	mtr_t*		mtr)	/*!< in: mtr */
221 {
222 	trx_undo_rec_t*	prev_rec;
223 
224 	prev_rec = trx_undo_page_get_prev_rec(rec, page_no, offset);
225 
226 	if (prev_rec) {
227 
228 		return(prev_rec);
229 	}
230 
231 	/* We have to go to the previous undo log page to look for the
232 	previous record */
233 
234 	return(trx_undo_get_prev_rec_from_prev_page(rec, page_no, offset,
235 						    shared, mtr));
236 }
237 
238 /** Gets the next record in an undo log from the next page.
239 @param[in]	space		undo log header space
240 @param[in]	page_size	page size
241 @param[in]	undo_page	undo log page
242 @param[in]	page_no		undo log header page number
243 @param[in]	offset		undo log header offset on page
244 @param[in]	mode		latch mode: RW_S_LATCH or RW_X_LATCH
245 @param[in,out]	mtr		mini-transaction
246 @return undo log record, the page latched, NULL if none */
247 static
248 trx_undo_rec_t*
249 trx_undo_get_next_rec_from_next_page(
250 	ulint			space,
251 	const page_size_t&	page_size,
252 	const page_t*		undo_page,
253 	ulint			page_no,
254 	ulint			offset,
255 	ulint			mode,
256 	mtr_t*			mtr)
257 {
258 	const trx_ulogf_t*	log_hdr;
259 	ulint			next_page_no;
260 	page_t*			next_page;
261 	ulint			next;
262 
263 	if (page_no == page_get_page_no(undo_page)) {
264 
265 		log_hdr = undo_page + offset;
266 		next = mach_read_from_2(log_hdr + TRX_UNDO_NEXT_LOG);
267 
268 		if (next != 0) {
269 
270 			return(NULL);
271 		}
272 	}
273 
274 	next_page_no = flst_get_next_addr(undo_page + TRX_UNDO_PAGE_HDR
275 					  + TRX_UNDO_PAGE_NODE, mtr)
276 		.page;
277 	if (next_page_no == FIL_NULL) {
278 
279 		return(NULL);
280 	}
281 
282 	const page_id_t	next_page_id(space, next_page_no);
283 
284 	if (mode == RW_S_LATCH) {
285 		next_page = trx_undo_page_get_s_latched(
286 			next_page_id, page_size, mtr);
287 	} else {
288 		ut_ad(mode == RW_X_LATCH);
289 		next_page = trx_undo_page_get(next_page_id, page_size, mtr);
290 	}
291 
292 	return(trx_undo_page_get_first_rec(next_page, page_no, offset));
293 }
294 
295 /***********************************************************************//**
296 Gets the next record in an undo log.
297 @return undo log record, the page s-latched, NULL if none */
298 trx_undo_rec_t*
299 trx_undo_get_next_rec(
300 /*==================*/
301 	trx_undo_rec_t*	rec,	/*!< in: undo record */
302 	ulint		page_no,/*!< in: undo log header page number */
303 	ulint		offset,	/*!< in: undo log header offset on page */
304 	mtr_t*		mtr)	/*!< in: mtr */
305 {
306 	ulint		space;
307 	trx_undo_rec_t*	next_rec;
308 
309 	next_rec = trx_undo_page_get_next_rec(rec, page_no, offset);
310 
311 	if (next_rec) {
312 		return(next_rec);
313 	}
314 
315 	space = page_get_space_id(page_align(rec));
316 
317 	bool			found;
318 	const page_size_t&	page_size = fil_space_get_page_size(space,
319 								    &found);
320 
321 	ut_ad(found);
322 
323 	return(trx_undo_get_next_rec_from_next_page(space, page_size,
324 						    page_align(rec),
325 						    page_no, offset,
326 						    RW_S_LATCH, mtr));
327 }
328 
329 /** Gets the first record in an undo log.
330 @param[in]	space		undo log header space
331 @param[in]	page_size	page size
332 @param[in]	page_no		undo log header page number
333 @param[in]	offset		undo log header offset on page
334 @param[in]	mode		latching mode: RW_S_LATCH or RW_X_LATCH
335 @param[in,out]	mtr		mini-transaction
336 @return undo log record, the page latched, NULL if none */
337 trx_undo_rec_t*
338 trx_undo_get_first_rec(
339 	ulint			space,
340 	const page_size_t&	page_size,
341 	ulint			page_no,
342 	ulint			offset,
343 	ulint			mode,
344 	mtr_t*			mtr)
345 {
346 	page_t*		undo_page;
347 	trx_undo_rec_t*	rec;
348 
349 	const page_id_t	page_id(space, page_no);
350 
351 	if (mode == RW_S_LATCH) {
352 		undo_page = trx_undo_page_get_s_latched(
353 			page_id, page_size, mtr);
354 	} else {
355 		undo_page = trx_undo_page_get(page_id, page_size, mtr);
356 	}
357 
358 	rec = trx_undo_page_get_first_rec(undo_page, page_no, offset);
359 
360 	if (rec) {
361 		return(rec);
362 	}
363 
364 	return(trx_undo_get_next_rec_from_next_page(space, page_size,
365 						    undo_page, page_no, offset,
366 						    mode, mtr));
367 }
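
/* An illustrative sketch, not part of InnoDB: how the two functions above
are meant to be combined to scan one undo log inside a single
mini-transaction. The wrapper function and the record callback are
hypothetical; real callers (e.g. purge) also re-position and restart
mini-transactions, which is omitted here. */
#if 0
static void
scan_undo_log_sketch(
	ulint			space,
	const page_size_t&	page_size,
	ulint			page_no,	/* undo log header page no */
	ulint			offset)		/* undo log header offset */
{
	mtr_t	mtr;

	mtr_start(&mtr);

	for (trx_undo_rec_t* rec = trx_undo_get_first_rec(
		     space, page_size, page_no, offset, RW_S_LATCH, &mtr);
	     rec != NULL;
	     rec = trx_undo_get_next_rec(rec, page_no, offset, &mtr)) {

		/* process_undo_rec(rec); hypothetical callback */
	}

	mtr_commit(&mtr);
}
#endif /* illustrative sketch */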
368 
369 /*============== UNDO LOG FILE COPY CREATION AND FREEING ==================*/
370 
371 /**********************************************************************//**
372 Writes the mtr log entry of an undo log page initialization. */
373 UNIV_INLINE
374 void
375 trx_undo_page_init_log(
376 /*===================*/
377 	page_t* undo_page,	/*!< in: undo log page */
378 	ulint	type,		/*!< in: undo log type */
379 	mtr_t*	mtr)		/*!< in: mtr */
380 {
381 	mlog_write_initial_log_record(undo_page, MLOG_UNDO_INIT, mtr);
382 
383 	mlog_catenate_ulint_compressed(mtr, type);
384 }
385 #else /* !UNIV_HOTBACKUP */
386 # define trx_undo_page_init_log(undo_page,type,mtr) ((void) 0)
387 #endif /* !UNIV_HOTBACKUP */
388 
389 /***********************************************************//**
390 Parses the redo log entry of an undo log page initialization.
391 @return end of log record or NULL */
392 byte*
393 trx_undo_parse_page_init(
394 /*=====================*/
395 	const byte*	ptr,	/*!< in: buffer */
396 	const byte*	end_ptr,/*!< in: buffer end */
397 	page_t*		page,	/*!< in: page or NULL */
398 	mtr_t*		mtr)	/*!< in: mtr or NULL */
399 {
400 	ulint	type;
401 
402 	type = mach_parse_compressed(&ptr, end_ptr);
403 
404 	if (ptr == NULL) {
405 
406 		return(NULL);
407 	}
408 
409 	if (page) {
410 		trx_undo_page_init(page, type, mtr);
411 	}
412 
413 	return(const_cast<byte*>(ptr));
414 }
415 
416 /********************************************************************//**
417 Initializes the fields in an undo log segment page. */
418 static
419 void
420 trx_undo_page_init(
421 /*===============*/
422 	page_t* undo_page,	/*!< in: undo log segment page */
423 	ulint	type,		/*!< in: undo log segment type */
424 	mtr_t*	mtr)		/*!< in: mtr */
425 {
426 	trx_upagef_t*	page_hdr;
427 
428 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
429 
430 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_TYPE, type);
431 
432 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START,
433 			TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
434 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE,
435 			TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
436 
437 	fil_page_set_type(undo_page, FIL_PAGE_UNDO_LOG);
438 
439 	trx_undo_page_init_log(undo_page, type, mtr);
440 }
441 
442 #ifndef UNIV_HOTBACKUP
443 /***************************************************************//**
444 Creates a new undo log segment in file.
445 @return DB_SUCCESS if page creation OK; possible error codes are:
446 DB_TOO_MANY_CONCURRENT_TRXS, DB_OUT_OF_FILE_SPACE */
447 static MY_ATTRIBUTE((warn_unused_result))
448 dberr_t
449 trx_undo_seg_create(
450 /*================*/
451 	trx_rseg_t*	rseg MY_ATTRIBUTE((unused)),/*!< in: rollback segment */
452 	trx_rsegf_t*	rseg_hdr,/*!< in: rollback segment header, page
453 				x-latched */
454 	ulint		type,	/*!< in: type of the segment: TRX_UNDO_INSERT or
455 				TRX_UNDO_UPDATE */
456 	ulint*		id,	/*!< out: slot index within rseg header */
457 	page_t**	undo_page,
458 				/*!< out: segment header page x-latched, NULL
459 				if there was an error */
460 	mtr_t*		mtr)	/*!< in: mtr */
461 {
462 	ulint		slot_no;
463 	ulint		space;
464 	buf_block_t*	block;
465 	trx_upagef_t*	page_hdr;
466 	trx_usegf_t*	seg_hdr;
467 	ulint		n_reserved;
468 	bool		success;
469 	dberr_t		err = DB_SUCCESS;
470 
471 	ut_ad(mtr != NULL);
472 	ut_ad(id != NULL);
473 	ut_ad(rseg_hdr != NULL);
474 	ut_ad(mutex_own(&(rseg->mutex)));
475 
476 	/*	fputs(type == TRX_UNDO_INSERT
477 	? "Creating insert undo log segment\n"
478 	: "Creating update undo log segment\n", stderr); */
479 	slot_no = trx_rsegf_undo_find_free(rseg_hdr, mtr);
480 
481 	if (slot_no == ULINT_UNDEFINED) {
482 		ib::warn() << "Cannot find a free slot for an undo log. Do"
483 			" you have too many active transactions running"
484 			" concurrently?";
485 
486 		return(DB_TOO_MANY_CONCURRENT_TRXS);
487 	}
488 
489 	space = page_get_space_id(page_align(rseg_hdr));
490 
491 	success = fsp_reserve_free_extents(&n_reserved, space, 2, FSP_UNDO,
492 					   mtr);
493 	if (!success) {
494 
495 		return(DB_OUT_OF_FILE_SPACE);
496 	}
497 
498 	/* Allocate a new file segment for the undo log */
499 	block = fseg_create_general(space, 0,
500 				    TRX_UNDO_SEG_HDR
501 				    + TRX_UNDO_FSEG_HEADER, TRUE, mtr);
502 
503 	fil_space_release_free_extents(space, n_reserved);
504 
505 	if (block == NULL) {
506 		/* No space left */
507 
508 		return(DB_OUT_OF_FILE_SPACE);
509 	}
510 
511 	buf_block_dbg_add_level(block, SYNC_TRX_UNDO_PAGE);
512 
513 	*undo_page = buf_block_get_frame(block);
514 
515 	page_hdr = *undo_page + TRX_UNDO_PAGE_HDR;
516 	seg_hdr = *undo_page + TRX_UNDO_SEG_HDR;
517 
518 	trx_undo_page_init(*undo_page, type, mtr);
519 
520 	mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE,
521 			 TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE,
522 			 MLOG_2BYTES, mtr);
523 
524 	mlog_write_ulint(seg_hdr + TRX_UNDO_LAST_LOG, 0, MLOG_2BYTES, mtr);
525 
526 	flst_init(seg_hdr + TRX_UNDO_PAGE_LIST, mtr);
527 
528 	flst_add_last(seg_hdr + TRX_UNDO_PAGE_LIST,
529 		      page_hdr + TRX_UNDO_PAGE_NODE, mtr);
530 
531 	trx_rsegf_set_nth_undo(rseg_hdr, slot_no,
532 			       page_get_page_no(*undo_page), mtr);
533 	*id = slot_no;
534 
535 	MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
536 
537 	return(err);
538 }
539 
540 /**********************************************************************//**
541 Writes the mtr log entry of an undo log header initialization. */
542 UNIV_INLINE
543 void
544 trx_undo_header_create_log(
545 /*=======================*/
546 	const page_t*	undo_page,	/*!< in: undo log header page */
547 	trx_id_t	trx_id,		/*!< in: transaction id */
548 	mtr_t*		mtr)		/*!< in: mtr */
549 {
550 	mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_CREATE, mtr);
551 
552 	mlog_catenate_ull_compressed(mtr, trx_id);
553 }
554 #else /* !UNIV_HOTBACKUP */
555 # define trx_undo_header_create_log(undo_page,trx_id,mtr) ((void) 0)
556 #endif /* !UNIV_HOTBACKUP */
557 
558 /***************************************************************//**
559 Creates a new undo log header in file. NOTE that this function has its own
560 log record type MLOG_UNDO_HDR_CREATE. You must NOT change the operation of
561 this function!
562 @return header byte offset on page */
563 static
564 ulint
565 trx_undo_header_create(
566 /*===================*/
567 	page_t*		undo_page,	/*!< in/out: undo log segment
568 					header page, x-latched; it is
569 					assumed that there is
570 					TRX_UNDO_LOG_XA_HDR_SIZE bytes
571 					free space on it */
572 	trx_id_t	trx_id,		/*!< in: transaction id */
573 	mtr_t*		mtr)		/*!< in: mtr */
574 {
575 	trx_upagef_t*	page_hdr;
576 	trx_usegf_t*	seg_hdr;
577 	trx_ulogf_t*	log_hdr;
578 	ulint		prev_log;
579 	ulint		free;
580 	ulint		new_free;
581 
582 	ut_ad(mtr && undo_page);
583 
584 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
585 	seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
586 
587 	free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
588 
589 	log_hdr = undo_page + free;
590 
591 	new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
592 
593 	ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < UNIV_PAGE_SIZE - 100);
594 
595 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
596 
597 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
598 
599 	mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);
600 
601 	prev_log = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
602 
603 	if (prev_log != 0) {
604 		trx_ulogf_t*	prev_log_hdr;
605 
606 		prev_log_hdr = undo_page + prev_log;
607 
608 		mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, free);
609 	}
610 
611 	mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, free);
612 
613 	log_hdr = undo_page + free;
614 
615 	mach_write_to_2(log_hdr + TRX_UNDO_DEL_MARKS, TRUE);
616 
617 	mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
618 	mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);
619 
620 	mach_write_to_1(log_hdr + TRX_UNDO_XID_EXISTS, FALSE);
621 	mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);
622 
623 	mach_write_to_2(log_hdr + TRX_UNDO_NEXT_LOG, 0);
624 	mach_write_to_2(log_hdr + TRX_UNDO_PREV_LOG, prev_log);
625 
626 	/* Write the log record about the header creation */
627 	trx_undo_header_create_log(undo_page, trx_id, mtr);
628 
629 	return(free);
630 }
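
/* An illustrative summary, not additional logic: the state of the page
after trx_undo_header_create() above returns `free`, using the symbolic
offsets written by the code (new_free == free + TRX_UNDO_LOG_OLD_HDR_SIZE):

	TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START	= new_free
	TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE	= new_free
	TRX_UNDO_SEG_HDR + TRX_UNDO_STATE	= TRX_UNDO_ACTIVE
	TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG	= free
	free + TRX_UNDO_DEL_MARKS		= TRUE
	free + TRX_UNDO_TRX_ID			= trx_id
	free + TRX_UNDO_LOG_START		= new_free
	free + TRX_UNDO_XID_EXISTS		= FALSE
	free + TRX_UNDO_DICT_TRANS		= FALSE
	free + TRX_UNDO_NEXT_LOG		= 0
	free + TRX_UNDO_PREV_LOG		= prev_log (0 if none)

The previous log header, if any, gets TRX_UNDO_NEXT_LOG = free. */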
631 
632 #ifndef UNIV_HOTBACKUP
633 /********************************************************************//**
634 Write X/Open XA Transaction Identification (XID) to undo log header */
635 static
636 void
637 trx_undo_write_xid(
638 /*===============*/
639 	trx_ulogf_t*	log_hdr,/*!< in: undo log header */
640 	const XID*	xid,	/*!< in: X/Open XA Transaction Identification */
641 	mtr_t*		mtr)	/*!< in: mtr */
642 {
643 	mlog_write_ulint(log_hdr + TRX_UNDO_XA_FORMAT,
644 			 static_cast<ulint>(xid->get_format_id()),
645 			 MLOG_4BYTES, mtr);
646 
647 	mlog_write_ulint(log_hdr + TRX_UNDO_XA_TRID_LEN,
648 			 static_cast<ulint>(xid->get_gtrid_length()),
649 			 MLOG_4BYTES, mtr);
650 
651 	mlog_write_ulint(log_hdr + TRX_UNDO_XA_BQUAL_LEN,
652 			 static_cast<ulint>(xid->get_bqual_length()),
653 			 MLOG_4BYTES, mtr);
654 
655 	mlog_write_string(log_hdr + TRX_UNDO_XA_XID,
656 			  reinterpret_cast<const byte*>(xid->get_data()),
657 			  XIDDATASIZE, mtr);
658 }
659 
660 /********************************************************************//**
661 Read X/Open XA Transaction Identification (XID) from undo log header */
662 static
663 void
664 trx_undo_read_xid(
665 /*==============*/
666 	trx_ulogf_t*	log_hdr,/*!< in: undo log header */
667 	XID*		xid)	/*!< out: X/Open XA Transaction Identification */
668 {
669 	xid->set_format_id(static_cast<long>(mach_read_from_4(
670 		log_hdr + TRX_UNDO_XA_FORMAT)));
671 
672 	xid->set_gtrid_length(static_cast<long>(mach_read_from_4(
673 		log_hdr + TRX_UNDO_XA_TRID_LEN)));
674 
675 	xid->set_bqual_length(static_cast<long>(mach_read_from_4(
676 		log_hdr + TRX_UNDO_XA_BQUAL_LEN)));
677 
678 	xid->set_data(log_hdr + TRX_UNDO_XA_XID, XIDDATASIZE);
679 }
680 
681 /***************************************************************//**
682 Adds space for the XA XID after an undo log old-style header. */
683 static
684 void
685 trx_undo_header_add_space_for_xid(
686 /*==============================*/
687 	page_t*		undo_page,/*!< in: undo log segment header page */
688 	trx_ulogf_t*	log_hdr,/*!< in: undo log header */
689 	mtr_t*		mtr)	/*!< in: mtr */
690 {
691 	trx_upagef_t*	page_hdr;
692 	ulint		free;
693 	ulint		new_free;
694 
695 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
696 
697 	free = mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE);
698 
699 	/* free is now the end offset of the old style undo log header */
700 
701 	ut_a(free == (ulint)(log_hdr - undo_page) + TRX_UNDO_LOG_OLD_HDR_SIZE);
702 
703 	new_free = free + (TRX_UNDO_LOG_XA_HDR_SIZE
704 			   - TRX_UNDO_LOG_OLD_HDR_SIZE);
705 
706 	/* Add space for a XID after the header, update the free offset
707 	fields on the undo log page and in the undo log header */
708 
709 	mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_START, new_free,
710 			 MLOG_2BYTES, mtr);
711 
712 	mlog_write_ulint(page_hdr + TRX_UNDO_PAGE_FREE, new_free,
713 			 MLOG_2BYTES, mtr);
714 
715 	mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, new_free,
716 			 MLOG_2BYTES, mtr);
717 }
718 
719 /**********************************************************************//**
720 Writes the mtr log entry of an undo log header reuse. */
721 UNIV_INLINE
722 void
723 trx_undo_insert_header_reuse_log(
724 /*=============================*/
725 	const page_t*	undo_page,	/*!< in: undo log header page */
726 	trx_id_t	trx_id,		/*!< in: transaction id */
727 	mtr_t*		mtr)		/*!< in: mtr */
728 {
729 	mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_REUSE, mtr);
730 
731 	mlog_catenate_ull_compressed(mtr, trx_id);
732 }
733 #else /* !UNIV_HOTBACKUP */
734 # define trx_undo_insert_header_reuse_log(undo_page,trx_id,mtr) ((void) 0)
735 #endif /* !UNIV_HOTBACKUP */
736 
737 /** Parse the redo log entry of an undo log page header create or reuse.
738 @param[in]	type	MLOG_UNDO_HDR_CREATE or MLOG_UNDO_HDR_REUSE
739 @param[in]	ptr	redo log record
740 @param[in]	end_ptr	end of log buffer
741 @param[in,out]	page	page frame or NULL
742 @param[in,out]	mtr	mini-transaction or NULL
743 @return end of log record or NULL */
744 byte*
745 trx_undo_parse_page_header(
746 	mlog_id_t	type,
747 	const byte*	ptr,
748 	const byte*	end_ptr,
749 	page_t*		page,
750 	mtr_t*		mtr)
751 {
752 	trx_id_t	trx_id = mach_u64_parse_compressed(&ptr, end_ptr);
753 
754 	if (ptr != NULL && page != NULL) {
755 		switch (type) {
756 		case MLOG_UNDO_HDR_CREATE:
757 			trx_undo_header_create(page, trx_id, mtr);
758 			return(const_cast<byte*>(ptr));
759 		case MLOG_UNDO_HDR_REUSE:
760 			trx_undo_insert_header_reuse(page, trx_id, mtr);
761 			return(const_cast<byte*>(ptr));
762 		default:
763 			break;
764 		}
765 		ut_ad(0);
766 	}
767 
768 	return(const_cast<byte*>(ptr));
769 }
770 
771 /***************************************************************//**
772 Initializes a cached insert undo log header page for new use. NOTE that this
773 function has its own log record type MLOG_UNDO_HDR_REUSE. You must NOT change
774 the operation of this function!
775 @return undo log header byte offset on page */
776 static
777 ulint
778 trx_undo_insert_header_reuse(
779 /*=========================*/
780 	page_t*		undo_page,	/*!< in/out: insert undo log segment
781 					header page, x-latched */
782 	trx_id_t	trx_id,		/*!< in: transaction id */
783 	mtr_t*		mtr)		/*!< in: mtr */
784 {
785 	trx_upagef_t*	page_hdr;
786 	trx_usegf_t*	seg_hdr;
787 	trx_ulogf_t*	log_hdr;
788 	ulint		free;
789 	ulint		new_free;
790 
791 	ut_ad(mtr && undo_page);
792 
793 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
794 	seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
795 
796 	free = TRX_UNDO_SEG_HDR + TRX_UNDO_SEG_HDR_SIZE;
797 
798 	ut_a(free + TRX_UNDO_LOG_XA_HDR_SIZE < UNIV_PAGE_SIZE - 100);
799 
800 	log_hdr = undo_page + free;
801 
802 	new_free = free + TRX_UNDO_LOG_OLD_HDR_SIZE;
803 
804 	/* Insert undo data is not needed after commit: we may free all
805 	the space on the page */
806 
807 	ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
808 			      + TRX_UNDO_PAGE_TYPE)
809 	     == TRX_UNDO_INSERT);
810 
811 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START, new_free);
812 
813 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, new_free);
814 
815 	mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE);
816 
817 	log_hdr = undo_page + free;
818 
819 	mach_write_to_8(log_hdr + TRX_UNDO_TRX_ID, trx_id);
820 	mach_write_to_2(log_hdr + TRX_UNDO_LOG_START, new_free);
821 
822 	mach_write_to_1(log_hdr + TRX_UNDO_XID_EXISTS, FALSE);
823 	mach_write_to_1(log_hdr + TRX_UNDO_DICT_TRANS, FALSE);
824 
825 	/* Write the log record MLOG_UNDO_HDR_REUSE */
826 	trx_undo_insert_header_reuse_log(undo_page, trx_id, mtr);
827 
828 	return(free);
829 }
830 
831 #ifndef UNIV_HOTBACKUP
832 /**********************************************************************//**
833 Writes the redo log entry of an update undo log header discard. */
834 UNIV_INLINE
835 void
836 trx_undo_discard_latest_log(
837 /*========================*/
838 	page_t* undo_page,	/*!< in: undo log header page */
839 	mtr_t*	mtr)		/*!< in: mtr */
840 {
841 	mlog_write_initial_log_record(undo_page, MLOG_UNDO_HDR_DISCARD, mtr);
842 }
843 #else /* !UNIV_HOTBACKUP */
844 # define trx_undo_discard_latest_log(undo_page, mtr) ((void) 0)
845 #endif /* !UNIV_HOTBACKUP */
846 
847 /***********************************************************//**
848 Parses the redo log entry of an undo log page header discard.
849 @return end of log record or NULL */
850 byte*
851 trx_undo_parse_discard_latest(
852 /*==========================*/
853 	byte*	ptr,	/*!< in: buffer */
854 	byte*	end_ptr MY_ATTRIBUTE((unused)), /*!< in: buffer end */
855 	page_t*	page,	/*!< in: page or NULL */
856 	mtr_t*	mtr)	/*!< in: mtr or NULL */
857 {
858 	ut_ad(end_ptr);
859 
860 	if (page) {
861 		trx_undo_discard_latest_update_undo(page, mtr);
862 	}
863 
864 	return(ptr);
865 }
866 
867 /**********************************************************************//**
868 If an update undo log can be discarded immediately, this function frees the
869 space, resetting the page to the proper state for caching. */
870 static
871 void
872 trx_undo_discard_latest_update_undo(
873 /*================================*/
874 	page_t*	undo_page,	/*!< in: header page of an undo log of size 1 */
875 	mtr_t*	mtr)		/*!< in: mtr */
876 {
877 	trx_usegf_t*	seg_hdr;
878 	trx_upagef_t*	page_hdr;
879 	trx_ulogf_t*	log_hdr;
880 	trx_ulogf_t*	prev_log_hdr;
881 	ulint		free;
882 	ulint		prev_hdr_offset;
883 
884 	seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
885 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
886 
887 	free = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
888 	log_hdr = undo_page + free;
889 
890 	prev_hdr_offset = mach_read_from_2(log_hdr + TRX_UNDO_PREV_LOG);
891 
892 	if (prev_hdr_offset != 0) {
893 		prev_log_hdr = undo_page + prev_hdr_offset;
894 
895 		mach_write_to_2(page_hdr + TRX_UNDO_PAGE_START,
896 				mach_read_from_2(prev_log_hdr
897 						 + TRX_UNDO_LOG_START));
898 		mach_write_to_2(prev_log_hdr + TRX_UNDO_NEXT_LOG, 0);
899 	}
900 
901 	mach_write_to_2(page_hdr + TRX_UNDO_PAGE_FREE, free);
902 
903 	mach_write_to_2(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_CACHED);
904 	mach_write_to_2(seg_hdr + TRX_UNDO_LAST_LOG, prev_hdr_offset);
905 
906 	trx_undo_discard_latest_log(undo_page, mtr);
907 }
908 
909 #ifndef UNIV_HOTBACKUP
910 /********************************************************************//**
911 Tries to add a page to the undo log segment where the undo log is placed.
912 @return X-latched block if success, else NULL */
913 buf_block_t*
914 trx_undo_add_page(
915 /*==============*/
916 	trx_t*		trx,		/*!< in: transaction */
917 	trx_undo_t*	undo,		/*!< in: undo log memory object */
918 	trx_undo_ptr_t*	undo_ptr,	/*!< in: assign undo log from
919 					referred rollback segment. */
920 	mtr_t*		mtr)		/*!< in: mtr which does not have
921 					a latch to any undo log page;
922 					the caller must have reserved
923 					the rollback segment mutex */
924 {
925 	page_t*		header_page;
926 	buf_block_t*	new_block;
927 	page_t*		new_page;
928 	trx_rseg_t*	rseg;
929 	ulint		n_reserved;
930 
931 	ut_ad(mutex_own(&(trx->undo_mutex)));
932 	ut_ad(mutex_own(&(undo_ptr->rseg->mutex)));
933 
934 	rseg = undo_ptr->rseg;
935 
936 	if (rseg->curr_size == rseg->max_size) {
937 
938 		return(NULL);
939 	}
940 
941 	header_page = trx_undo_page_get(
942 		page_id_t(undo->space, undo->hdr_page_no),
943 		undo->page_size, mtr);
944 
945 	if (!fsp_reserve_free_extents(&n_reserved, undo->space, 1,
946 				      FSP_UNDO, mtr)) {
947 
948 		return(NULL);
949 	}
950 
951 	new_block = fseg_alloc_free_page_general(
952 		TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER
953 		+ header_page,
954 		undo->top_page_no + 1, FSP_UP, TRUE, mtr, mtr);
955 
956 	fil_space_release_free_extents(undo->space, n_reserved);
957 
958 	if (new_block == NULL) {
959 
960 		/* No space left */
961 
962 		return(NULL);
963 	}
964 
965 	ut_ad(rw_lock_get_x_lock_count(&new_block->lock) == 1);
966 	buf_block_dbg_add_level(new_block, SYNC_TRX_UNDO_PAGE);
967 	undo->last_page_no = new_block->page.id.page_no();
968 
969 	new_page = buf_block_get_frame(new_block);
970 
971 	trx_undo_page_init(new_page, undo->type, mtr);
972 
973 	flst_add_last(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
974 		      new_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
975 	undo->size++;
976 	rseg->curr_size++;
977 
978 	return(new_block);
979 }
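
/* An illustrative sketch, not part of InnoDB: the locking a caller is
expected to hold around trx_undo_add_page(), as asserted at the top of the
function above. The wrapper function is hypothetical; real callers grow the
log only when the current last page cannot hold the next undo record. */
#if 0
static buf_block_t*
grow_undo_log_sketch(
	trx_t*		trx,
	trx_undo_t*	undo,
	trx_undo_ptr_t*	undo_ptr,
	mtr_t*		mtr)	/* must not yet latch any undo log page */
{
	buf_block_t*	new_block;

	mutex_enter(&trx->undo_mutex);
	mutex_enter(&undo_ptr->rseg->mutex);

	new_block = trx_undo_add_page(trx, undo, undo_ptr, mtr);

	mutex_exit(&undo_ptr->rseg->mutex);
	mutex_exit(&trx->undo_mutex);

	/* NULL means the rollback segment or the tablespace is full */
	return(new_block);
}
#endif /* illustrative sketch */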
980 
981 /********************************************************************//**
982 Frees an undo log page that is not the header page.
983 @return last page number in remaining log */
984 static
985 ulint
986 trx_undo_free_page(
987 /*===============*/
988 	trx_rseg_t* rseg,	/*!< in: rollback segment */
989 	ibool	in_history,	/*!< in: TRUE if the undo log is in the history
990 				list */
991 	ulint	space,		/*!< in: space */
992 	ulint	hdr_page_no,	/*!< in: header page number */
993 	ulint	page_no,	/*!< in: page number to free: must not be the
994 				header page */
995 	mtr_t*	mtr)		/*!< in: mtr which does not have a latch to any
996 				undo log page; the caller must have reserved
997 				the rollback segment mutex */
998 {
999 	page_t*		header_page;
1000 	page_t*		undo_page;
1001 	fil_addr_t	last_addr;
1002 	trx_rsegf_t*	rseg_header;
1003 	ulint		hist_size;
1004 
1005 	ut_a(hdr_page_no != page_no);
1006 	ut_ad(mutex_own(&(rseg->mutex)));
1007 
1008 	undo_page = trx_undo_page_get(
1009 		page_id_t(space, page_no), rseg->page_size, mtr);
1010 
1011 	header_page = trx_undo_page_get(
1012 		page_id_t(space, hdr_page_no), rseg->page_size, mtr);
1013 
1014 	flst_remove(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST,
1015 		    undo_page + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_NODE, mtr);
1016 
1017 	fseg_free_page(header_page + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER,
1018 		       space, page_no, false, mtr);
1019 
1020 	last_addr = flst_get_last(header_page + TRX_UNDO_SEG_HDR
1021 				  + TRX_UNDO_PAGE_LIST, mtr);
1022 	rseg->curr_size--;
1023 
1024 	if (in_history) {
1025 		rseg_header = trx_rsegf_get(space, rseg->page_no,
1026 					    rseg->page_size, mtr);
1027 
1028 		hist_size = mtr_read_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
1029 					   MLOG_4BYTES, mtr);
1030 		ut_ad(hist_size > 0);
1031 		mlog_write_ulint(rseg_header + TRX_RSEG_HISTORY_SIZE,
1032 				 hist_size - 1, MLOG_4BYTES, mtr);
1033 	}
1034 
1035 	return(last_addr.page);
1036 }
1037 
1038 /********************************************************************//**
1039 Frees the last undo log page.
1040 The caller must hold the rollback segment mutex. */
1041 void
1042 trx_undo_free_last_page_func(
1043 /*==========================*/
1044 #ifdef UNIV_DEBUG
1045 	const trx_t*	trx,	/*!< in: transaction */
1046 #endif /* UNIV_DEBUG */
1047 	trx_undo_t*	undo,	/*!< in/out: undo log memory copy */
1048 	mtr_t*		mtr)	/*!< in/out: mini-transaction which does not
1049 				have a latch to any undo log page or which
1050 				has allocated the undo log page */
1051 {
1052 	ut_ad(mutex_own(&trx->undo_mutex));
1053 	ut_ad(undo->hdr_page_no != undo->last_page_no);
1054 	ut_ad(undo->size > 0);
1055 
1056 	undo->last_page_no = trx_undo_free_page(
1057 		undo->rseg, FALSE, undo->space,
1058 		undo->hdr_page_no, undo->last_page_no, mtr);
1059 
1060 	undo->size--;
1061 }
1062 
1063 /** Empties an undo log header page of undo records for that undo log.
1064 Other undo logs may still have records on that page, if it is an update
1065 undo log.
1066 @param[in]	space		space
1067 @param[in]	page_size	page size
1068 @param[in]	hdr_page_no	header page number
1069 @param[in]	hdr_offset	header offset
1070 @param[in,out]	mtr		mini-transaction */
1071 static
1072 void
1073 trx_undo_empty_header_page(
1074 	ulint			space,
1075 	const page_size_t&	page_size,
1076 	ulint			hdr_page_no,
1077 	ulint			hdr_offset,
1078 	mtr_t*			mtr)
1079 {
1080 	page_t*		header_page;
1081 	trx_ulogf_t*	log_hdr;
1082 	ulint		end;
1083 
1084 	header_page = trx_undo_page_get(
1085 		page_id_t(space, hdr_page_no), page_size, mtr);
1086 
1087 	log_hdr = header_page + hdr_offset;
1088 
1089 	end = trx_undo_page_get_end(header_page, hdr_page_no, hdr_offset);
1090 
1091 	mlog_write_ulint(log_hdr + TRX_UNDO_LOG_START, end, MLOG_2BYTES, mtr);
1092 }
1093 
1094 /***********************************************************************//**
1095 Truncates an undo log from the end. This function is used during a rollback
1096 to free space from an undo log. */
1097 void
1098 trx_undo_truncate_end_func(
1099 /*=======================*/
1100 #ifdef UNIV_DEBUG
1101 	const trx_t*	trx,	/*!< in: transaction whose undo log it is */
1102 #endif /* UNIV_DEBUG */
1103 	trx_undo_t*	undo,	/*!< in: undo log */
1104 	undo_no_t	limit)	/*!< in: all undo records with undo number
1105 				>= this value should be truncated */
1106 {
1107 	page_t*		undo_page;
1108 	ulint		last_page_no;
1109 	trx_undo_rec_t* rec;
1110 	trx_undo_rec_t* trunc_here;
1111 	mtr_t		mtr;
1112 	const bool	noredo = trx_sys_is_noredo_rseg_slot(undo->rseg->id);
1113 
1114 	ut_ad(mutex_own(&(trx->undo_mutex)));
1115 
1116 	ut_ad(mutex_own(&undo->rseg->mutex));
1117 
1118 	for (;;) {
1119 		mtr_start(&mtr);
1120 		if (noredo) {
1121 			mtr.set_log_mode(MTR_LOG_NO_REDO);
1122 			ut_ad(trx->rsegs.m_noredo.rseg == undo->rseg);
1123 		} else {
1124 			ut_ad(trx->rsegs.m_redo.rseg == undo->rseg);
1125 		}
1126 
1127 		trunc_here = NULL;
1128 
1129 		last_page_no = undo->last_page_no;
1130 
1131 		undo_page = trx_undo_page_get(
1132 			page_id_t(undo->space, last_page_no),
1133 			undo->page_size, &mtr);
1134 
1135 		rec = trx_undo_page_get_last_rec(undo_page, undo->hdr_page_no,
1136 						 undo->hdr_offset);
1137 		while (rec) {
1138 			if (trx_undo_rec_get_undo_no(rec) >= limit) {
1139 				/* Truncate at least this record off, maybe
1140 				more */
1141 				trunc_here = rec;
1142 			} else {
1143 				goto function_exit;
1144 			}
1145 
1146 			rec = trx_undo_page_get_prev_rec(rec,
1147 							 undo->hdr_page_no,
1148 							 undo->hdr_offset);
1149 		}
1150 
1151 		if (last_page_no == undo->hdr_page_no) {
1152 
1153 			goto function_exit;
1154 		}
1155 
1156 		ut_ad(last_page_no == undo->last_page_no);
1157 		trx_undo_free_last_page(trx, undo, &mtr);
1158 
1159 		mtr_commit(&mtr);
1160 	}
1161 
1162 function_exit:
1163 	if (trunc_here) {
1164 		mlog_write_ulint(undo_page + TRX_UNDO_PAGE_HDR
1165 				 + TRX_UNDO_PAGE_FREE,
1166 				 trunc_here - undo_page, MLOG_2BYTES, &mtr);
1167 	}
1168 
1169 	mtr_commit(&mtr);
1170 }
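
/* An illustrative sketch, not part of InnoDB: how a rollback path might use
the truncation above after it has undone every record with undo number >=
limit. The wrapper is hypothetical; it only shows the mutexes the function
asserts and the UNIV_DEBUG-only trx argument. */
#if 0
static void
rollback_truncate_sketch(trx_t* trx, trx_undo_t* undo, undo_no_t limit)
{
	mutex_enter(&trx->undo_mutex);
	mutex_enter(&undo->rseg->mutex);

	trx_undo_truncate_end_func(
#ifdef UNIV_DEBUG
		trx,
#endif /* UNIV_DEBUG */
		undo, limit);

	mutex_exit(&undo->rseg->mutex);
	mutex_exit(&trx->undo_mutex);
}
#endif /* illustrative sketch */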
1171 
1172 /** Truncate the head of an undo log.
1173 NOTE that only whole pages are freed; the header page is not
1174 freed, but emptied, if all the records there are below the limit.
1175 @param[in,out]	rseg		rollback segment
1176 @param[in]	hdr_page_no	header page number
1177 @param[in]	hdr_offset	header offset on the page
1178 @param[in]	limit		first undo number to preserve
1179 (everything below the limit will be truncated) */
1180 void
1181 trx_undo_truncate_start(
1182 	trx_rseg_t*	rseg,
1183 	ulint		hdr_page_no,
1184 	ulint		hdr_offset,
1185 	undo_no_t	limit)
1186 {
1187 	page_t*		undo_page;
1188 	trx_undo_rec_t* rec;
1189 	trx_undo_rec_t* last_rec;
1190 	ulint		page_no;
1191 	mtr_t		mtr;
1192 
1193 	ut_ad(mutex_own(&(rseg->mutex)));
1194 
1195 	if (!limit) {
1196 		return;
1197 	}
1198 loop:
1199 	mtr_start(&mtr);
1200 
1201 	if (trx_sys_is_noredo_rseg_slot(rseg->id)) {
1202 		mtr.set_log_mode(MTR_LOG_NO_REDO);
1203 	}
1204 
1205 	rec = trx_undo_get_first_rec(rseg->space, rseg->page_size,
1206 				     hdr_page_no, hdr_offset,
1207 				     RW_X_LATCH, &mtr);
1208 	if (rec == NULL) {
1209 		/* Already empty */
1210 
1211 		mtr_commit(&mtr);
1212 
1213 		return;
1214 	}
1215 
1216 	undo_page = page_align(rec);
1217 
1218 	last_rec = trx_undo_page_get_last_rec(undo_page, hdr_page_no,
1219 					      hdr_offset);
1220 	if (trx_undo_rec_get_undo_no(last_rec) >= limit) {
1221 
1222 		mtr_commit(&mtr);
1223 
1224 		return;
1225 	}
1226 
1227 	page_no = page_get_page_no(undo_page);
1228 
1229 	if (page_no == hdr_page_no) {
1230 		trx_undo_empty_header_page(rseg->space, rseg->page_size,
1231 					   hdr_page_no, hdr_offset,
1232 					   &mtr);
1233 	} else {
1234 		trx_undo_free_page(rseg, TRUE, rseg->space, hdr_page_no,
1235 				   page_no, &mtr);
1236 	}
1237 
1238 	mtr_commit(&mtr);
1239 
1240 	goto loop;
1241 }
1242 
1243 /** Frees an undo log segment which is not in the history list.
1244 @param[in]	undo	undo log
1245 @param[in]	noredo	whether the undo log is not redo-logged */
1246 static
1247 void
1248 trx_undo_seg_free(
1249 	const trx_undo_t*	undo,
1250 	bool			noredo)
1251 {
1252 	trx_rseg_t*	rseg;
1253 	fseg_header_t*	file_seg;
1254 	trx_rsegf_t*	rseg_header;
1255 	trx_usegf_t*	seg_header;
1256 	ibool		finished;
1257 	mtr_t		mtr;
1258 
1259 	rseg = undo->rseg;
1260 
1261 	do {
1262 
1263 		mtr_start(&mtr);
1264 
1265 		if (noredo) {
1266 			mtr.set_log_mode(MTR_LOG_NO_REDO);
1267 		}
1268 
1269 		mutex_enter(&(rseg->mutex));
1270 
1271 		seg_header = trx_undo_page_get(page_id_t(undo->space,
1272 							 undo->hdr_page_no),
1273 					       undo->page_size, &mtr)
1274 			+ TRX_UNDO_SEG_HDR;
1275 
1276 		file_seg = seg_header + TRX_UNDO_FSEG_HEADER;
1277 
1278 		finished = fseg_free_step(file_seg, false, &mtr);
1279 
1280 		if (finished) {
1281 			/* Update the rseg header */
1282 			rseg_header = trx_rsegf_get(
1283 				rseg->space, rseg->page_no, rseg->page_size,
1284 				&mtr);
1285 			trx_rsegf_set_nth_undo(rseg_header, undo->id, FIL_NULL,
1286 					       &mtr);
1287 
1288 			MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_USED);
1289 		}
1290 
1291 		mutex_exit(&(rseg->mutex));
1292 		mtr_commit(&mtr);
1293 	} while (!finished);
1294 }
1295 
1296 /*========== UNDO LOG MEMORY COPY INITIALIZATION =====================*/
1297 
1298 /********************************************************************//**
1299 Creates and initializes an undo log memory object according to the values
1300 in the header in file, when the database is started. The memory object is
1301 inserted in the appropriate list of rseg.
1302 @return own: the undo log memory object */
1303 static
1304 trx_undo_t*
1305 trx_undo_mem_create_at_db_start(
1306 /*============================*/
1307 	trx_rseg_t*	rseg,	/*!< in: rollback segment memory object */
1308 	ulint		id,	/*!< in: slot index within rseg */
1309 	ulint		page_no,/*!< in: undo log segment page number */
1310 	mtr_t*		mtr)	/*!< in: mtr */
1311 {
1312 	page_t*		undo_page;
1313 	trx_upagef_t*	page_header;
1314 	trx_usegf_t*	seg_header;
1315 	trx_ulogf_t*	undo_header;
1316 	trx_undo_t*	undo;
1317 	ulint		type;
1318 	ulint		state;
1319 	trx_id_t	trx_id;
1320 	ulint		offset;
1321 	fil_addr_t	last_addr;
1322 	page_t*		last_page;
1323 	trx_undo_rec_t*	rec;
1324 	XID		xid;
1325 	ibool		xid_exists = FALSE;
1326 
1327 	ut_a(id < TRX_RSEG_N_SLOTS);
1328 
1329 	undo_page = trx_undo_page_get(
1330 		page_id_t(rseg->space, page_no), rseg->page_size, mtr);
1331 
1332 	page_header = undo_page + TRX_UNDO_PAGE_HDR;
1333 
1334 	type = mtr_read_ulint(page_header + TRX_UNDO_PAGE_TYPE, MLOG_2BYTES,
1335 			      mtr);
1336 	seg_header = undo_page + TRX_UNDO_SEG_HDR;
1337 
1338 	state = mach_read_from_2(seg_header + TRX_UNDO_STATE);
1339 
1340 	offset = mach_read_from_2(seg_header + TRX_UNDO_LAST_LOG);
1341 
1342 	undo_header = undo_page + offset;
1343 
1344 	trx_id = mach_read_from_8(undo_header + TRX_UNDO_TRX_ID);
1345 
1346 	xid_exists = mtr_read_ulint(undo_header + TRX_UNDO_XID_EXISTS,
1347 				    MLOG_1BYTE, mtr);
1348 
1349 	/* Read X/Open XA transaction identification if it exists, or
1350 	set it to NULL. */
1351 	xid.reset();
1352 
1353 	if (xid_exists == TRUE) {
1354 		trx_undo_read_xid(undo_header, &xid);
1355 	}
1356 
1357 	mutex_enter(&(rseg->mutex));
1358 
1359 	undo = trx_undo_mem_create(rseg, id, type, trx_id, &xid,
1360 				   page_no, offset);
1361 	mutex_exit(&(rseg->mutex));
1362 
1363 	undo->dict_operation =	mtr_read_ulint(
1364 		undo_header + TRX_UNDO_DICT_TRANS, MLOG_1BYTE, mtr);
1365 
1366 	undo->table_id = mach_read_from_8(undo_header + TRX_UNDO_TABLE_ID);
1367 	undo->state = state;
1368 	undo->size = flst_get_len(seg_header + TRX_UNDO_PAGE_LIST);
1369 
1370 	/* If the log segment is being freed, the page list is inconsistent! */
1371 	if (state == TRX_UNDO_TO_FREE) {
1372 
1373 		goto add_to_list;
1374 	}
1375 
1376 	last_addr = flst_get_last(seg_header + TRX_UNDO_PAGE_LIST, mtr);
1377 
1378 	undo->last_page_no = last_addr.page;
1379 	undo->top_page_no = last_addr.page;
1380 
1381 	last_page = trx_undo_page_get(
1382 		page_id_t(rseg->space, undo->last_page_no),
1383 		rseg->page_size, mtr);
1384 
1385 	rec = trx_undo_page_get_last_rec(last_page, page_no, offset);
1386 
1387 	if (rec == NULL) {
1388 		undo->empty = TRUE;
1389 	} else {
1390 		undo->empty = FALSE;
1391 		undo->top_offset = rec - last_page;
1392 		undo->top_undo_no = trx_undo_rec_get_undo_no(rec);
1393 	}
1394 add_to_list:
1395 	if (type == TRX_UNDO_INSERT) {
1396 		if (state != TRX_UNDO_CACHED) {
1397 
1398 			UT_LIST_ADD_LAST(rseg->insert_undo_list, undo);
1399 		} else {
1400 
1401 			UT_LIST_ADD_LAST(rseg->insert_undo_cached, undo);
1402 
1403 			MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1404 		}
1405 	} else {
1406 		ut_ad(type == TRX_UNDO_UPDATE);
1407 		if (state != TRX_UNDO_CACHED) {
1408 
1409 			UT_LIST_ADD_LAST(rseg->update_undo_list, undo);
1410 		} else {
1411 
1412 			UT_LIST_ADD_LAST(rseg->update_undo_cached, undo);
1413 
1414 			MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1415 		}
1416 	}
1417 
1418 	return(undo);
1419 }
1420 
1421 /********************************************************************//**
1422 Initializes the undo log lists for a rollback segment memory copy. This
1423 function is only called when the database is started or a new rollback
1424 segment is created.
1425 @return the combined size of undo log segments in pages */
1426 ulint
1427 trx_undo_lists_init(
1428 /*================*/
1429 	trx_rseg_t*	rseg)	/*!< in: rollback segment memory object */
1430 {
1431 	ulint		size	= 0;
1432 	trx_rsegf_t*	rseg_header;
1433 	ulint		i;
1434 	mtr_t		mtr;
1435 
1436 	mtr_start(&mtr);
1437 
1438 	rseg_header = trx_rsegf_get_new(
1439 		rseg->space, rseg->page_no, rseg->page_size, &mtr);
1440 
1441 	for (i = 0; i < TRX_RSEG_N_SLOTS; i++) {
1442 		ulint	page_no;
1443 
1444 		page_no = trx_rsegf_get_nth_undo(rseg_header, i, &mtr);
1445 
1446 		/* In forced recovery: try to avoid operations which look
1447 		at database pages; undo logs are rapidly changing data, and
1448 		the probability that they are in an inconsistent state is
1449 		high */
1450 
1451 		if (page_no != FIL_NULL
1452 		    && srv_force_recovery < SRV_FORCE_NO_UNDO_LOG_SCAN) {
1453 
1454 			trx_undo_t*	undo;
1455 
1456 			undo = trx_undo_mem_create_at_db_start(
1457 				rseg, i, page_no, &mtr);
1458 
1459 			size += undo->size;
1460 
1461 			mtr_commit(&mtr);
1462 
1463 			mtr_start(&mtr);
1464 
1465 			rseg_header = trx_rsegf_get(
1466 				rseg->space, rseg->page_no, rseg->page_size,
1467 				&mtr);
1468 
1469 			/* Found a used slot */
1470 			MONITOR_INC(MONITOR_NUM_UNDO_SLOT_USED);
1471 		}
1472 	}
1473 
1474 	mtr_commit(&mtr);
1475 
1476 	return(size);
1477 }
1478 
1479 /********************************************************************//**
1480 Creates and initializes an undo log memory object.
1481 @return own: the undo log memory object */
1482 static
1483 trx_undo_t*
1484 trx_undo_mem_create(
1485 /*================*/
1486 	trx_rseg_t*	rseg,	/*!< in: rollback segment memory object */
1487 	ulint		id,	/*!< in: slot index within rseg */
1488 	ulint		type,	/*!< in: type of the log: TRX_UNDO_INSERT or
1489 				TRX_UNDO_UPDATE */
1490 	trx_id_t	trx_id,	/*!< in: id of the trx for which the undo log
1491 				is created */
1492 	const XID*	xid,	/*!< in: X/Open transaction identification */
1493 	ulint		page_no,/*!< in: undo log header page number */
1494 	ulint		offset)	/*!< in: undo log header byte offset on page */
1495 {
1496 	trx_undo_t*	undo;
1497 
1498 	ut_ad(mutex_own(&(rseg->mutex)));
1499 
1500 	ut_a(id < TRX_RSEG_N_SLOTS);
1501 
1502 	undo = static_cast<trx_undo_t*>(ut_malloc_nokey(sizeof(*undo)));
1503 
1504 	if (undo == NULL) {
1505 
1506 		return(NULL);
1507 	}
1508 
1509 	undo->id = id;
1510 	undo->type = type;
1511 	undo->state = TRX_UNDO_ACTIVE;
1512 	undo->del_marks = FALSE;
1513 	undo->trx_id = trx_id;
1514 	undo->xid = *xid;
1515 
1516 	undo->dict_operation = FALSE;
1517 
1518 	undo->rseg = rseg;
1519 
1520 	undo->space = rseg->space;
1521 	undo->page_size.copy_from(rseg->page_size);
1522 	undo->hdr_page_no = page_no;
1523 	undo->hdr_offset = offset;
1524 	undo->last_page_no = page_no;
1525 	undo->size = 1;
1526 
1527 	undo->empty = TRUE;
1528 	undo->top_page_no = page_no;
1529 	undo->guess_block = NULL;
1530 
1531 	return(undo);
1532 }
1533 
1534 /********************************************************************//**
1535 Initializes a cached undo log object for new use. */
1536 static
1537 void
1538 trx_undo_mem_init_for_reuse(
1539 /*========================*/
1540 	trx_undo_t*	undo,	/*!< in: undo log to init */
1541 	trx_id_t	trx_id,	/*!< in: id of the trx for which the undo log
1542 				is created */
1543 	const XID*	xid,	/*!< in: X/Open XA transaction identification*/
1544 	ulint		offset)	/*!< in: undo log header byte offset on page */
1545 {
1546 	ut_ad(mutex_own(&((undo->rseg)->mutex)));
1547 
1548 	ut_a(undo->id < TRX_RSEG_N_SLOTS);
1549 
1550 	undo->state = TRX_UNDO_ACTIVE;
1551 	undo->del_marks = FALSE;
1552 	undo->trx_id = trx_id;
1553 	undo->xid = *xid;
1554 
1555 	undo->dict_operation = FALSE;
1556 
1557 	undo->hdr_offset = offset;
1558 	undo->empty = TRUE;
1559 }
1560 
1561 /********************************************************************//**
1562 Frees an undo log memory copy. */
1563 void
1564 trx_undo_mem_free(
1565 /*==============*/
1566 	trx_undo_t*	undo)	/*!< in: the undo object to be freed */
1567 {
1568 	ut_a(undo->id < TRX_RSEG_N_SLOTS);
1569 
1570 	ut_free(undo);
1571 }
1572 
1573 /**********************************************************************//**
1574 Creates a new undo log.
1575 @return DB_SUCCESS if successful in creating the new undo log object;
1576 possible error codes are: DB_TOO_MANY_CONCURRENT_TRXS
1577 DB_OUT_OF_FILE_SPACE DB_OUT_OF_MEMORY */
1578 static MY_ATTRIBUTE((nonnull, warn_unused_result))
1579 dberr_t
1580 trx_undo_create(
1581 /*============*/
1582 	trx_t*		trx,	/*!< in: transaction */
1583 	trx_rseg_t*	rseg,	/*!< in: rollback segment memory copy */
1584 	ulint		type,	/*!< in: type of the log: TRX_UNDO_INSERT or
1585 				TRX_UNDO_UPDATE */
1586 	trx_id_t	trx_id,	/*!< in: id of the trx for which the undo log
1587 				is created */
1588 	const XID*	xid,	/*!< in: X/Open transaction identification*/
1589 	trx_undo_t**	undo,	/*!< out: the new undo log object, undefined
1590 				if it did not succeed */
1591 	mtr_t*		mtr)	/*!< in: mtr */
1592 {
1593 	trx_rsegf_t*	rseg_header;
1594 	ulint		page_no;
1595 	ulint		offset;
1596 	ulint		id;
1597 	page_t*		undo_page;
1598 	dberr_t		err;
1599 
1600 	ut_ad(mutex_own(&(rseg->mutex)));
1601 
1602 	if (rseg->curr_size == rseg->max_size) {
1603 
1604 		return(DB_OUT_OF_FILE_SPACE);
1605 	}
1606 
1607 	rseg->curr_size++;
1608 
1609 	rseg_header = trx_rsegf_get(rseg->space, rseg->page_no,
1610 				    rseg->page_size, mtr);
1611 
1612 	err = trx_undo_seg_create(rseg, rseg_header, type, &id,
1613 				  &undo_page, mtr);
1614 
1615 	if (err != DB_SUCCESS) {
1616 		/* Did not succeed */
1617 
1618 		rseg->curr_size--;
1619 
1620 		return(err);
1621 	}
1622 
1623 	page_no = page_get_page_no(undo_page);
1624 
1625 	offset = trx_undo_header_create(undo_page, trx_id, mtr);
1626 
1627 	trx_undo_header_add_space_for_xid(undo_page, undo_page + offset, mtr);
1628 
1629 	*undo = trx_undo_mem_create(rseg, id, type, trx_id, xid,
1630 				   page_no, offset);
1631 	if (*undo == NULL) {
1632 
1633 		err = DB_OUT_OF_MEMORY;
1634 	}
1635 
1636 	return(err);
1637 }
1638 
1639 /*================ UNDO LOG ASSIGNMENT AND CLEANUP =====================*/
1640 
1641 /********************************************************************//**
1642 Reuses a cached undo log.
1643 @return the undo log memory object, NULL if none cached */
1644 static
1645 trx_undo_t*
1646 trx_undo_reuse_cached(
1647 /*==================*/
1648 	trx_t*		trx,	/*!< in: transaction */
1649 	trx_rseg_t*	rseg,	/*!< in: rollback segment memory object */
1650 	ulint		type,	/*!< in: type of the log: TRX_UNDO_INSERT or
1651 				TRX_UNDO_UPDATE */
1652 	trx_id_t	trx_id,	/*!< in: id of the trx for which the undo log
1653 				is used */
1654 	const XID*	xid,	/*!< in: X/Open XA transaction identification */
1655 	mtr_t*		mtr)	/*!< in: mtr */
1656 {
1657 	trx_undo_t*	undo;
1658 	page_t*		undo_page;
1659 	ulint		offset;
1660 
1661 	ut_ad(mutex_own(&(rseg->mutex)));
1662 
1663 	if (type == TRX_UNDO_INSERT) {
1664 
1665 		undo = UT_LIST_GET_FIRST(rseg->insert_undo_cached);
1666 		if (undo == NULL) {
1667 
1668 			return(NULL);
1669 		}
1670 
1671 		UT_LIST_REMOVE(rseg->insert_undo_cached, undo);
1672 
1673 		MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1674 	} else {
1675 		ut_ad(type == TRX_UNDO_UPDATE);
1676 
1677 		undo = UT_LIST_GET_FIRST(rseg->update_undo_cached);
1678 		if (undo == NULL) {
1679 
1680 			return(NULL);
1681 		}
1682 
1683 		UT_LIST_REMOVE(rseg->update_undo_cached, undo);
1684 
1685 		MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
1686 	}
1687 
1688 	ut_ad(undo->size == 1);
1689 	ut_a(undo->id < TRX_RSEG_N_SLOTS);
1690 
1691 	undo_page = trx_undo_page_get(
1692 		page_id_t(undo->space, undo->hdr_page_no),
1693 		undo->page_size, mtr);
1694 
1695 	if (type == TRX_UNDO_INSERT) {
1696 		offset = trx_undo_insert_header_reuse(undo_page, trx_id, mtr);
1697 
1698 		trx_undo_header_add_space_for_xid(
1699 			undo_page, undo_page + offset, mtr);
1700 	} else {
1701 		ut_a(mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
1702 				      + TRX_UNDO_PAGE_TYPE)
1703 		     == TRX_UNDO_UPDATE);
1704 
1705 		offset = trx_undo_header_create(undo_page, trx_id, mtr);
1706 
1707 		trx_undo_header_add_space_for_xid(
1708 			undo_page, undo_page + offset, mtr);
1709 	}
1710 
1711 	trx_undo_mem_init_for_reuse(undo, trx_id, xid, offset);
1712 
1713 	return(undo);
1714 }
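
/* Note on the two branches above: for an insert undo log the header on the
cached page is simply reset in place with trx_undo_insert_header_reuse(),
while for an update undo log a fresh log header is created on the same page
with trx_undo_header_create(); presumably the latter is needed because the
headers of earlier update transactions on that page may still be required by
purge, whereas insert undo data is discarded at commit (see
trx_undo_insert_cleanup() below). */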
1715 
1716 /**********************************************************************//**
1717 Marks an undo log header as a header of a data dictionary operation
1718 transaction. */
1719 static
1720 void
1721 trx_undo_mark_as_dict_operation(
1722 /*============================*/
1723 	trx_t*		trx,	/*!< in: dict op transaction */
1724 	trx_undo_t*	undo,	/*!< in: assigned undo log */
1725 	mtr_t*		mtr)	/*!< in: mtr */
1726 {
1727 	page_t*	hdr_page;
1728 
1729 	hdr_page = trx_undo_page_get(
1730 		page_id_t(undo->space, undo->hdr_page_no),
1731 		undo->page_size, mtr);
1732 
1733 	switch (trx_get_dict_operation(trx)) {
1734 	case TRX_DICT_OP_NONE:
1735 		ut_error;
1736 	case TRX_DICT_OP_INDEX:
1737 		/* Do not discard the table on recovery. */
1738 		undo->table_id = 0;
1739 		break;
1740 	case TRX_DICT_OP_TABLE:
1741 		undo->table_id = trx->table_id;
1742 		break;
1743 	}
1744 
1745 	mlog_write_ulint(hdr_page + undo->hdr_offset
1746 			 + TRX_UNDO_DICT_TRANS,
1747 			 TRUE, MLOG_1BYTE, mtr);
1748 
1749 	mlog_write_ull(hdr_page + undo->hdr_offset + TRX_UNDO_TABLE_ID,
1750 		       undo->table_id, mtr);
1751 
1752 	undo->dict_operation = TRUE;
1753 }
1754 
1755 /**********************************************************************//**
1756 Assigns an undo log for a transaction. A new undo log is created or a cached
1757 undo log reused.
1758 @return DB_SUCCESS if undo log assign successful, possible error codes
1759 are: DB_TOO_MANY_CONCURRENT_TRXS DB_OUT_OF_FILE_SPACE DB_READ_ONLY
1760 DB_OUT_OF_MEMORY */
1761 dberr_t
1762 trx_undo_assign_undo(
1763 /*=================*/
1764 	trx_t*		trx,		/*!< in: transaction */
1765 	trx_undo_ptr_t*	undo_ptr,	/*!< in: assign undo log from
1766 					referred rollback segment. */
1767 	ulint		type)		/*!< in: TRX_UNDO_INSERT or
1768 					TRX_UNDO_UPDATE */
1769 {
1770 	trx_rseg_t*	rseg;
1771 	trx_undo_t*	undo;
1772 	mtr_t		mtr;
1773 	dberr_t		err = DB_SUCCESS;
1774 
1775 	ut_ad(trx);
1776 
1777 	/* In a read-only scenario trx->rsegs.m_redo.rseg can be NULL, but
1778 	a request to assign an undo log is still valid, because temporary
1779 	tables can be updated in read-only mode.
1780 	If no rollback segment is assigned to trx and yet some object is
1781 	being updated, something is wrong, and this assertion catches
1782 	that case. */
1783 	ut_ad(trx_is_rseg_assigned(trx));
1784 
1785 	rseg = undo_ptr->rseg;
1786 
1787 	ut_ad(mutex_own(&(trx->undo_mutex)));
1788 
1789 	mtr_start(&mtr);
1790 	if (&trx->rsegs.m_noredo == undo_ptr) {
1791 		mtr.set_log_mode(MTR_LOG_NO_REDO);
1792 	} else {
1793 		ut_ad(&trx->rsegs.m_redo == undo_ptr);
1794 	}
1795 
1796 	if (trx_sys_is_noredo_rseg_slot(rseg->id)) {
1797 		mtr.set_log_mode(MTR_LOG_NO_REDO);
1798 		ut_ad(undo_ptr == &trx->rsegs.m_noredo);
1799 	} else {
1800 		ut_ad(undo_ptr == &trx->rsegs.m_redo);
1801 	}
1802 
1803 	mutex_enter(&rseg->mutex);
1804 
1805 	DBUG_EXECUTE_IF(
1806 		"ib_create_table_fail_too_many_trx",
1807 		err = DB_TOO_MANY_CONCURRENT_TRXS;
1808 		goto func_exit;
1809 	);
1810 
1811 	undo = trx_undo_reuse_cached(trx, rseg, type, trx->id, trx->xid,
1812 				     &mtr);
1813 	if (undo == NULL) {
1814 		err = trx_undo_create(trx, rseg, type, trx->id, trx->xid,
1815 				      &undo, &mtr);
1816 		if (err != DB_SUCCESS) {
1817 
1818 			goto func_exit;
1819 		}
1820 	}
1821 
1822 	if (type == TRX_UNDO_INSERT) {
1823 		UT_LIST_ADD_FIRST(rseg->insert_undo_list, undo);
1824 		ut_ad(undo_ptr->insert_undo == NULL);
1825 		undo_ptr->insert_undo = undo;
1826 	} else {
1827 		UT_LIST_ADD_FIRST(rseg->update_undo_list, undo);
1828 		ut_ad(undo_ptr->update_undo == NULL);
1829 		undo_ptr->update_undo = undo;
1830 	}
1831 
1832 	if (trx_get_dict_operation(trx) != TRX_DICT_OP_NONE) {
1833 		trx_undo_mark_as_dict_operation(trx, undo, &mtr);
1834 	}
1835 
1836 func_exit:
1837 	mutex_exit(&(rseg->mutex));
1838 	mtr_commit(&mtr);
1839 
1840 	return(err);
1841 }
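
/* A minimal, hypothetical usage sketch of trx_undo_assign_undo(); the caller
must hold trx->undo_mutex and must have a rollback segment assigned (see the
assertions above):

	mutex_enter(&trx->undo_mutex);

	dberr_t	err = trx_undo_assign_undo(
		trx, &trx->rsegs.m_redo, TRX_UNDO_UPDATE);

	mutex_exit(&trx->undo_mutex);

	if (err != DB_SUCCESS) {
		return(err);	(e.g. DB_TOO_MANY_CONCURRENT_TRXS
				or DB_OUT_OF_FILE_SPACE)
	}
*/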
1842 
1843 /******************************************************************//**
1844 Sets the state of the undo log segment at a transaction finish.
1845 @return undo log segment header page, x-latched */
1846 page_t*
1847 trx_undo_set_state_at_finish(
1848 /*=========================*/
1849 	trx_undo_t*	undo,	/*!< in: undo log memory copy */
1850 	mtr_t*		mtr)	/*!< in: mtr */
1851 {
1852 	trx_usegf_t*	seg_hdr;
1853 	trx_upagef_t*	page_hdr;
1854 	page_t*		undo_page;
1855 	ulint		state;
1856 
1857 	ut_a(undo->id < TRX_RSEG_N_SLOTS);
1858 
1859 	undo_page = trx_undo_page_get(
1860 		page_id_t(undo->space, undo->hdr_page_no),
1861 		undo->page_size, mtr);
1862 
1863 	seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1864 	page_hdr = undo_page + TRX_UNDO_PAGE_HDR;
1865 
1866 	if (undo->size == 1
1867 	    && mach_read_from_2(page_hdr + TRX_UNDO_PAGE_FREE)
1868 	       < TRX_UNDO_PAGE_REUSE_LIMIT) {
1869 
1870 		state = TRX_UNDO_CACHED;
1871 
1872 	} else if (undo->type == TRX_UNDO_INSERT) {
1873 
1874 		state = TRX_UNDO_TO_FREE;
1875 	} else {
1876 		state = TRX_UNDO_TO_PURGE;
1877 	}
1878 
1879 	undo->state = state;
1880 
1881 	mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, state, MLOG_2BYTES, mtr);
1882 
1883 	return(undo_page);
1884 }
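
/* Summary of the state chosen above:
   - single-page segment whose free offset is still below
     TRX_UNDO_PAGE_REUSE_LIMIT          -> TRX_UNDO_CACHED (page is reusable)
   - otherwise, an insert undo log      -> TRX_UNDO_TO_FREE
   - otherwise, an update undo log      -> TRX_UNDO_TO_PURGE */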
1885 
1886 /** Set the state of the undo log segment at an XA PREPARE or XA ROLLBACK.
1887 @param[in,out]	trx		transaction
1888 @param[in,out]	undo		insert_undo or update_undo log
1889 @param[in]	rollback	false=XA PREPARE, true=XA ROLLBACK
1890 @param[in,out]	mtr		mini-transaction
1891 @return undo log segment header page, x-latched */
1892 page_t*
1893 trx_undo_set_state_at_prepare(
1894 	trx_t*		trx,
1895 	trx_undo_t*	undo,
1896 	bool		rollback,
1897 	mtr_t*		mtr)
1898 {
1899 	trx_usegf_t*	seg_hdr;
1900 	trx_ulogf_t*	undo_header;
1901 	page_t*		undo_page;
1902 	ulint		offset;
1903 
1904 	ut_ad(trx && undo && mtr);
1905 
1906 	ut_a(undo->id < TRX_RSEG_N_SLOTS);
1907 
1908 	undo_page = trx_undo_page_get(
1909 		page_id_t(undo->space, undo->hdr_page_no),
1910 		undo->page_size, mtr);
1911 
1912 	seg_hdr = undo_page + TRX_UNDO_SEG_HDR;
1913 
1914 	if (rollback) {
1915 		ut_ad(undo->state == TRX_UNDO_PREPARED);
1916 		mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, TRX_UNDO_ACTIVE,
1917 				 MLOG_2BYTES, mtr);
1918 		return(undo_page);
1919 	}
1920 
1921 	/*------------------------------*/
1922 	ut_ad(undo->state == TRX_UNDO_ACTIVE);
1923 	undo->state = TRX_UNDO_PREPARED;
1924 	undo->xid   = *trx->xid;
1925 	/*------------------------------*/
1926 
1927 	mlog_write_ulint(seg_hdr + TRX_UNDO_STATE, undo->state,
1928 			 MLOG_2BYTES, mtr);
1929 
1930 	offset = mach_read_from_2(seg_hdr + TRX_UNDO_LAST_LOG);
1931 	undo_header = undo_page + offset;
1932 
1933 	mlog_write_ulint(undo_header + TRX_UNDO_XID_EXISTS,
1934 			 TRUE, MLOG_1BYTE, mtr);
1935 
1936 	trx_undo_write_xid(undo_header, &undo->xid, mtr);
1937 
1938 	return(undo_page);
1939 }
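
/* Note: on XA ROLLBACK only the on-page state is reset to TRX_UNDO_ACTIVE,
presumably so that the transaction can then be rolled back like any other
active transaction; on XA PREPARE both the in-memory and on-page states
become TRX_UNDO_PREPARED and the XID is stored in the undo log header. */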
1940 
1941 /**********************************************************************//**
1942 Adds the update undo log header as the first in the history list, and
1943 either frees the memory object or puts it on the list of cached update
1944 undo log segments. */
1945 void
1946 trx_undo_update_cleanup(
1947 /*====================*/
1948 	trx_t*		trx,		/*!< in: trx owning the update
1949 					undo log */
1950 	trx_undo_ptr_t*	undo_ptr,	/*!< in: update undo log. */
1951 	page_t*		undo_page,	/*!< in: update undo log header page,
1952 					x-latched */
1953 	bool		update_rseg_history_len,
1954 					/*!< in: if true, update the rseg
1955 					history length, else skip updating it */
1956 	ulint		n_added_logs,	/*!< in: number of logs added */
1957 	mtr_t*		mtr)		/*!< in: mtr */
1958 {
1959 	trx_rseg_t*	rseg;
1960 	trx_undo_t*	undo;
1961 
1962 	undo = undo_ptr->update_undo;
1963 	rseg = undo_ptr->rseg;
1964 
1965 	ut_ad(mutex_own(&(rseg->mutex)));
1966 
1967 	trx_purge_add_update_undo_to_history(
1968 		trx, undo_ptr, undo_page,
1969 		update_rseg_history_len, n_added_logs, mtr);
1970 
1971 	UT_LIST_REMOVE(rseg->update_undo_list, undo);
1972 
1973 	undo_ptr->update_undo = NULL;
1974 
1975 	if (undo->state == TRX_UNDO_CACHED) {
1976 
1977 		UT_LIST_ADD_FIRST(rseg->update_undo_cached, undo);
1978 
1979 		MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
1980 	} else {
1981 		ut_ad(undo->state == TRX_UNDO_TO_PURGE);
1982 
1983 		trx_undo_mem_free(undo);
1984 	}
1985 }
1986 
1987 /** Frees an insert undo log after a transaction commit or rollback.
1988 Knowledge of inserts is not needed after a commit or rollback, therefore
1989 the data can be discarded.
1990 @param[in,out]	undo_ptr	undo log to clean up
1991 @param[in]	noredo		whether the undo tablespace is redo logged */
1992 void
1993 trx_undo_insert_cleanup(
1994 	trx_undo_ptr_t*	undo_ptr,
1995 	bool		noredo)
1996 {
1997 	trx_undo_t*	undo;
1998 	trx_rseg_t*	rseg;
1999 
2000 	undo = undo_ptr->insert_undo;
2001 	ut_ad(undo != NULL);
2002 
2003 	rseg = undo_ptr->rseg;
2004 
2005 	ut_ad(noredo == trx_sys_is_noredo_rseg_slot(rseg->id));
2006 
2007 	mutex_enter(&(rseg->mutex));
2008 
2009 	UT_LIST_REMOVE(rseg->insert_undo_list, undo);
2010 	undo_ptr->insert_undo = NULL;
2011 
2012 	if (undo->state == TRX_UNDO_CACHED) {
2013 
2014 		UT_LIST_ADD_FIRST(rseg->insert_undo_cached, undo);
2015 
2016 		MONITOR_INC(MONITOR_NUM_UNDO_SLOT_CACHED);
2017 	} else {
2018 		ut_ad(undo->state == TRX_UNDO_TO_FREE);
2019 
2020 		/* Delete first the undo log segment in the file */
2021 
2022 		mutex_exit(&(rseg->mutex));
2023 
2024 		trx_undo_seg_free(undo, noredo);
2025 
2026 		mutex_enter(&(rseg->mutex));
2027 
2028 		ut_ad(rseg->curr_size > undo->size);
2029 
2030 		rseg->curr_size -= undo->size;
2031 
2032 		trx_undo_mem_free(undo);
2033 	}
2034 
2035 	mutex_exit(&(rseg->mutex));
2036 }
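
/* Unlike trx_undo_update_cleanup() above, which must hand the update undo
log over to the history list for purge, the insert undo segment can be freed
(or cached for reuse) right away at commit or rollback, because insert undo
records are only needed for rolling back the inserting transaction itself. */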
2037 
2038 /********************************************************************//**
2039 At shutdown, frees the undo logs of a PREPARED transaction. */
2040 void
2041 trx_undo_free_prepared(
2042 /*===================*/
2043 	trx_t*	trx)	/*!< in/out: PREPARED transaction */
2044 {
2045 	ut_ad(srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS);
2046 
2047 	if (trx->rsegs.m_redo.update_undo) {
2048 		ut_a(trx->rsegs.m_redo.update_undo->state == TRX_UNDO_PREPARED);
2049 		UT_LIST_REMOVE(trx->rsegs.m_redo.rseg->update_undo_list,
2050 			       trx->rsegs.m_redo.update_undo);
2051 		trx_undo_mem_free(trx->rsegs.m_redo.update_undo);
2052 
2053 		trx->rsegs.m_redo.update_undo = NULL;
2054 	}
2055 
2056 	if (trx->rsegs.m_redo.insert_undo) {
2057 		ut_a(trx->rsegs.m_redo.insert_undo->state == TRX_UNDO_PREPARED);
2058 		UT_LIST_REMOVE(trx->rsegs.m_redo.rseg->insert_undo_list,
2059 			       trx->rsegs.m_redo.insert_undo);
2060 		trx_undo_mem_free(trx->rsegs.m_redo.insert_undo);
2061 
2062 		trx->rsegs.m_redo.insert_undo = NULL;
2063 	}
2064 
2065 	if (trx->rsegs.m_noredo.update_undo) {
2066 
2067 		ut_a(trx->rsegs.m_noredo.update_undo->state
2068 			== TRX_UNDO_PREPARED);
2069 
2070 		UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->update_undo_list,
2071 			       trx->rsegs.m_noredo.update_undo);
2072 		trx_undo_mem_free(trx->rsegs.m_noredo.update_undo);
2073 
2074 		trx->rsegs.m_noredo.update_undo = NULL;
2075 	}
2076 	if (trx->rsegs.m_noredo.insert_undo) {
2077 
2078 		ut_a(trx->rsegs.m_noredo.insert_undo->state
2079 			== TRX_UNDO_PREPARED);
2080 
2081 		UT_LIST_REMOVE(trx->rsegs.m_noredo.rseg->insert_undo_list,
2082 			       trx->rsegs.m_noredo.insert_undo);
2083 		trx_undo_mem_free(trx->rsegs.m_noredo.insert_undo);
2084 
2085 		trx->rsegs.m_noredo.insert_undo = NULL;
2086 	}
2087 }
2088 
2089 /** Truncate UNDO tablespace, reinitialize header and rseg.
2090 @param[in]	undo_trunc	UNDO tablespace handler
2091 @return true on success, false on failure. */
2092 bool
2093 trx_undo_truncate_tablespace(
2094 	undo::Truncate*	undo_trunc)
2095 
2096 {
2097 	bool	success = true;
2098 	ulint	space_id = undo_trunc->get_marked_space_id();
2099 
2100 	/* Step-1: Truncate tablespace. */
2101 	success = fil_truncate_tablespace(
2102 		space_id, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES);
2103 
2104 	if (!success) {
2105 		return(success);
2106 	}
2107 
2108 	/* Step-2: Re-initialize the tablespace header.
2109 	Avoid REDO logging, as we do not want the action replayed if the
2110 	server crashes; the UNDO-truncate DDL log handles the fix-up. */
2111 	mtr_t		mtr;
2112 	mtr_start(&mtr);
2113 	mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
2114 	fsp_header_init(space_id, SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, &mtr);
2115 	mtr_commit(&mtr);
2116 
2117 	/* Step-3: Re-initialize the rollback segment headers that reside
2118 	in the truncated tablespace. */
2119 	mtr_start(&mtr);
2120 	mtr_set_log_mode(&mtr, MTR_LOG_NO_REDO);
2121 	mtr_x_lock(fil_space_get_latch(space_id, NULL), &mtr);
2122 
2123 	for (ulint i = 0; i < undo_trunc->rsegs_size(); ++i) {
2124 		trx_rsegf_t*	rseg_header;
2125 
2126 		trx_rseg_t*	rseg = undo_trunc->get_ith_rseg(i);
2127 
2128 		rseg->page_no = trx_rseg_header_create(
2129 			space_id, univ_page_size, ULINT_MAX, rseg->id, &mtr);
2130 
2131 		rseg_header = trx_rsegf_get_new(
2132 			space_id, rseg->page_no, rseg->page_size, &mtr);
2133 
2134 		/* Before re-initialization ensure that we free the existing
2135 		structure. There can't be any active transactions. */
2136 		ut_a(UT_LIST_GET_LEN(rseg->update_undo_list) == 0);
2137 		ut_a(UT_LIST_GET_LEN(rseg->insert_undo_list) == 0);
2138 
2139 		trx_undo_t*	next_undo;
2140 
2141 		for (trx_undo_t* undo =
2142 			UT_LIST_GET_FIRST(rseg->update_undo_cached);
2143 		     undo != NULL;
2144 		     undo = next_undo) {
2145 
2146 			next_undo = UT_LIST_GET_NEXT(undo_list, undo);
2147 			UT_LIST_REMOVE(rseg->update_undo_cached, undo);
2148 			MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
2149 			trx_undo_mem_free(undo);
2150 		}
2151 
2152 		for (trx_undo_t* undo =
2153 			UT_LIST_GET_FIRST(rseg->insert_undo_cached);
2154 		     undo != NULL;
2155 		     undo = next_undo) {
2156 
2157 			next_undo = UT_LIST_GET_NEXT(undo_list, undo);
2158 			UT_LIST_REMOVE(rseg->insert_undo_cached, undo);
2159 			MONITOR_DEC(MONITOR_NUM_UNDO_SLOT_CACHED);
2160 			trx_undo_mem_free(undo);
2161 		}
2162 
2163 		UT_LIST_INIT(rseg->update_undo_list, &trx_undo_t::undo_list);
2164 		UT_LIST_INIT(rseg->update_undo_cached, &trx_undo_t::undo_list);
2165 		UT_LIST_INIT(rseg->insert_undo_list, &trx_undo_t::undo_list);
2166 		UT_LIST_INIT(rseg->insert_undo_cached, &trx_undo_t::undo_list);
2167 
2168 		rseg->max_size = mtr_read_ulint(
2169 			rseg_header + TRX_RSEG_MAX_SIZE, MLOG_4BYTES, &mtr);
2170 
2171 		/* Initialize the undo log lists according to the rseg header */
2172 		rseg->curr_size = mtr_read_ulint(
2173 			rseg_header + TRX_RSEG_HISTORY_SIZE, MLOG_4BYTES, &mtr)
2174 			+ 1;
2175 
2176 		ut_ad(rseg->curr_size == 1);
2177 
2178 		rseg->trx_ref_count = 0;
2179 		rseg->last_page_no = FIL_NULL;
2180 		rseg->last_offset = 0;
2181 		rseg->last_trx_no = 0;
2182 		rseg->last_del_marks = FALSE;
2183 	}
2184 
2185 	/* During upgrade, existing rsegs in the range slot-1 to slot-32
2186 	were added to the array pending_purge_rseg_array[]. These rsegs also
2187 	reside in the system or an undo tablespace. */
2188 	trx_sysf_t* sys_header = trx_sysf_get(&mtr);
2189 	for (ulint i = 0; i < TRX_SYS_N_RSEGS; ++i) {
2190 		trx_rseg_t*	rseg = trx_sys->pending_purge_rseg_array[i];
2191 		if (rseg != NULL
2192 			&& rseg->space == undo_trunc->get_marked_space_id()) {
2193 			/* Reset the rollback segment slot in the trx
2194 			system header */
2195 			trx_sysf_rseg_set_page_no(
2196 				sys_header, rseg->id, FIL_NULL, &mtr);
2197 			/* Free a pending rollback segment instance in memory */
2198 			trx_rseg_mem_free(rseg,
2199 				trx_sys->pending_purge_rseg_array);
2200 		}
2201 	}
2202 	mtr_commit(&mtr);
2203 
2204 	return(success);
2205 }
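
/* In short, undo tablespace truncation: (1) truncates the file back to
SRV_UNDO_TABLESPACE_SIZE_IN_PAGES, (2) re-initializes the tablespace header,
and (3) re-creates each rollback segment header and resets its in-memory
state; any pending-purge rsegs that pointed into the truncated space are also
unhooked from the TRX_SYS header. Steps 2 and 3 deliberately skip redo
logging; recovery relies on the UNDO-truncate DDL log instead. */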
2206 
2207 #endif /* !UNIV_HOTBACKUP */
2208