1 /*****************************************************************************
2
3 Copyright (c) 1996, 2021, Oracle and/or its affiliates.
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License, version 2.0,
7 as published by the Free Software Foundation.
8
9 This program is also distributed with certain software (including
10 but not limited to OpenSSL) that is licensed under separate terms,
11 as designated in a particular file or component or in included license
12 documentation. The authors of MySQL hereby grant you an additional
13 permission to link the program and your derivative works with the
14 separately licensed software that they have included with MySQL.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License, version 2.0, for more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
24
25 *****************************************************************************/
26
27 /**************************************************//**
28 @file row/row0upd.cc
29 Update of a row
30
31 Created 12/27/1996 Heikki Tuuri
32 *******************************************************/
33
34 #include "ha_prototypes.h"
35
36 #include "row0upd.h"
37
38 #ifdef UNIV_NONINL
39 #include "row0upd.ic"
40 #endif
41
42 #include "dict0dict.h"
43 #include "trx0undo.h"
44 #include "rem0rec.h"
45 #ifndef UNIV_HOTBACKUP
46 #include "dict0boot.h"
47 #include "dict0crea.h"
48 #include "mach0data.h"
49 #include "btr0btr.h"
50 #include "btr0cur.h"
51 #include "que0que.h"
52 #include "row0ext.h"
53 #include "row0ins.h"
54 #include "row0log.h"
55 #include "row0row.h"
56 #include "row0sel.h"
57 #include "rem0cmp.h"
58 #include "lock0lock.h"
59 #include "log0log.h"
60 #include "pars0sym.h"
61 #include "eval0eval.h"
62 #include "buf0lru.h"
63 #ifdef WITH_WSREP
64 extern my_bool wsrep_debug;
65 #endif
66 #include "trx0rec.h"
67 #include "fts0fts.h"
68 #include "fts0types.h"
69 #include <algorithm>
70
71 /* What kind of latch and lock can we assume when the control comes to
72 -------------------------------------------------------------------
73 an update node?
74 --------------
75 Efficiency of massive updates would require keeping an x-latch on a
76 clustered index page through many updates, and not setting an explicit
77 x-lock on clustered index records, as they anyway will get an implicit
78 x-lock when they are updated. A problem is that the read nodes in the
79 graph should know that they must keep the latch when passing the control
80 up to the update node, and not set any record lock on the record which
81 will be updated. Another problem occurs if the execution is stopped,
82 as the kernel switches to another query thread, or the transaction must
83 wait for a lock. Then we should be able to release the latch and, maybe,
84 acquire an explicit x-lock on the record.
85 Because this seems too complicated, we conclude that the less
86 efficient solution of releasing all the latches when the control is
87 transferred to another node, and acquiring explicit x-locks, is better. */
88
89 /* How is a delete performed? If there is a delete without an
90 explicit cursor, i.e., a searched delete, there are at least
91 two different situations:
92 the implicit select cursor may run on (1) the clustered index or
93 on (2) a secondary index. The delete is performed by setting
94 the delete bit in the record and substituting the id of the
95 deleting transaction for the original trx id, and substituting a
96 new roll ptr for previous roll ptr. The old trx id and roll ptr
97 are saved in the undo log record. Thus, no physical changes occur
98 in the index tree structure at the time of the delete. Only
99 when the undo log is purged, the index records will be physically
100 deleted from the index trees.
101
102 The query graph executing a searched delete would consist of
103 a delete node which has as a subtree a select subgraph.
104 The select subgraph should return a (persistent) cursor
105 in the clustered index, placed on page which is x-latched.
106 The delete node should look for all secondary index records for
107 this clustered index entry and mark them as deleted. When is
108 the x-latch freed? The most efficient way for performing a
109 searched delete is obviously to keep the x-latch for several
110 steps of query graph execution. */
111
112 /*************************************************************************
113 IMPORTANT NOTE: Any operation that generates redo MUST check that there
114 is enough space in the redo log before for that operation. This is
115 done by calling log_free_check(). The reason for checking the
116 availability of the redo log space before the start of the operation is
117 that we MUST not hold any synchonization objects when performing the
118 check.
119 If you make a change in this module make sure that no codepath is
120 introduced where a call to log_free_check() is bypassed. */
121
/***********************************************************//**
Checks if an update vector changes some of the first ordering fields of an
index record. This is only used in foreign key checks and we can assume
that index does not contain column prefixes.
NOTE: this is a forward declaration only; the definition appears later
in this file.
@return TRUE if changes */
static
ibool
row_upd_changes_first_fields_binary(
/*================================*/
	dtuple_t*	entry,	/*!< in: old value of index entry */
	dict_index_t*	index,	/*!< in: index of entry */
	const upd_t*	update,	/*!< in: update vector for the row */
	ulint		n);	/*!< in: how many first fields to check */
135
136
137 /*********************************************************************//**
138 Checks if index currently is mentioned as a referenced index in a foreign
139 key constraint.
140
141 NOTE that since we do not hold dict_operation_lock when leaving the
142 function, it may be that the referencing table has been dropped when
143 we leave this function: this function is only for heuristic use!
144
145 @return TRUE if referenced */
146 static
147 ibool
row_upd_index_is_referenced(dict_index_t * index,trx_t * trx)148 row_upd_index_is_referenced(
149 /*========================*/
150 dict_index_t* index, /*!< in: index */
151 trx_t* trx) /*!< in: transaction */
152 {
153 dict_table_t* table = index->table;
154 ibool froze_data_dict = FALSE;
155 ibool is_referenced = FALSE;
156
157 if (table->referenced_set.empty()) {
158 return(FALSE);
159 }
160
161 if (trx->dict_operation_lock_mode == 0) {
162 row_mysql_freeze_data_dictionary(trx);
163 froze_data_dict = TRUE;
164 }
165
166 dict_foreign_set::iterator it
167 = std::find_if(table->referenced_set.begin(),
168 table->referenced_set.end(),
169 dict_foreign_with_index(index));
170
171 is_referenced = (it != table->referenced_set.end());
172
173 if (froze_data_dict) {
174 row_mysql_unfreeze_data_dictionary(trx);
175 }
176
177 return(is_referenced);
178 }
179
180 #ifdef WITH_WSREP
181 static
182 ibool
wsrep_row_upd_index_is_foreign(dict_index_t * index,trx_t * trx)183 wsrep_row_upd_index_is_foreign(
184 /*========================*/
185 dict_index_t* index, /*!< in: index */
186 trx_t* trx) /*!< in: transaction */
187 {
188 dict_table_t* table = index->table;
189 ibool froze_data_dict = FALSE;
190 ibool is_referenced = FALSE;
191
192 if (table->foreign_set.empty()) {
193 return(FALSE);
194 }
195
196 if (trx->dict_operation_lock_mode == 0) {
197 row_mysql_freeze_data_dictionary(trx);
198 froze_data_dict = TRUE;
199 }
200
201 dict_foreign_set::iterator it
202 = std::find_if(table->foreign_set.begin(),
203 table->foreign_set.end(),
204 dict_foreign_with_foreign_index(index));
205
206 is_referenced = (it != table->foreign_set.end());
207
208 if (froze_data_dict) {
209 row_mysql_unfreeze_data_dictionary(trx);
210 }
211
212 return(is_referenced);
213 }
214 #endif /* WITH_WSREP */
215
/*********************************************************************//**
Checks if possible foreign key constraints hold after a delete of the record
under pcur.

NOTE that this function will temporarily commit mtr and lose the
pcur position!

@return DB_SUCCESS or an error code */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
row_upd_check_references_constraints(
/*=================================*/
	upd_node_t*	node,	/*!< in: row update node */
	btr_pcur_t*	pcur,	/*!< in: cursor positioned on a record; NOTE: the
				cursor position is lost in this function! */
	dict_table_t*	table,	/*!< in: table in question */
	dict_index_t*	index,	/*!< in: index of the cursor */
	ulint*		offsets,/*!< in/out: rec_get_offsets(pcur.rec, index) */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr)	/*!< in: mtr */
{
	dict_foreign_t*	foreign;
	mem_heap_t*	heap;
	dtuple_t*	entry;
	trx_t*		trx;
	const rec_t*	rec;
	ulint		n_ext;
	dberr_t		err;
	ibool		got_s_lock	= FALSE;

	DBUG_ENTER("row_upd_check_references_constraints");

	/* Nothing to do if no foreign key constraint references this
	table. */
	if (table->referenced_set.empty()) {
		DBUG_RETURN(DB_SUCCESS);
	}

	trx = thr_get_trx(thr);

	rec = btr_pcur_get_rec(pcur);
	ut_ad(rec_offs_validate(rec, index, offsets));

	heap = mem_heap_create(500);

	/* Copy the index entry out of the record before committing mtr:
	the record itself may be relocated once the mini-transaction is
	released. */
	entry = row_rec_to_index_entry(rec, index, offsets, &n_ext, heap);

	mtr_commit(mtr);

	DEBUG_SYNC_C("foreign_constraint_check_for_update");

	mtr_start(mtr);

	/* S-latch the data dictionary unless the caller already holds it,
	so that 'referenced_set' stays stable while we iterate over it. */
	if (trx->dict_operation_lock_mode == 0) {
		got_s_lock = TRUE;

		row_mysql_freeze_data_dictionary(trx);
	}

	DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
			    "foreign_constraint_check_for_insert");

	for (dict_foreign_set::iterator it = table->referenced_set.begin();
	     it != table->referenced_set.end();
	     ++it) {

		foreign = *it;

		/* Note that we may have an update which updates the index
		record, but does NOT update the first fields which are
		referenced in a foreign key constraint. Then the update does
		NOT break the constraint. */

		if (foreign->referenced_index == index
		    && (node->is_delete
			|| row_upd_changes_first_fields_binary(
				entry, index, node->update,
				foreign->n_fields))) {
			dict_table_t*	foreign_table = foreign->foreign_table;

			dict_table_t*	ref_table = NULL;

			/* The referencing table may not be loaded in the
			dictionary cache; open it by name in that case and
			close it again after the check. */
			if (foreign_table == NULL) {

				ref_table = dict_table_open_on_name(
					foreign->foreign_table_name_lookup,
					FALSE, FALSE, DICT_ERR_IGNORE_NONE);
			}

			/** dict_operation_lock is held both here and by truncate operations.
			If a truncate is in process by another concurrent thread,
			there can be 2 conditions possible:
			1) row_truncate_table_for_mysql() is not yet called.
			2) Truncate releases dict_operation_lock
			during eviction of pages from buffer pool
			for a file-per-table tablespace.

			In case of (1), truncate will wait for FK operation
			to complete.
			In case of (2), truncate will be rolled forward even
			if it is interrupted. So if the foreign table is
			undergoing a truncate, ignore the FK check. */

			if (foreign_table != NULL &&
				(dict_table_is_discarded(foreign_table)
				|| fil_space_is_being_truncated(
					foreign_table->space))) {
				continue;
			}

			/* NOTE that if the thread ends up waiting for a lock
			we will release dict_operation_lock temporarily!
			But the counter on the table protects 'foreign' from
			being dropped while the check is running. */


			if (foreign_table) {
				os_atomic_increment_ulint(&foreign_table->n_foreign_key_checks_running, 1);
			}

			err = row_ins_check_foreign_constraint(
				FALSE, foreign, table, entry, thr);

			if (foreign_table) {
				os_atomic_decrement_ulint(&foreign_table->n_foreign_key_checks_running, 1);
			}
			if (ref_table != NULL) {
				dict_table_close(ref_table, FALSE, FALSE);
			}

			if (err != DB_SUCCESS) {
				goto func_exit;
			}
		}
	}

	err = DB_SUCCESS;

func_exit:
	if (got_s_lock) {
		row_mysql_unfreeze_data_dictionary(trx);
	}

	mem_heap_free(heap);

	DEBUG_SYNC_C("foreign_constraint_check_for_update_done");
	DBUG_RETURN(err);
}
362 #ifdef WITH_WSREP
/*********************************************************************//**
Galera (wsrep) counterpart of row_upd_check_references_constraints():
checks the foreign key constraints declared BY this table (foreign_set,
i.e. this table is the child) after a delete or an update of the record
under pcur.

NOTE that this function will temporarily commit mtr and lose the
pcur position!

@return DB_SUCCESS or an error code */
static
dberr_t
wsrep_row_upd_check_foreign_constraints(
/*=================================*/
	upd_node_t*	node,	/*!< in: row update node */
	btr_pcur_t*	pcur,	/*!< in: cursor positioned on a record; NOTE: the
				cursor position is lost in this function! */
	dict_table_t*	table,	/*!< in: table in question */
	dict_index_t*	index,	/*!< in: index of the cursor */
	ulint*		offsets,/*!< in/out: rec_get_offsets(pcur.rec, index) */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr)	/*!< in: mtr */
{
	dict_foreign_t*	foreign;
	mem_heap_t*	heap;
	dtuple_t*	entry;
	trx_t*		trx;
	const rec_t*	rec;
	ulint		n_ext;
	dberr_t		err;
	ibool		got_s_lock	= FALSE;
	ibool		opened		= FALSE;

	/* Nothing to check if this table declares no foreign keys. */
	if (table->foreign_set.empty()) {
		return(DB_SUCCESS);
	}

	trx = thr_get_trx(thr);

	/* TODO: make native slave thread bail out here */

	rec = btr_pcur_get_rec(pcur);
	ut_ad(rec_offs_validate(rec, index, offsets));

	heap = mem_heap_create(500);

	/* Copy the index entry out of the record before committing mtr:
	the record itself may be relocated once the mini-transaction is
	released. */
	entry = row_rec_to_index_entry(rec, index, offsets,
				       &n_ext, heap);

	mtr_commit(mtr);

	mtr_start(mtr);

	/* S-latch the data dictionary unless the caller already holds
	it, so that 'foreign_set' stays stable while we iterate. */
	if (trx->dict_operation_lock_mode == 0) {
		got_s_lock = TRUE;

		row_mysql_freeze_data_dictionary(trx);
	}

	for (dict_foreign_set::iterator it = table->foreign_set.begin();
	     it != table->foreign_set.end();
	     ++it) {

		foreign = *it;
		/* Note that we may have an update which updates the index
		record, but does NOT update the first fields which are
		referenced in a foreign key constraint. Then the update does
		NOT break the constraint. */

		if (foreign->foreign_index == index
		    && (node->is_delete
			|| row_upd_changes_first_fields_binary(
				entry, index, node->update,
				foreign->n_fields))) {

			/* The referenced (parent) table may not be in the
			dictionary cache; open it by name in that case and
			remember that we opened it so it can be closed
			after the check. */
			if (foreign->referenced_table == NULL) {
				foreign->referenced_table =
					dict_table_open_on_name(
					  foreign->referenced_table_name_lookup,
					  FALSE, FALSE, DICT_ERR_IGNORE_NONE);
				opened = (foreign->referenced_table) ? TRUE : FALSE;
			}

			/* NOTE that if the thread ends up waiting for a lock
			we will release dict_operation_lock temporarily!
			But the counter on the table protects 'foreign' from
			being dropped while the check is running. */

			err = row_ins_check_foreign_constraint(
				TRUE, foreign, table, entry, thr);

			if (foreign->referenced_table) {
				if (opened == TRUE) {
					dict_table_close(foreign->referenced_table, FALSE, FALSE);
					opened = FALSE;
				}
			}

			if (err != DB_SUCCESS) {
				goto func_exit;
			}
		}
	}

	err = DB_SUCCESS;
func_exit:
	if (got_s_lock) {
		row_mysql_unfreeze_data_dictionary(trx);
	}

	mem_heap_free(heap);

	return(err);
}
467 #endif /* WITH_WSREP */
468
469 /*********************************************************************//**
470 Creates an update node for a query graph.
471 @return own: update node */
472 upd_node_t*
upd_node_create(mem_heap_t * heap)473 upd_node_create(
474 /*============*/
475 mem_heap_t* heap) /*!< in: mem heap where created */
476 {
477 upd_node_t* node;
478
479 node = static_cast<upd_node_t*>(
480 mem_heap_zalloc(heap, sizeof(upd_node_t)));
481
482 node->common.type = QUE_NODE_UPDATE;
483 node->state = UPD_NODE_UPDATE_CLUSTERED;
484 node->heap = mem_heap_create(128);
485 node->magic_n = UPD_NODE_MAGIC_N;
486
487 return(node);
488 }
489 #endif /* !UNIV_HOTBACKUP */
490
/*********************************************************************//**
Updates the trx id and roll ptr field in a clustered index record in database
recovery. */
void
row_upd_rec_sys_fields_in_recovery(
/*===============================*/
	rec_t*		rec,	/*!< in/out: record */
	page_zip_des_t*	page_zip,/*!< in/out: compressed page, or NULL */
	const ulint*	offsets,/*!< in: array returned by rec_get_offsets() */
	ulint		pos,	/*!< in: TRX_ID position in rec */
	trx_id_t	trx_id,	/*!< in: transaction id */
	roll_ptr_t	roll_ptr)/*!< in: roll ptr of the undo log record */
{
	ut_ad(rec_offs_validate(rec, NULL, offsets));

	if (page_zip) {
		/* On a compressed page the system columns must be written
		through the page_zip interface so that the compressed and
		uncompressed copies stay in sync. */
		page_zip_write_trx_id_and_roll_ptr(
			page_zip, rec, offsets, pos, trx_id, roll_ptr);
	} else {
		byte*	field;
		ulint	len;

		field = rec_get_nth_field(rec, offsets, pos, &len);
		ut_ad(len == DATA_TRX_ID_LEN);
		/* DB_ROLL_PTR must immediately follow DB_TRX_ID so that
		both can be written relative to the same field pointer. */
#if DATA_TRX_ID + 1 != DATA_ROLL_PTR
# error "DATA_TRX_ID + 1 != DATA_ROLL_PTR"
#endif
		trx_write_trx_id(field, trx_id);
		trx_write_roll_ptr(field + DATA_TRX_ID_LEN, roll_ptr);
	}
}
522
523 #ifndef UNIV_HOTBACKUP
524 /*********************************************************************//**
525 Sets the trx id or roll ptr field of a clustered index entry. */
526 void
row_upd_index_entry_sys_field(dtuple_t * entry,dict_index_t * index,ulint type,ib_uint64_t val)527 row_upd_index_entry_sys_field(
528 /*==========================*/
529 dtuple_t* entry, /*!< in/out: index entry, where the memory
530 buffers for sys fields are already allocated:
531 the function just copies the new values to
532 them */
533 dict_index_t* index, /*!< in: clustered index */
534 ulint type, /*!< in: DATA_TRX_ID or DATA_ROLL_PTR */
535 ib_uint64_t val) /*!< in: value to write */
536 {
537 dfield_t* dfield;
538 byte* field;
539 ulint pos;
540
541 ut_ad(dict_index_is_clust(index));
542
543 pos = dict_index_get_sys_col_pos(index, type);
544
545 dfield = dtuple_get_nth_field(entry, pos);
546 field = static_cast<byte*>(dfield_get_data(dfield));
547
548 if (type == DATA_TRX_ID) {
549 ut_ad(val > 0);
550 trx_write_trx_id(field, val);
551 } else {
552 ut_ad(type == DATA_ROLL_PTR);
553 trx_write_roll_ptr(field, val);
554 }
555 }
556
/***********************************************************//**
Returns TRUE if row update changes size of some field in index or if some
field to be updated is stored externally in rec or update.  Such updates
cannot be applied in place with row_upd_rec_in_place().
@return TRUE if the update changes the size of some field in index or
the field is external in rec or update */
ibool
row_upd_changes_field_size_or_external(
/*===================================*/
	dict_index_t*	index,	/*!< in: index */
	const ulint*	offsets,/*!< in: rec_get_offsets(rec, index) */
	const upd_t*	update)	/*!< in: update vector */
{
	const upd_field_t*	upd_field;
	const dfield_t*		new_val;
	ulint			old_len;
	ulint			new_len;
	ulint			n_fields;
	ulint			i;

	ut_ad(rec_offs_validate(NULL, index, offsets));
	n_fields = upd_get_n_fields(update);

	for (i = 0; i < n_fields; i++) {
		upd_field = upd_get_nth_field(update, i);

		/* We should ignore virtual field if the index is not
		a virtual index */
		if (upd_fld_is_virtual_col(upd_field)
		    && dict_index_has_virtual(index) != DICT_VIRTUAL) {
			continue;
		}

		new_val = &(upd_field->new_val);
		new_len = dfield_get_len(new_val);

		if (dfield_is_null(new_val) && !rec_offs_comp(offsets)) {
			/* In the redundant format an SQL NULL still occupies
			its fixed size on the page, so compare against that
			size rather than the dfield length.

			A bug fixed on Dec 31st, 2004: we looked at the
			SQL NULL size from the wrong field! We may backport
			this fix also to 4.0. The merge to 5.0 will be made
			manually immediately after we commit this to 4.1. */

			new_len = dict_col_get_sql_null_size(
				dict_index_get_nth_col(index,
						       upd_field->field_no),
				0);
		}

		old_len = rec_offs_nth_size(offsets, upd_field->field_no);

		if (rec_offs_comp(offsets)
		    && rec_offs_nth_sql_null(offsets,
					     upd_field->field_no)) {
			/* Note that in the compact table format, for a
			variable length field, an SQL NULL will use zero
			bytes in the offset array at the start of the physical
			record, but a zero-length value (empty string) will
			use one byte! Thus, we cannot use update-in-place
			if we update an SQL NULL varchar to an empty string! */

			old_len = UNIV_SQL_NULL;
		}

		/* Size change, or either the old or the new value is
		stored externally: in-place update is impossible. */
		if (dfield_is_ext(new_val) || old_len != new_len
		    || rec_offs_nth_extern(offsets, upd_field->field_no)) {

			return(TRUE);
		}
	}

	return(FALSE);
}
628
629 /***********************************************************//**
630 Returns true if row update contains disowned external fields.
631 @return true if the update contains disowned external fields. */
632 bool
row_upd_changes_disowned_external(const upd_t * update)633 row_upd_changes_disowned_external(
634 /*==============================*/
635 const upd_t* update) /*!< in: update vector */
636 {
637 const upd_field_t* upd_field;
638 const dfield_t* new_val;
639 ulint new_len;
640 ulint n_fields;
641 ulint i;
642
643 n_fields = upd_get_n_fields(update);
644
645 for (i = 0; i < n_fields; i++) {
646 const byte* field_ref;
647
648 upd_field = upd_get_nth_field(update, i);
649 new_val = &(upd_field->new_val);
650 new_len = dfield_get_len(new_val);
651
652 if (!dfield_is_ext(new_val)) {
653 continue;
654 }
655
656 ut_ad(new_len >= BTR_EXTERN_FIELD_REF_SIZE);
657
658 field_ref = static_cast<const byte*>(dfield_get_data(new_val))
659 + new_len - BTR_EXTERN_FIELD_REF_SIZE;
660
661 if (field_ref[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG) {
662 return(true);
663 }
664 }
665
666 return(false);
667 }
668 #endif /* !UNIV_HOTBACKUP */
669
/***********************************************************//**
Replaces the new column values stored in the update vector to the
record given. No field size changes are allowed. This function is
usually invoked on a clustered index. The only use case for a
secondary index is row_ins_sec_index_entry_by_modify() or its
counterpart in ibuf_insert_to_index_page(). */
void
row_upd_rec_in_place(
/*=================*/
	rec_t*		rec,	/*!< in/out: record where replaced */
	dict_index_t*	index,	/*!< in: the index the record belongs to */
	const ulint*	offsets,/*!< in: array returned by rec_get_offsets() */
	const upd_t*	update,	/*!< in: update vector */
	page_zip_des_t*	page_zip)/*!< in: compressed page with enough space
				available, or NULL */
{
	const upd_field_t*	upd_field;
	const dfield_t*		new_val;
	ulint			n_fields;
	ulint			i;

	ut_ad(rec_offs_validate(rec, index, offsets));

	/* The info bits are stored differently in the compact (new) and
	redundant (old) record formats. */
	if (rec_offs_comp(offsets)) {
		rec_set_info_bits_new(rec, update->info_bits);
	} else {
		rec_set_info_bits_old(rec, update->info_bits);
	}

	n_fields = upd_get_n_fields(update);

	for (i = 0; i < n_fields; i++) {
		upd_field = upd_get_nth_field(update, i);

		/* No need to update virtual columns for non-virtual index */
		if (upd_fld_is_virtual_col(upd_field)
		    && !dict_index_has_virtual(index)) {
			continue;
		}

		new_val = &(upd_field->new_val);
		/* In-place update must not change the external storage
		flag of a field: old and new must agree. */
		ut_ad(!dfield_is_ext(new_val) ==
		      !rec_offs_nth_extern(offsets, upd_field->field_no));

		rec_set_nth_field(rec, offsets, upd_field->field_no,
				  dfield_get_data(new_val),
				  dfield_get_len(new_val));
	}

	if (page_zip) {
		/* Re-write the whole record to the compressed page so the
		compressed copy matches the modified uncompressed one. */
		page_zip_write_rec(page_zip, rec, index, offsets, 0);
	}
}
723
724 #ifndef UNIV_HOTBACKUP
/*********************************************************************//**
Writes into the redo log the values of trx id and roll ptr and enough info
to determine their positions within a clustered index record.
@return new pointer to mlog */
byte*
row_upd_write_sys_vals_to_log(
/*==========================*/
	dict_index_t*	index,	/*!< in: clustered index */
	trx_id_t	trx_id,	/*!< in: transaction id */
	roll_ptr_t	roll_ptr,/*!< in: roll ptr of the undo log record */
	byte*		log_ptr,/*!< pointer to a buffer of size > 20 opened
				in mlog */
	mtr_t*		mtr MY_ATTRIBUTE((unused))) /*!< in: mtr */
{
	ut_ad(dict_index_is_clust(index));
	ut_ad(mtr);

	/* The layout written here must match row_upd_parse_sys_vals():
	1) position of DB_TRX_ID in the record (compressed),
	2) roll ptr (fixed DATA_ROLL_PTR_LEN bytes),
	3) trx id (compressed 64-bit). */
	log_ptr += mach_write_compressed(log_ptr,
					 dict_index_get_sys_col_pos(
						 index, DATA_TRX_ID));

	trx_write_roll_ptr(log_ptr, roll_ptr);
	log_ptr += DATA_ROLL_PTR_LEN;

	log_ptr += mach_u64_write_compressed(log_ptr, trx_id);

	return(log_ptr);
}
753 #endif /* !UNIV_HOTBACKUP */
754
755 /*********************************************************************//**
756 Parses the log data of system field values.
757 @return log data end or NULL */
758 byte*
row_upd_parse_sys_vals(const byte * ptr,const byte * end_ptr,ulint * pos,trx_id_t * trx_id,roll_ptr_t * roll_ptr)759 row_upd_parse_sys_vals(
760 /*===================*/
761 const byte* ptr, /*!< in: buffer */
762 const byte* end_ptr,/*!< in: buffer end */
763 ulint* pos, /*!< out: TRX_ID position in record */
764 trx_id_t* trx_id, /*!< out: trx id */
765 roll_ptr_t* roll_ptr)/*!< out: roll ptr */
766 {
767 *pos = mach_parse_compressed(&ptr, end_ptr);
768
769 if (ptr == NULL) {
770
771 return(NULL);
772 }
773
774 if (end_ptr < ptr + DATA_ROLL_PTR_LEN) {
775
776 return(NULL);
777 }
778
779 *roll_ptr = trx_read_roll_ptr(ptr);
780 ptr += DATA_ROLL_PTR_LEN;
781
782 *trx_id = mach_u64_parse_compressed(&ptr, end_ptr);
783
784 return(const_cast<byte*>(ptr));
785 }
786
787 #ifndef UNIV_HOTBACKUP
/***********************************************************//**
Writes to the redo log the new values of the fields occurring in the index.
Layout: 1 byte of info bits, compressed field count, then for each field a
compressed field number (offset by REC_MAX_N_FIELDS for virtual columns),
a compressed length (UNIV_SQL_NULL for NULL), and the value bytes.
Parsed back by row_upd_index_parse(). */
void
row_upd_index_write_log(
/*====================*/
	const upd_t*	update,	/*!< in: update vector */
	byte*		log_ptr,/*!< in: pointer to mlog buffer: must
				contain at least MLOG_BUF_MARGIN bytes
				of free space; the buffer is closed
				within this function */
	mtr_t*		mtr)	/*!< in: mtr into whose log to write */
{
	const upd_field_t*	upd_field;
	const dfield_t*		new_val;
	ulint			len;
	ulint			n_fields;
	byte*			buf_end;
	ulint			i;

	n_fields = upd_get_n_fields(update);

	buf_end = log_ptr + MLOG_BUF_MARGIN;

	mach_write_to_1(log_ptr, update->info_bits);
	log_ptr++;
	log_ptr += mach_write_compressed(log_ptr, n_fields);

	for (i = 0; i < n_fields; i++) {

		/* Each iteration writes at most 30 bytes of per-field
		header before the value; the margin must cover that. */
#if MLOG_BUF_MARGIN <= 30
# error "MLOG_BUF_MARGIN <= 30"
#endif

		if (log_ptr + 30 > buf_end) {
			/* Not enough room for the next field header in the
			open buffer: close it and open a fresh one. */
			mlog_close(mtr, log_ptr);

			log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
			buf_end = log_ptr + MLOG_BUF_MARGIN;
		}

		upd_field = upd_get_nth_field(update, i);

		new_val = &(upd_field->new_val);

		len = dfield_get_len(new_val);

		/* If this is a virtual column, mark it using special
		field_no */
		ulint	field_no = upd_fld_is_virtual_col(upd_field)
			? REC_MAX_N_FIELDS + upd_field->field_no
			: upd_field->field_no;

		log_ptr += mach_write_compressed(log_ptr, field_no);
		log_ptr += mach_write_compressed(log_ptr, len);

		if (len != UNIV_SQL_NULL) {
			if (log_ptr + len < buf_end) {
				/* The value fits into the open buffer. */
				memcpy(log_ptr, dfield_get_data(new_val), len);

				log_ptr += len;
			} else {
				/* The value is too long to buffer: close
				the buffer and append the value to the log
				directly, then reopen a buffer. */
				mlog_close(mtr, log_ptr);

				mlog_catenate_string(
					mtr,
					static_cast<byte*>(
						dfield_get_data(new_val)),
					len);

				log_ptr = mlog_open(mtr, MLOG_BUF_MARGIN);
				buf_end = log_ptr + MLOG_BUF_MARGIN;
			}
		}
	}

	mlog_close(mtr, log_ptr);
}
865 #endif /* !UNIV_HOTBACKUP */
866
/*********************************************************************//**
Parses the log data written by row_upd_index_write_log.
Returns NULL if the buffer ends before the record is complete; in that
case the caller is expected to retry with more data.
@return log data end or NULL */
byte*
row_upd_index_parse(
/*================*/
	const byte*	ptr,	/*!< in: buffer */
	const byte*	end_ptr,/*!< in: buffer end */
	mem_heap_t*	heap,	/*!< in: memory heap where update vector is
				built */
	upd_t**		update_out)/*!< out: update vector */
{
	upd_t*		update;
	upd_field_t*	upd_field;
	dfield_t*	new_val;
	ulint		len;
	ulint		n_fields;
	ulint		info_bits;
	ulint		i;

	/* At least the 1 byte of info bits must be present. */
	if (end_ptr < ptr + 1) {

		return(NULL);
	}

	info_bits = mach_read_from_1(ptr);
	ptr++;
	n_fields = mach_parse_compressed(&ptr, end_ptr);

	if (ptr == NULL) {

		return(NULL);
	}

	update = upd_create(n_fields, heap);
	update->info_bits = info_bits;

	for (i = 0; i < n_fields; i++) {
		ulint	field_no;
		upd_field = upd_get_nth_field(update, i);
		new_val = &(upd_field->new_val);

		field_no = mach_parse_compressed(&ptr, end_ptr);

		if (ptr == NULL) {

			return(NULL);
		}

		/* Check if this is a virtual column, mark the prtype
		if that is the case */
		if (field_no >= REC_MAX_N_FIELDS) {
			new_val->type.prtype |= DATA_VIRTUAL;
			field_no -= REC_MAX_N_FIELDS;
		}

		upd_field->field_no = field_no;

		len = mach_parse_compressed(&ptr, end_ptr);

		if (ptr == NULL) {

			return(NULL);
		}

		if (len != UNIV_SQL_NULL) {

			if (end_ptr < ptr + len) {

				return(NULL);
			}

			/* Copy the value into the heap: the log buffer
			may be reused after parsing. */
			dfield_set_data(new_val,
					mem_heap_dup(heap, ptr, len), len);
			ptr += len;
		} else {
			dfield_set_null(new_val);
		}
	}

	*update_out = update;

	return(const_cast<byte*>(ptr));
}
951
952 #ifndef UNIV_HOTBACKUP
953 /***************************************************************//**
954 Builds an update vector from those fields which in a secondary index entry
955 differ from a record that has the equal ordering fields. NOTE: we compare
956 the fields as binary strings!
957 @return own: update vector of differing fields */
958 upd_t*
row_upd_build_sec_rec_difference_binary(const rec_t * rec,dict_index_t * index,const ulint * offsets,const dtuple_t * entry,mem_heap_t * heap)959 row_upd_build_sec_rec_difference_binary(
960 /*====================================*/
961 const rec_t* rec, /*!< in: secondary index record */
962 dict_index_t* index, /*!< in: index */
963 const ulint* offsets,/*!< in: rec_get_offsets(rec, index) */
964 const dtuple_t* entry, /*!< in: entry to insert */
965 mem_heap_t* heap) /*!< in: memory heap from which allocated */
966 {
967 upd_field_t* upd_field;
968 const dfield_t* dfield;
969 const byte* data;
970 ulint len;
971 upd_t* update;
972 ulint n_diff;
973 ulint i;
974
975 /* This function is used only for a secondary index */
976 ut_a(!dict_index_is_clust(index));
977 ut_ad(rec_offs_validate(rec, index, offsets));
978 ut_ad(rec_offs_n_fields(offsets) == dtuple_get_n_fields(entry));
979 ut_ad(!rec_offs_any_extern(offsets));
980
981 update = upd_create(dtuple_get_n_fields(entry), heap);
982
983 n_diff = 0;
984
985 for (i = 0; i < dtuple_get_n_fields(entry); i++) {
986
987 data = rec_get_nth_field(rec, offsets, i, &len);
988
989 dfield = dtuple_get_nth_field(entry, i);
990
991 /* NOTE that it may be that len != dfield_get_len(dfield) if we
992 are updating in a character set and collation where strings of
993 different length can be equal in an alphabetical comparison,
994 and also in the case where we have a column prefix index
995 and the last characters in the index field are spaces; the
996 latter case probably caused the assertion failures reported at
997 row0upd.cc line 713 in versions 4.0.14 - 4.0.16. */
998
999 /* NOTE: we compare the fields as binary strings!
1000 (No collation) */
1001
1002 if (!dfield_data_is_binary_equal(dfield, len, data)) {
1003
1004 upd_field = upd_get_nth_field(update, n_diff);
1005
1006 dfield_copy(&(upd_field->new_val), dfield);
1007
1008 upd_field_set_field_no(upd_field, i, index, NULL);
1009
1010 n_diff++;
1011 }
1012 }
1013
1014 update->n_fields = n_diff;
1015
1016 return(update);
1017 }
1018
1019 /** Builds an update vector from those fields, excluding the roll ptr and
1020 trx id fields, which in an index entry differ from a record that has
1021 the equal ordering fields. NOTE: we compare the fields as binary strings!
1022 @param[in] index clustered index
1023 @param[in] entry clustered index entry to insert
1024 @param[in] rec clustered index record
1025 @param[in] offsets rec_get_offsets(rec,index), or NULL
1026 @param[in] no_sys skip the system columns
1027 DB_TRX_ID and DB_ROLL_PTR
1028 @param[in] trx transaction (for diagnostics),
1029 or NULL
1030 @param[in] heap memory heap from which allocated
1031 @param[in] mysql_table NULL, or mysql table object when
1032 user thread invokes dml
1033 @param[out] error error number in case of failure
1034 @return own: update vector of differing fields, excluding roll ptr and
1035 trx id,if error is not equal to DB_SUCCESS, return NULL */
1036 upd_t*
row_upd_build_difference_binary(dict_index_t * index,const dtuple_t * entry,const rec_t * rec,const ulint * offsets,bool no_sys,trx_t * trx,mem_heap_t * heap,TABLE * mysql_table,dberr_t * error)1037 row_upd_build_difference_binary(
1038 dict_index_t* index,
1039 const dtuple_t* entry,
1040 const rec_t* rec,
1041 const ulint* offsets,
1042 bool no_sys,
1043 trx_t* trx,
1044 mem_heap_t* heap,
1045 TABLE* mysql_table,
1046 dberr_t* error)
1047 {
1048 upd_field_t* upd_field;
1049 dfield_t* dfield;
1050 const byte* data;
1051 ulint len;
1052 upd_t* update;
1053 ulint n_diff;
1054 ulint trx_id_pos;
1055 ulint i;
1056 ulint offsets_[REC_OFFS_NORMAL_SIZE];
1057 ulint n_fld = dtuple_get_n_fields(entry);
1058 ulint n_v_fld = dtuple_get_n_v_fields(entry);
1059 rec_offs_init(offsets_);
1060
1061 /* This function is used only for a clustered index */
1062 ut_a(dict_index_is_clust(index));
1063
1064 update = upd_create(n_fld + n_v_fld, heap);
1065
1066 n_diff = 0;
1067
1068 trx_id_pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
1069 ut_ad(dict_table_is_intrinsic(index->table)
1070 || (dict_index_get_sys_col_pos(index, DATA_ROLL_PTR)
1071 == trx_id_pos + 1));
1072
1073 if (!offsets) {
1074 offsets = rec_get_offsets(rec, index, offsets_,
1075 ULINT_UNDEFINED, &heap);
1076 } else {
1077 ut_ad(rec_offs_validate(rec, index, offsets));
1078 }
1079
1080 for (i = 0; i < n_fld; i++) {
1081
1082 data = rec_get_nth_field(rec, offsets, i, &len);
1083
1084 dfield = dtuple_get_nth_field(entry, i);
1085
1086 /* NOTE: we compare the fields as binary strings!
1087 (No collation) */
1088 if (no_sys) {
1089 /* TRX_ID */
1090 if (i == trx_id_pos) {
1091 continue;
1092 }
1093
1094 /* DB_ROLL_PTR */
1095 if (i == trx_id_pos + 1
1096 && !dict_table_is_intrinsic(index->table)) {
1097 continue;
1098 }
1099 }
1100
1101 if (!dfield_is_ext(dfield)
1102 != !rec_offs_nth_extern(offsets, i)
1103 || !dfield_data_is_binary_equal(dfield, len, data)) {
1104
1105 upd_field = upd_get_nth_field(update, n_diff);
1106
1107 dfield_copy(&(upd_field->new_val), dfield);
1108
1109 upd_field_set_field_no(upd_field, i, index, trx);
1110
1111 n_diff++;
1112 }
1113 }
1114
1115 /* Check the virtual columns updates. Even if there is no non-virtual
1116 column (base columns) change, we will still need to build the
1117 indexed virtual column value so that undo log would log them (
1118 for purge/mvcc purpose) */
1119 if (n_v_fld > 0) {
1120 row_ext_t* ext;
1121 mem_heap_t* v_heap = NULL;
1122 THD* thd;
1123
1124 if (trx == NULL) {
1125 thd = current_thd;
1126 } else {
1127 thd = trx->mysql_thd;
1128 }
1129
1130 ut_ad(!update->old_vrow);
1131
1132 for (i = 0; i < n_v_fld; i++) {
1133 const dict_v_col_t* col
1134 = dict_table_get_nth_v_col(index->table, i);
1135
1136 if (!col->m_col.ord_part) {
1137 continue;
1138 }
1139
1140 if (update->old_vrow == NULL) {
1141 update->old_vrow = row_build(
1142 ROW_COPY_POINTERS, index, rec, offsets,
1143 index->table, NULL, NULL, &ext, heap);
1144 }
1145
1146 dfield = dtuple_get_nth_v_field(entry, i);
1147
1148 dfield_t* vfield = innobase_get_computed_value(
1149 update->old_vrow, col, index,
1150 &v_heap, heap, NULL, thd, mysql_table,
1151 NULL, NULL, NULL);
1152 if (vfield == NULL) {
1153 *error = DB_COMPUTE_VALUE_FAILED;
1154 return(NULL);
1155 }
1156
1157 if (!dfield_data_is_binary_equal(
1158 dfield, vfield->len,
1159 static_cast<byte*>(vfield->data))) {
1160 upd_field = upd_get_nth_field(update, n_diff);
1161
1162 upd_field->old_v_val = static_cast<dfield_t*>(
1163 mem_heap_alloc(
1164 heap,
1165 sizeof *upd_field->old_v_val));
1166
1167 dfield_copy(upd_field->old_v_val, vfield);
1168
1169 dfield_copy(&(upd_field->new_val), dfield);
1170
1171 upd_field_set_v_field_no(
1172 upd_field, i, index);
1173
1174 n_diff++;
1175
1176 }
1177 }
1178
1179 if (v_heap) {
1180 mem_heap_free(v_heap);
1181 }
1182 }
1183
1184 update->n_fields = n_diff;
1185 ut_ad(update->validate());
1186
1187 return(update);
1188 }
1189
1190 /** Fetch a prefix of an externally stored column.
1191 This is similar to row_ext_lookup(), but the row_ext_t holds the old values
1192 of the column and must not be poisoned with the new values.
1193 @param[in] data 'internally' stored part of the field
1194 containing also the reference to the external part
1195 @param[in] local_len length of data, in bytes
1196 @param[in] page_size BLOB page size
1197 @param[in,out] len input - length of prefix to
1198 fetch; output: fetched length of the prefix
1199 @param[in,out] heap heap where to allocate
1200 @return BLOB prefix */
1201 static
1202 byte*
row_upd_ext_fetch(const byte * data,ulint local_len,const page_size_t & page_size,ulint * len,mem_heap_t * heap)1203 row_upd_ext_fetch(
1204 const byte* data,
1205 ulint local_len,
1206 const page_size_t& page_size,
1207 ulint* len,
1208 mem_heap_t* heap)
1209 {
1210 byte* buf = static_cast<byte*>(mem_heap_alloc(heap, *len));
1211
1212 *len = btr_copy_externally_stored_field_prefix(
1213 buf, *len, page_size, data, local_len);
1214
1215 /* We should never update records containing a half-deleted BLOB. */
1216 ut_a(*len);
1217
1218 return(buf);
1219 }
1220
/** Replaces the new column value stored in the update vector in
the given index entry field.
@param[in,out]	dfield		data field of the index entry
@param[in]	field		index field
@param[in]	col		field->col
@param[in]	uf		update field
@param[in,out]	heap		memory heap for allocating and copying
the new value
@param[in]	page_size	page size */
static
void
row_upd_index_replace_new_col_val(
	dfield_t*		dfield,
	const dict_field_t*	field,
	const dict_col_t*	col,
	const upd_field_t*	uf,
	mem_heap_t*		heap,
	const page_size_t&	page_size)
{
	ulint		len;
	const byte*	data;

	dfield_copy_data(dfield, &uf->new_val);

	if (dfield_is_null(dfield)) {
		/* SQL NULL needs no copying or truncation. */
		return;
	}

	len = dfield_get_len(dfield);
	data = static_cast<const byte*>(dfield_get_data(dfield));

	if (field->prefix_len > 0) {
		/* Column prefix index: the entry stores at most
		prefix_len characters of the column.  If the new value
		is stored off-page and the local part is too short to
		cover the prefix, the prefix must be fetched from the
		external (BLOB) pages. */
		ibool		fetch_ext = dfield_is_ext(dfield)
			&& len < (ulint) field->prefix_len
			+ BTR_EXTERN_FIELD_REF_SIZE;

		if (fetch_ext) {
			ulint	l = len;

			len = field->prefix_len;

			data = row_upd_ext_fetch(data, l, page_size,
						 &len, heap);
		}

		/* Truncate to at most prefix_len characters (not bytes;
		multi-byte charsets may use fewer characters than bytes). */
		len = dtype_get_at_most_n_mbchars(col->prtype,
						  col->mbminmaxlen,
						  field->prefix_len, len,
						  (const char*) data);

		dfield_set_data(dfield, data, len);

		if (!fetch_ext) {
			/* row_upd_ext_fetch() already copied into heap;
			otherwise duplicate the data onto the heap here. */
			dfield_dup(dfield, heap);
		}

		return;
	}

	switch (uf->orig_len) {
		byte*	buf;
	case BTR_EXTERN_FIELD_REF_SIZE:
		/* Restore the original locally stored
		part of the column.  In the undo log,
		InnoDB writes a longer prefix of externally
		stored columns, so that column prefixes
		in secondary indexes can be reconstructed. */
		dfield_set_data(dfield,
				data + len - BTR_EXTERN_FIELD_REF_SIZE,
				BTR_EXTERN_FIELD_REF_SIZE);
		dfield_set_ext(dfield);
		/* fall through */
	case 0:
		/* orig_len == 0: the column was stored locally;
		just duplicate the value onto the heap. */
		dfield_dup(dfield, heap);
		break;
	default:
		/* Reconstruct the original locally
		stored part of the column.  The data
		will have to be copied. */
		ut_a(uf->orig_len > BTR_EXTERN_FIELD_REF_SIZE);
		buf = static_cast<byte*>(mem_heap_alloc(heap, uf->orig_len));

		/* Copy the locally stored prefix. */
		memcpy(buf, data,
		       uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE);

		/* Copy the BLOB pointer. */
		memcpy(buf + uf->orig_len - BTR_EXTERN_FIELD_REF_SIZE,
		       data + len - BTR_EXTERN_FIELD_REF_SIZE,
		       BTR_EXTERN_FIELD_REF_SIZE);

		dfield_set_data(dfield, buf, uf->orig_len);
		dfield_set_ext(dfield);
		break;
	}
}
1317
1318 /***********************************************************//**
1319 Replaces the new column values stored in the update vector to the index entry
1320 given. */
1321 void
row_upd_index_replace_new_col_vals_index_pos(dtuple_t * entry,dict_index_t * index,const upd_t * update,ibool order_only,mem_heap_t * heap)1322 row_upd_index_replace_new_col_vals_index_pos(
1323 /*=========================================*/
1324 dtuple_t* entry, /*!< in/out: index entry where replaced;
1325 the clustered index record must be
1326 covered by a lock or a page latch to
1327 prevent deletion (rollback or purge) */
1328 dict_index_t* index, /*!< in: index; NOTE that this may also be a
1329 non-clustered index */
1330 const upd_t* update, /*!< in: an update vector built for the index so
1331 that the field number in an upd_field is the
1332 index position */
1333 ibool order_only,
1334 /*!< in: if TRUE, limit the replacement to
1335 ordering fields of index; note that this
1336 does not work for non-clustered indexes. */
1337 mem_heap_t* heap) /*!< in: memory heap for allocating and
1338 copying the new values */
1339 {
1340 ulint i;
1341 ulint n_fields;
1342 const page_size_t& page_size = dict_table_page_size(index->table);
1343
1344 ut_ad(index);
1345
1346 dtuple_set_info_bits(entry, update->info_bits);
1347
1348 if (order_only) {
1349 n_fields = dict_index_get_n_unique(index);
1350 } else {
1351 n_fields = dict_index_get_n_fields(index);
1352 }
1353
1354 for (i = 0; i < n_fields; i++) {
1355 const dict_field_t* field;
1356 const dict_col_t* col;
1357 const upd_field_t* uf;
1358
1359 field = dict_index_get_nth_field(index, i);
1360 col = dict_field_get_col(field);
1361 if (dict_col_is_virtual(col)) {
1362 const dict_v_col_t* vcol = reinterpret_cast<
1363 const dict_v_col_t*>(
1364 col);
1365
1366 uf = upd_get_field_by_field_no(
1367 update, vcol->v_pos, true);
1368 } else {
1369 uf = upd_get_field_by_field_no(
1370 update, i, false);
1371 }
1372
1373 if (uf) {
1374 row_upd_index_replace_new_col_val(
1375 dtuple_get_nth_field(entry, i),
1376 field, col, uf, heap, page_size);
1377 }
1378 }
1379 }
1380
1381 /***********************************************************//**
1382 Replaces the new column values stored in the update vector to the index entry
1383 given. */
1384 void
row_upd_index_replace_new_col_vals(dtuple_t * entry,dict_index_t * index,const upd_t * update,mem_heap_t * heap)1385 row_upd_index_replace_new_col_vals(
1386 /*===============================*/
1387 dtuple_t* entry, /*!< in/out: index entry where replaced;
1388 the clustered index record must be
1389 covered by a lock or a page latch to
1390 prevent deletion (rollback or purge) */
1391 dict_index_t* index, /*!< in: index; NOTE that this may also be a
1392 non-clustered index */
1393 const upd_t* update, /*!< in: an update vector built for the
1394 CLUSTERED index so that the field number in
1395 an upd_field is the clustered index position */
1396 mem_heap_t* heap) /*!< in: memory heap for allocating and
1397 copying the new values */
1398 {
1399 ulint i;
1400 const dict_index_t* clust_index
1401 = dict_table_get_first_index(index->table);
1402 const page_size_t& page_size = dict_table_page_size(index->table);
1403
1404 dtuple_set_info_bits(entry, update->info_bits);
1405
1406 for (i = 0; i < dict_index_get_n_fields(index); i++) {
1407 const dict_field_t* field;
1408 const dict_col_t* col;
1409 const upd_field_t* uf;
1410
1411 field = dict_index_get_nth_field(index, i);
1412 col = dict_field_get_col(field);
1413 if (dict_col_is_virtual(col)) {
1414 const dict_v_col_t* vcol = reinterpret_cast<
1415 const dict_v_col_t*>(
1416 col);
1417
1418 uf = upd_get_field_by_field_no(
1419 update, vcol->v_pos, true);
1420 } else {
1421 uf = upd_get_field_by_field_no(
1422 update,
1423 dict_col_get_clust_pos(col, clust_index),
1424 false);
1425 }
1426
1427 if (uf) {
1428 row_upd_index_replace_new_col_val(
1429 dtuple_get_nth_field(entry, i),
1430 field, col, uf, heap, page_size);
1431 }
1432 }
1433 }
1434
1435 /** Replaces the virtual column values stored in the update vector.
1436 @param[in,out] row row whose column to be set
1437 @param[in] field data to set
1438 @param[in] len data length
1439 @param[in] vcol virtual column info */
1440 static
1441 void
row_upd_set_vcol_data(dtuple_t * row,const byte * field,ulint len,dict_v_col_t * vcol)1442 row_upd_set_vcol_data(
1443 dtuple_t* row,
1444 const byte* field,
1445 ulint len,
1446 dict_v_col_t* vcol)
1447 {
1448 dfield_t* dfield = dtuple_get_nth_v_field(row, vcol->v_pos);
1449
1450 if (dfield_get_type(dfield)->mtype == DATA_MISSING) {
1451 dict_col_copy_type(&vcol->m_col, dfield_get_type(dfield));
1452
1453 dfield_set_data(dfield, field, len);
1454 }
1455 }
1456
/** Replaces the virtual column values stored in a dtuple with that of
a update vector.
@param[in,out]	row	row whose column to be updated
@param[in]	table	table
@param[in]	update	an update vector built for the clustered index
@param[in]	upd_new	update to new or old value
@param[in,out]	undo_row undo row (if needs to be updated)
@param[in]	ptr	remaining part in update undo log */
void
row_upd_replace_vcol(
	dtuple_t*		row,
	const dict_table_t*	table,
	const upd_t*		update,
	bool			upd_new,
	dtuple_t*		undo_row,
	const byte*		ptr)
{
	ulint			col_no;
	ulint			i;
	ulint			n_cols;

	/* Pass 1: copy updated virtual column values from the update
	vector into the row. */
	n_cols = dtuple_get_n_v_fields(row);
	for (col_no = 0; col_no < n_cols; col_no++) {
		dfield_t*	dfield;

		const dict_v_col_t*	col
			= dict_table_get_nth_v_col(table, col_no);

		/* If there is no index on the column, do not bother for
		value update */
		if (!col->m_col.ord_part) {
			dict_index_t*	clust_index
				= dict_table_get_first_index(table);

			/* Skip the column if there is no online alter
			table in progress or it is not being indexed
			in new table */
			if (!dict_index_is_online_ddl(clust_index)
			    || !row_log_col_is_indexed(clust_index, col_no)) {
				continue;
			}
		}

		dfield = dtuple_get_nth_v_field(row, col_no);

		for (i = 0; i < upd_get_n_fields(update); i++) {
			const upd_field_t*	upd_field
				= upd_get_nth_field(update, i);
			if (!upd_fld_is_virtual_col(upd_field)
			    || upd_field->field_no != col->v_pos) {
				continue;
			}

			if (upd_new) {
				dfield_copy_data(dfield, &upd_field->new_val);
			} else {
				dfield_copy_data(dfield, upd_field->old_v_val);
			}

			/* Copy the type of the new value as well; the
			row field may still be typed DATA_MISSING. */
			dfield_get_type(dfield)->mtype =
				upd_field->new_val.type.mtype;
			dfield_get_type(dfield)->prtype =
				upd_field->new_val.type.prtype;
			dfield_get_type(dfield)->mbminmaxlen =
				upd_field->new_val.type.mbminmaxlen;
			break;
		}
	}

	bool	first_v_col = true;
	bool	is_undo_log = true;

	/* Pass 2: we will read those unchanged (but indexed) virtual
	columns in from the update undo log record, if one was given. */
	if (ptr != NULL) {
		const byte*	end_ptr;

		/* The first 2 bytes hold the byte length of the
		logged column list. */
		end_ptr = ptr + mach_read_from_2(ptr);
		ptr += 2;

		while (ptr != end_ptr) {
			const byte*		field;
			ulint			field_no;
			ulint			len;
			ulint			orig_len;
			bool			is_v;

			field_no = mach_read_next_compressed(&ptr);

			/* Virtual columns are logged with field numbers
			offset by REC_MAX_N_FIELDS. */
			is_v = (field_no >= REC_MAX_N_FIELDS);

			if (is_v) {
				ptr = trx_undo_read_v_idx(
					table, ptr, first_v_col, &is_undo_log,
					&field_no);
				first_v_col = false;
			}

			ptr = trx_undo_rec_get_col_val(
				ptr, &field, &len, &orig_len);

			/* ULINT_UNDEFINED means the virtual column no
			longer exists (e.g. dropped); skip its value. */
			if (field_no == ULINT_UNDEFINED) {
				ut_ad(is_v);
				continue;
			}

			if (is_v) {
				dict_v_col_t* vcol = dict_table_get_nth_v_col(
							table, field_no);

				row_upd_set_vcol_data(row, field, len, vcol);

				if (undo_row) {
					row_upd_set_vcol_data(
						undo_row, field, len, vcol);
				}
			}
			ut_ad(ptr<= end_ptr);
		}
	}
}
1577
1578 /***********************************************************//**
1579 Replaces the new column values stored in the update vector. */
1580 void
row_upd_replace(dtuple_t * row,row_ext_t ** ext,const dict_index_t * index,const upd_t * update,mem_heap_t * heap)1581 row_upd_replace(
1582 /*============*/
1583 dtuple_t* row, /*!< in/out: row where replaced,
1584 indexed by col_no;
1585 the clustered index record must be
1586 covered by a lock or a page latch to
1587 prevent deletion (rollback or purge) */
1588 row_ext_t** ext, /*!< out, own: NULL, or externally
1589 stored column prefixes */
1590 const dict_index_t* index, /*!< in: clustered index */
1591 const upd_t* update, /*!< in: an update vector built for the
1592 clustered index */
1593 mem_heap_t* heap) /*!< in: memory heap */
1594 {
1595 ulint col_no;
1596 ulint i;
1597 ulint n_cols;
1598 ulint n_ext_cols;
1599 ulint* ext_cols;
1600 const dict_table_t* table;
1601
1602 ut_ad(row);
1603 ut_ad(ext);
1604 ut_ad(index);
1605 ut_ad(dict_index_is_clust(index));
1606 ut_ad(update);
1607 ut_ad(heap);
1608 ut_ad(update->validate());
1609
1610 n_cols = dtuple_get_n_fields(row);
1611 table = index->table;
1612 ut_ad(n_cols == dict_table_get_n_cols(table));
1613
1614 ext_cols = static_cast<ulint*>(
1615 mem_heap_alloc(heap, n_cols * sizeof *ext_cols));
1616
1617 n_ext_cols = 0;
1618
1619 dtuple_set_info_bits(row, update->info_bits);
1620
1621 for (col_no = 0; col_no < n_cols; col_no++) {
1622
1623 const dict_col_t* col
1624 = dict_table_get_nth_col(table, col_no);
1625 const ulint clust_pos
1626 = dict_col_get_clust_pos(col, index);
1627 dfield_t* dfield;
1628
1629 if (UNIV_UNLIKELY(clust_pos == ULINT_UNDEFINED)) {
1630
1631 continue;
1632 }
1633
1634 dfield = dtuple_get_nth_field(row, col_no);
1635
1636 for (i = 0; i < upd_get_n_fields(update); i++) {
1637
1638 const upd_field_t* upd_field
1639 = upd_get_nth_field(update, i);
1640
1641 if (upd_field->field_no != clust_pos
1642 || upd_fld_is_virtual_col(upd_field)) {
1643
1644 continue;
1645 }
1646
1647 dfield_copy_data(dfield, &upd_field->new_val);
1648 break;
1649 }
1650
1651 if (dfield_is_ext(dfield) && col->ord_part) {
1652 ext_cols[n_ext_cols++] = col_no;
1653 }
1654 }
1655
1656 if (n_ext_cols) {
1657 *ext = row_ext_create(n_ext_cols, ext_cols, table->flags, row,
1658 heap);
1659 } else {
1660 *ext = NULL;
1661 }
1662
1663 row_upd_replace_vcol(row, table, update, true, NULL, NULL);
1664 }
1665
1666 /***********************************************************//**
1667 Checks if an update vector changes an ordering field of an index record.
1668
1669 This function is fast if the update vector is short or the number of ordering
1670 fields in the index is small. Otherwise, this can be quadratic.
1671 NOTE: we compare the fields as binary strings!
1672 @return TRUE if update vector changes an ordering field in the index record */
1673 ibool
row_upd_changes_ord_field_binary_func(dict_index_t * index,const upd_t * update,const que_thr_t * thr,const dtuple_t * row,const row_ext_t * ext,ulint flag)1674 row_upd_changes_ord_field_binary_func(
1675 /*==================================*/
1676 dict_index_t* index, /*!< in: index of the record */
1677 const upd_t* update, /*!< in: update vector for the row; NOTE: the
1678 field numbers in this MUST be clustered index
1679 positions! */
1680 #ifdef UNIV_DEBUG
1681 const que_thr_t*thr, /*!< in: query thread */
1682 #endif /* UNIV_DEBUG */
1683 const dtuple_t* row, /*!< in: old value of row, or NULL if the
1684 row and the data values in update are not
1685 known when this function is called, e.g., at
1686 compile time */
1687 const row_ext_t*ext, /*!< NULL, or prefixes of the externally
1688 stored columns in the old row */
1689 ulint flag) /*!< in: ROW_BUILD_NORMAL,
1690 ROW_BUILD_FOR_PURGE or ROW_BUILD_FOR_UNDO */
1691 {
1692 ulint n_unique;
1693 ulint i;
1694 const dict_index_t* clust_index;
1695
1696 ut_ad(index);
1697 ut_ad(update);
1698 ut_ad(thr);
1699 ut_ad(thr->graph);
1700 ut_ad(thr->graph->trx);
1701
1702 n_unique = dict_index_get_n_unique(index);
1703
1704 clust_index = dict_table_get_first_index(index->table);
1705
1706 for (i = 0; i < n_unique; i++) {
1707
1708 const dict_field_t* ind_field;
1709 const dict_col_t* col;
1710 ulint col_no;
1711 const upd_field_t* upd_field;
1712 const dfield_t* dfield;
1713 dfield_t dfield_ext;
1714 ulint dfield_len;
1715 const byte* buf;
1716 bool is_virtual;
1717 const dict_v_col_t* vcol = NULL;
1718
1719 ind_field = dict_index_get_nth_field(index, i);
1720 col = dict_field_get_col(ind_field);
1721 col_no = dict_col_get_no(col);
1722 is_virtual = dict_col_is_virtual(col);
1723
1724 if (is_virtual) {
1725 vcol = reinterpret_cast<const dict_v_col_t*>(col);
1726
1727 upd_field = upd_get_field_by_field_no(
1728 update, vcol->v_pos, true);
1729 } else {
1730 upd_field = upd_get_field_by_field_no(
1731 update,
1732 dict_col_get_clust_pos(col, clust_index),
1733 false);
1734 }
1735
1736 if (upd_field == NULL) {
1737 continue;
1738 }
1739
1740 if (row == NULL) {
1741 ut_ad(ext == NULL);
1742 return(TRUE);
1743 }
1744
1745 if (is_virtual) {
1746 dfield = dtuple_get_nth_v_field(
1747 row, vcol->v_pos);
1748 } else {
1749 dfield = dtuple_get_nth_field(row, col_no);
1750 }
1751
1752 /* For spatial index update, since the different geometry
1753 data could generate same MBR, so, if the new index entry is
1754 same as old entry, which means the MBR is not changed, we
1755 don't need to do anything. */
1756 if (dict_index_is_spatial(index) && i == 0) {
1757 double mbr1[SPDIMS * 2];
1758 double mbr2[SPDIMS * 2];
1759 rtr_mbr_t* old_mbr;
1760 rtr_mbr_t* new_mbr;
1761 uchar* dptr = NULL;
1762 ulint flen = 0;
1763 ulint dlen = 0;
1764 mem_heap_t* temp_heap = NULL;
1765 const dfield_t* new_field = &upd_field->new_val;
1766
1767 const page_size_t page_size
1768 = (ext != NULL)
1769 ? ext->page_size
1770 : dict_table_page_size(
1771 index->table);
1772
1773 ut_ad(dfield->data != NULL
1774 && dfield->len > GEO_DATA_HEADER_SIZE);
1775 ut_ad(dict_col_get_spatial_status(col) != SPATIAL_NONE);
1776
1777 /* Get the old mbr. */
1778 if (dfield_is_ext(dfield)) {
1779 /* For off-page stored data, we
1780 need to read the whole field data. */
1781 flen = dfield_get_len(dfield);
1782 dptr = static_cast<byte*>(
1783 dfield_get_data(dfield));
1784 temp_heap = mem_heap_create(1000);
1785
1786 dptr = btr_copy_externally_stored_field(
1787 &dlen, dptr,
1788 page_size,
1789 flen,
1790 temp_heap);
1791 } else {
1792 dptr = static_cast<uchar*>(dfield->data);
1793 dlen = dfield->len;
1794 }
1795
1796 rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE,
1797 static_cast<uint>(dlen
1798 - GEO_DATA_HEADER_SIZE),
1799 SPDIMS, mbr1);
1800 old_mbr = reinterpret_cast<rtr_mbr_t*>(mbr1);
1801
1802 /* Get the new mbr. */
1803 if (dfield_is_ext(new_field)) {
1804 if (flag == ROW_BUILD_FOR_UNDO
1805 && dict_table_get_format(index->table)
1806 >= UNIV_FORMAT_B) {
1807 /* For undo, and the table is Barrcuda,
1808 we need to skip the prefix data. */
1809 flen = BTR_EXTERN_FIELD_REF_SIZE;
1810 ut_ad(dfield_get_len(new_field) >=
1811 BTR_EXTERN_FIELD_REF_SIZE);
1812 dptr = static_cast<byte*>(
1813 dfield_get_data(new_field))
1814 + dfield_get_len(new_field)
1815 - BTR_EXTERN_FIELD_REF_SIZE;
1816 } else {
1817 flen = dfield_get_len(new_field);
1818 dptr = static_cast<byte*>(
1819 dfield_get_data(new_field));
1820 }
1821
1822 if (temp_heap == NULL) {
1823 temp_heap = mem_heap_create(1000);
1824 }
1825
1826 dptr = btr_copy_externally_stored_field(
1827 &dlen, dptr,
1828 page_size,
1829 flen,
1830 temp_heap);
1831 } else {
1832 dptr = static_cast<uchar*>(upd_field->new_val.data);
1833 dlen = upd_field->new_val.len;
1834 }
1835 rtree_mbr_from_wkb(dptr + GEO_DATA_HEADER_SIZE,
1836 static_cast<uint>(dlen
1837 - GEO_DATA_HEADER_SIZE),
1838 SPDIMS, mbr2);
1839 new_mbr = reinterpret_cast<rtr_mbr_t*>(mbr2);
1840
1841 if (temp_heap) {
1842 mem_heap_free(temp_heap);
1843 }
1844
1845 if (!MBR_EQUAL_CMP(old_mbr, new_mbr)) {
1846 return(TRUE);
1847 } else {
1848 continue;
1849 }
1850 }
1851
1852 /* This treatment of column prefix indexes is loosely
1853 based on row_build_index_entry(). */
1854
1855 if (UNIV_LIKELY(ind_field->prefix_len == 0)
1856 || dfield_is_null(dfield)) {
1857 /* do nothing special */
1858 } else if (ext) {
1859 /* Silence a compiler warning without
1860 silencing a Valgrind error. */
1861 dfield_len = 0;
1862 UNIV_MEM_INVALID(&dfield_len, sizeof dfield_len);
1863 /* See if the column is stored externally. */
1864 buf = row_ext_lookup(ext, col_no, &dfield_len);
1865
1866 ut_ad(col->ord_part);
1867
1868 if (UNIV_LIKELY_NULL(buf)) {
1869 if (UNIV_UNLIKELY(buf == field_ref_zero)) {
1870 /* The externally stored field
1871 was not written yet. This
1872 record should only be seen by
1873 recv_recovery_rollback_active(),
1874 when the server had crashed before
1875 storing the field. */
1876 ut_ad(thr->graph->trx->is_recovered);
1877 ut_ad(trx_is_recv(thr->graph->trx));
1878 return(TRUE);
1879 }
1880
1881 goto copy_dfield;
1882 }
1883 } else if (dfield_is_ext(dfield)) {
1884 dfield_len = dfield_get_len(dfield);
1885 ut_a(dfield_len > BTR_EXTERN_FIELD_REF_SIZE);
1886 dfield_len -= BTR_EXTERN_FIELD_REF_SIZE;
1887 ut_a(dict_index_is_clust(index)
1888 || ind_field->prefix_len <= dfield_len);
1889
1890 buf = static_cast<byte*>(dfield_get_data(dfield));
1891 copy_dfield:
1892 ut_a(dfield_len > 0);
1893 dfield_copy(&dfield_ext, dfield);
1894 dfield_set_data(&dfield_ext, buf, dfield_len);
1895 dfield = &dfield_ext;
1896 }
1897
1898 if (!dfield_datas_are_binary_equal(
1899 dfield, &upd_field->new_val,
1900 ind_field->prefix_len)) {
1901
1902 return(TRUE);
1903 }
1904 }
1905
1906 return(FALSE);
1907 }
1908
1909 /***********************************************************//**
1910 Checks if an update vector changes an ordering field of an index record.
1911 NOTE: we compare the fields as binary strings!
1912 @return TRUE if update vector may change an ordering field in an index
1913 record */
1914 ibool
row_upd_changes_some_index_ord_field_binary(const dict_table_t * table,const upd_t * update)1915 row_upd_changes_some_index_ord_field_binary(
1916 /*========================================*/
1917 const dict_table_t* table, /*!< in: table */
1918 const upd_t* update) /*!< in: update vector for the row */
1919 {
1920 upd_field_t* upd_field;
1921 dict_index_t* index;
1922 ulint i;
1923
1924 index = dict_table_get_first_index(table);
1925
1926 for (i = 0; i < upd_get_n_fields(update); i++) {
1927
1928 upd_field = upd_get_nth_field(update, i);
1929
1930 if (upd_fld_is_virtual_col(upd_field)) {
1931 if (dict_table_get_nth_v_col(index->table,
1932 upd_field->field_no)
1933 ->m_col.ord_part) {
1934 return(TRUE);
1935 }
1936 } else {
1937 if (dict_field_get_col(dict_index_get_nth_field(
1938 index, upd_field->field_no))->ord_part) {
1939 return(TRUE);
1940 }
1941 }
1942 }
1943
1944 return(FALSE);
1945 }
1946
1947 /***********************************************************//**
1948 Checks if an FTS Doc ID column is affected by an UPDATE.
1949 @return whether the Doc ID column is changed */
1950 bool
row_upd_changes_doc_id(dict_table_t * table,upd_field_t * upd_field)1951 row_upd_changes_doc_id(
1952 /*===================*/
1953 dict_table_t* table, /*!< in: table */
1954 upd_field_t* upd_field) /*!< in: field to check */
1955 {
1956 ulint col_no;
1957 dict_index_t* clust_index;
1958 fts_t* fts = table->fts;
1959
1960 clust_index = dict_table_get_first_index(table);
1961
1962 /* Convert from index-specific column number to table-global
1963 column number. */
1964 col_no = dict_index_get_nth_col_no(clust_index, upd_field->field_no);
1965
1966 return(col_no == fts->doc_col);
1967 }
1968 /***********************************************************//**
1969 Checks if an FTS indexed column is affected by an UPDATE.
1970 @return offset within fts_t::indexes if FTS indexed column updated else
1971 ULINT_UNDEFINED */
1972 ulint
row_upd_changes_fts_column(dict_table_t * table,upd_field_t * upd_field)1973 row_upd_changes_fts_column(
1974 /*=======================*/
1975 dict_table_t* table, /*!< in: table */
1976 upd_field_t* upd_field) /*!< in: field to check */
1977 {
1978 ulint col_no;
1979 dict_index_t* clust_index;
1980 fts_t* fts = table->fts;
1981
1982 if (upd_fld_is_virtual_col(upd_field)) {
1983 col_no = upd_field->field_no;
1984 return(dict_table_is_fts_column(fts->indexes, col_no, true));
1985 } else {
1986 clust_index = dict_table_get_first_index(table);
1987
1988 /* Convert from index-specific column number to table-global
1989 column number. */
1990 col_no = dict_index_get_nth_col_no(clust_index,
1991 upd_field->field_no);
1992 return(dict_table_is_fts_column(fts->indexes, col_no, false));
1993 }
1994
1995 }
1996
1997 /***********************************************************//**
1998 Checks if an update vector changes some of the first ordering fields of an
1999 index record. This is only used in foreign key checks and we can assume
2000 that index does not contain column prefixes.
2001 @return TRUE if changes */
2002 static
2003 ibool
row_upd_changes_first_fields_binary(dtuple_t * entry,dict_index_t * index,const upd_t * update,ulint n)2004 row_upd_changes_first_fields_binary(
2005 /*================================*/
2006 dtuple_t* entry, /*!< in: index entry */
2007 dict_index_t* index, /*!< in: index of entry */
2008 const upd_t* update, /*!< in: update vector for the row */
2009 ulint n) /*!< in: how many first fields to check */
2010 {
2011 ulint n_upd_fields;
2012 ulint i, j;
2013 dict_index_t* clust_index;
2014
2015 ut_ad(update && index);
2016 ut_ad(n <= dict_index_get_n_fields(index));
2017
2018 n_upd_fields = upd_get_n_fields(update);
2019 clust_index = dict_table_get_first_index(index->table);
2020
2021 for (i = 0; i < n; i++) {
2022
2023 const dict_field_t* ind_field;
2024 const dict_col_t* col;
2025 ulint col_pos;
2026
2027 ind_field = dict_index_get_nth_field(index, i);
2028 col = dict_field_get_col(ind_field);
2029 col_pos = dict_col_get_clust_pos(col, clust_index);
2030
2031 ut_a(ind_field->prefix_len == 0);
2032
2033 for (j = 0; j < n_upd_fields; j++) {
2034
2035 upd_field_t* upd_field
2036 = upd_get_nth_field(update, j);
2037
2038 if (col_pos == upd_field->field_no
2039 && !dfield_datas_are_binary_equal(
2040 dtuple_get_nth_field(entry, i),
2041 &upd_field->new_val, 0)) {
2042
2043 return(TRUE);
2044 }
2045 }
2046 }
2047
2048 return(FALSE);
2049 }
2050
2051 /*********************************************************************//**
2052 Copies the column values from a record. */
2053 UNIV_INLINE
2054 void
row_upd_copy_columns(rec_t * rec,const ulint * offsets,sym_node_t * column)2055 row_upd_copy_columns(
2056 /*=================*/
2057 rec_t* rec, /*!< in: record in a clustered index */
2058 const ulint* offsets,/*!< in: array returned by rec_get_offsets() */
2059 sym_node_t* column) /*!< in: first column in a column list, or
2060 NULL */
2061 {
2062 byte* data;
2063 ulint len;
2064
2065 while (column) {
2066 data = rec_get_nth_field(rec, offsets,
2067 column->field_nos[SYM_CLUST_FIELD_NO],
2068 &len);
2069 eval_node_copy_and_alloc_val(column, data, len);
2070
2071 column = UT_LIST_GET_NEXT(col_var_list, column);
2072 }
2073 }
2074
2075 /*********************************************************************//**
2076 Calculates the new values for fields to update. Note that row_upd_copy_columns
2077 must have been called first. */
2078 UNIV_INLINE
2079 void
row_upd_eval_new_vals(upd_t * update)2080 row_upd_eval_new_vals(
2081 /*==================*/
2082 upd_t* update) /*!< in/out: update vector */
2083 {
2084 que_node_t* exp;
2085 upd_field_t* upd_field;
2086 ulint n_fields;
2087 ulint i;
2088
2089 n_fields = upd_get_n_fields(update);
2090
2091 for (i = 0; i < n_fields; i++) {
2092 upd_field = upd_get_nth_field(update, i);
2093
2094 exp = upd_field->exp;
2095
2096 eval_exp(exp);
2097
2098 dfield_copy_data(&(upd_field->new_val), que_node_get_val(exp));
2099 }
2100 }
2101
/** Stores to the heap the virtual columns that need for any indexes.
Only virtual columns that are part of some index ordering (ord_part) are
materialized into node->row; their values come from the update vector,
from update->old_vrow, or are (re)computed via innobase_get_computed_value().
@param[in,out]	node		row update node
@param[in]	update		an update vector if it is update
@param[in]	thd		mysql thread handle
@param[in,out]	mysql_table	mysql table object */
static
void
row_upd_store_v_row(
	upd_node_t*	node,
	const upd_t*	update,
	THD*		thd,
	TABLE*		mysql_table)
{
	/* Scratch heap used only by innobase_get_computed_value();
	freed before returning. */
	mem_heap_t*	heap = NULL;
	dict_index_t*	index = dict_table_get_first_index(node->table);

	for (ulint col_no = 0; col_no < dict_table_get_n_v_cols(node->table);
	     col_no++) {

		const dict_v_col_t*	col
			= dict_table_get_nth_v_col(node->table, col_no);

		if (col->m_col.ord_part) {
			dfield_t*	dfield
				= dtuple_get_nth_v_field(node->row, col_no);
			ulint		n_upd
				= update ? upd_get_n_fields(update) : 0;
			ulint		i = 0;

			/* Check if the value is already in update vector */
			for (i = 0; i < n_upd; i++) {
				const upd_field_t*	upd_field
					= upd_get_nth_field(update, i);
				/* Only virtual-column update fields with
				a matching virtual position count. */
				if (!(upd_field->new_val.type.prtype
				      & DATA_VIRTUAL)
				    || upd_field->field_no != col->v_pos) {
					continue;
				}

				/* Store the OLD value (old_v_val); the row
				being stored is the pre-update row image. */
				dfield_copy_data(dfield, upd_field->old_v_val);
				dfield_dup(dfield, node->heap);
				break;
			}

			/* Not updated */
			if (i >= n_upd) {
				/* If this is an update, then the value
				should be in update->old_vrow */
				if (update) {
					if (update->old_vrow == NULL) {
						/* This only happens in
						cascade update. And virtual
						column can't be affected,
						so it is Ok to set it to NULL */
						dfield_set_null(dfield);
					} else {
						dfield_t*	vfield
							= dtuple_get_nth_v_field(
								update->old_vrow,
								col_no);
						dfield_copy_data(dfield, vfield);
						dfield_dup(dfield, node->heap);
						/* A NULL in old_vrow may mean
						"not materialized"; recompute
						the value in that case. */
						if (dfield_is_null(dfield)) {
							innobase_get_computed_value(
								node->row, col, index,
								&heap, node->heap, NULL,
								thd, mysql_table, NULL,
								NULL, NULL);
						}
					}
				} else {
					/* Need to compute, this happens when
					deleting row */
					innobase_get_computed_value(
						node->row, col, index,
						&heap, node->heap, NULL,
						thd, mysql_table, NULL,
						NULL, NULL);
				}
			}
		}
	}

	if (heap) {
		mem_heap_free(heap);
	}
}
2189
/** Stores to the heap the row on which the node->pcur is positioned.
Builds node->row (the old row image) from the clustered index record,
materializes any indexed virtual columns, and for an UPDATE also builds
node->upd_row (the row after applying node->update).
@param[in]	node		row update node
@param[in]	thd		mysql thread handle
@param[in,out]	mysql_table	NULL, or mysql table object when
				user thread invokes dml */
void
row_upd_store_row(
	upd_node_t*	node,
	THD*		thd,
	TABLE*		mysql_table)
{
	dict_index_t*	clust_index;
	rec_t*		rec;
	mem_heap_t*	heap		= NULL;
	row_ext_t**	ext;
	ulint		offsets_[REC_OFFS_NORMAL_SIZE];
	const ulint*	offsets;
	rec_offs_init(offsets_);

	/* The cursor must still be latched on the record. */
	ut_ad(node->pcur->latch_mode != BTR_NO_LATCHES);

	/* Discard any previously stored row image before rebuilding. */
	if (node->row != NULL) {
		mem_heap_empty(node->heap);
	}

	clust_index = dict_table_get_first_index(node->table);

	rec = btr_pcur_get_rec(node->pcur);

	offsets = rec_get_offsets(rec, clust_index, offsets_,
				  ULINT_UNDEFINED, &heap);

	if (dict_table_get_format(node->table) >= UNIV_FORMAT_B) {
		/* In DYNAMIC or COMPRESSED format, there is no prefix
		of externally stored columns in the clustered index
		record. Build a cache of column prefixes. */
		ext = &node->ext;
	} else {
		/* REDUNDANT and COMPACT formats store a local
		768-byte prefix of each externally stored column.
		No cache is needed. */
		ext = NULL;
		node->ext = NULL;
	}

	node->row = row_build(ROW_COPY_DATA, clust_index, rec, offsets,
			      NULL, NULL, NULL, ext, node->heap);

	/* Virtual columns are materialized only when the table has some;
	for a delete there is no update vector to consult. */
	if (node->table->n_v_cols) {
		row_upd_store_v_row(node, node->is_delete ? NULL : node->update,
				    thd, mysql_table);
	}

	if (node->is_delete) {
		node->upd_row = NULL;
		node->upd_ext = NULL;
	} else {
		/* Build the updated row image by copying the old row
		and applying the update vector in place. */
		node->upd_row = dtuple_copy(node->row, node->heap);
		row_upd_replace(node->upd_row, &node->upd_ext,
				clust_index, node->update, node->heap);
	}

	if (UNIV_LIKELY_NULL(heap)) {
		mem_heap_free(heap);
	}
}
2256
2257 /***********************************************************//**
2258 Print a MBR data from disk */
2259 static
2260 void
srv_mbr_print(const byte * data)2261 srv_mbr_print(const byte* data)
2262 {
2263 double a, b, c, d;
2264 a = mach_double_read(data);
2265 data += sizeof(double);
2266 b = mach_double_read(data);
2267 data += sizeof(double);
2268 c = mach_double_read(data);
2269 data += sizeof(double);
2270 d = mach_double_read(data);
2271
2272 ib::info() << "GIS MBR INFO: " << a << " and " << b << ", " << c
2273 << ", " << d << "\n";
2274 }
2275
2276
#ifdef WITH_WSREP
/** Checks whether the parent query graph node is an UPDATE node that
has a cascade node attached (i.e. a cascading foreign key operation).
@return true if the parent is an UPDATE node with a cascade node */
static inline
bool
row_upd_parent_has_cascade(
	const que_node_t*	parent)
{
	if (parent == NULL) {
		return(false);
	}

	if (que_node_get_type(parent) != QUE_NODE_UPDATE) {
		return(false);
	}

	return(((const upd_node_t*) parent)->cascade_node != NULL);
}
#endif /* WITH_WSREP */
2288
/***********************************************************//**
Updates a secondary index entry of a row: delete-marks the old entry
(found via node->row) and, unless this is a delete, inserts a new entry
built from node->upd_row. Handles online index creation by logging the
operation instead, and performs foreign key checks when the index is
referenced.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
row_upd_sec_index_entry(
/*====================*/
	upd_node_t*	node,	/*!< in: row update node */
	que_thr_t*	thr)	/*!< in: query thread */
{
	mtr_t			mtr;
	const rec_t*		rec;
	btr_pcur_t		pcur;
	mem_heap_t*		heap;
	dtuple_t*		entry;
	dict_index_t*		index;
	btr_cur_t*		btr_cur;
	ibool			referenced;
	dberr_t			err	= DB_SUCCESS;
	trx_t*			trx	= thr_get_trx(thr);
	ulint			mode;
	ulint			flags	= 0;
	enum row_search_result	search_result;

	ut_ad(trx->id != 0);

	index = node->index;

	referenced = row_upd_index_is_referenced(index, trx);
#ifdef WITH_WSREP
	ibool foreign = wsrep_row_upd_index_is_foreign(index, trx);
#endif /* WITH_WSREP */

	/* Heap for the old and new index entries; freed at func_exit. */
	heap = mem_heap_create(1024);

	/* Build old index entry */
	entry = row_build_index_entry(node->row, node->ext, index, heap);
	ut_a(entry);

	/* NOTE(review): log_free_check() presumably ensures redo log
	space before the mtr below; skipped for intrinsic tables. */
	if (!dict_table_is_intrinsic(index->table)) {
		log_free_check();
	}

	DEBUG_SYNC_C_IF_THD(trx->mysql_thd,
			    "before_row_upd_sec_index_entry");

	mtr_start(&mtr);
	mtr.set_named_space(index->space);

	/* Disable REDO logging as lifetime of temp-tables is limited to
	server or connection lifetime and so REDO information is not needed
	on restart for recovery.
	Disable locking as temp-tables are not shared across connection. */
	if (dict_table_is_temporary(index->table)) {
		flags |= BTR_NO_LOCKING_FLAG;
		mtr.set_log_mode(MTR_LOG_NO_REDO);

		if (dict_table_is_intrinsic(index->table)) {
			flags |= BTR_NO_UNDO_LOG_FLAG;
		}
	}

	if (!index->is_committed()) {
		/* The index->online_status may change if the index is
		or was being created online, but not committed yet. It
		is protected by index->lock. */

		mtr_s_lock(dict_index_get_lock(index), &mtr);

		switch (dict_index_get_online_status(index)) {
		case ONLINE_INDEX_COMPLETE:
			/* This is a normal index. Do not log anything.
			Perform the update on the index tree directly. */
			break;
		case ONLINE_INDEX_CREATION:
			/* Log a DELETE and optionally INSERT. */
			row_log_online_op(index, entry, 0);

			if (!node->is_delete) {
				mem_heap_empty(heap);
				entry = row_build_index_entry(
					node->upd_row, node->upd_ext,
					index, heap);
				ut_a(entry);
				row_log_online_op(index, entry, trx->id);
			}
			/* fall through */
		case ONLINE_INDEX_ABORTED:
		case ONLINE_INDEX_ABORTED_DROPPED:
			mtr_commit(&mtr);
			goto func_exit;
		}

		/* We can only buffer delete-mark operations if there
		are no foreign key constraints referring to the index.
		Change buffering is disabled for temporary tables and
		spatial index. */
		mode = (referenced || dict_table_is_temporary(index->table)
			|| dict_index_is_spatial(index))
			? BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
			: BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED
			| BTR_DELETE_MARK;
	} else {
		/* For secondary indexes,
		index->online_status==ONLINE_INDEX_COMPLETE if
		index->is_committed(). */
		ut_ad(!dict_index_is_online_ddl(index));

		/* We can only buffer delete-mark operations if there
		are no foreign key constraints referring to the index.
		Change buffering is disabled for temporary tables and
		spatial index. */
		mode = (referenced || dict_table_is_temporary(index->table)
			|| dict_index_is_spatial(index))
			? BTR_MODIFY_LEAF
			: BTR_MODIFY_LEAF | BTR_DELETE_MARK;
	}

	if (dict_index_is_spatial(index)) {
		ut_ad(mode & BTR_MODIFY_LEAF);
		mode |= BTR_RTREE_DELETE_MARK;
	}

	/* Set the query thread, so that ibuf_insert_low() will be
	able to invoke thd_get_trx(). */
	btr_pcur_get_btr_cur(&pcur)->thr = thr;

	search_result = row_search_index_entry(index, entry, mode,
					       &pcur, &mtr);

	btr_cur = btr_pcur_get_btr_cur(&pcur);

	rec = btr_cur_get_rec(btr_cur);

	switch (search_result) {
	case ROW_NOT_DELETED_REF:	/* should only occur for BTR_DELETE */
		ut_error;
		break;
	case ROW_BUFFERED:
		/* Entry was delete marked already. */
		break;

	case ROW_NOT_FOUND:
		if (!index->is_committed()) {
			/* When online CREATE INDEX copied the update
			that we already made to the clustered index,
			and completed the secondary index creation
			before we got here, the old secondary index
			record would not exist. The CREATE INDEX
			should be waiting for a MySQL meta-data lock
			upgrade at least until this UPDATE returns.
			After that point, set_committed(true) would be
			invoked by commit_inplace_alter_table(). */
			break;
		}

		if (dict_index_is_spatial(index) && btr_cur->rtr_info->fd_del) {
			/* We found the record, but a delete marked */
			break;
		}

		/* A missing record here indicates corruption; report
		diagnostics (the MBR dump is meaningful for spatial
		indexes) and, in debug builds, validate the tree. */
		ib::error()
			<< "Record in index " << index->name
			<< " of table " << index->table->name
			<< " was not found on update: " << *entry
			<< " at: " << rec_index_print(rec, index);
		srv_mbr_print((unsigned char*)entry->fields[0].data);
#ifdef UNIV_DEBUG
		mtr_commit(&mtr);
		mtr_start(&mtr);
		ut_ad(btr_validate_index(index, 0, false));
		ut_ad(0);
#endif /* UNIV_DEBUG */
		break;
	case ROW_FOUND:
		ut_ad(err == DB_SUCCESS);

		/* Delete mark the old index record; it can already be
		delete marked if we return after a lock wait in
		row_ins_sec_index_entry() below */
		if (!rec_get_deleted_flag(
			    rec, dict_table_is_comp(index->table))) {
#ifdef WITH_WSREP
			que_node_t *parent = que_node_get_parent(node);
#endif /* WITH_WSREP */
			err = btr_cur_del_mark_set_sec_rec(
				flags, btr_cur, TRUE, thr, &mtr);
			if (err != DB_SUCCESS) {
				break;
			}
#ifdef WITH_WSREP
			/* Galera: run the FK check for replication
			applier purposes when this node is not a
			brute-force applier and no cascade is active. */
			if (wsrep_on(trx->mysql_thd) &&
			    !wsrep_thd_is_BF(trx->mysql_thd, FALSE) &&
			    err == DB_SUCCESS && !referenced && foreign &&
			    !row_upd_parent_has_cascade(parent)
			) {
				ulint*	offsets =
					rec_get_offsets(
						rec, index, NULL, ULINT_UNDEFINED,
						&heap);
				err = wsrep_row_upd_check_foreign_constraints(
					node, &pcur, index->table,
					index, offsets, thr, &mtr);
				switch (err) {
				case DB_SUCCESS:
				case DB_NO_REFERENCED_ROW:
					err = DB_SUCCESS;
					break;
				case DB_DEADLOCK:
					if (wsrep_debug) {
						ib::warn() << "WSREP: sec index FK check fail for deadlock"
							<< " index " << index->name
							<< " table " << index->table->name;
					}
					break;
				case DB_LOCK_WAIT_TIMEOUT:
				case DB_LOCK_WAIT:
					err = DB_LOCK_WAIT_TIMEOUT;
					break;
				default:
					ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err)
						<< " index " << index->name
						<< " table " << index->table->name;
					break;
				}
			}

			if (err != DB_SUCCESS) {
				break;
			}
#endif /* WITH_WSREP */
		}

		ut_ad(err == DB_SUCCESS);

		if (referenced) {

			ulint*	offsets;

			offsets = rec_get_offsets(
				rec, index, NULL, ULINT_UNDEFINED,
				&heap);

			/* NOTE that the following call loses
			the position of pcur ! */
			err = row_upd_check_references_constraints(
				node, &pcur, index->table,
				index, offsets, thr, &mtr);
		}
		break;
	}

	btr_pcur_close(&pcur);
	mtr_commit(&mtr);

	/* For a delete we are done; otherwise insert the new version
	of the entry. */
	if (node->is_delete || err != DB_SUCCESS) {

		goto func_exit;
	}

	mem_heap_empty(heap);

	/* Build a new index entry */
	entry = row_build_index_entry(node->upd_row, node->upd_ext,
				      index, heap);
	ut_a(entry);

	/* Insert new index entry */
	err = row_ins_sec_index_entry(index, entry, thr, false);

func_exit:
	mem_heap_free(heap);

	return(err);
}
2565
2566 /***********************************************************//**
2567 Updates the secondary index record if it is changed in the row update or
2568 deletes it if this is a delete.
2569 @return DB_SUCCESS if operation successfully completed, else error
2570 code or DB_LOCK_WAIT */
2571 static MY_ATTRIBUTE((warn_unused_result))
2572 dberr_t
row_upd_sec_step(upd_node_t * node,que_thr_t * thr)2573 row_upd_sec_step(
2574 /*=============*/
2575 upd_node_t* node, /*!< in: row update node */
2576 que_thr_t* thr) /*!< in: query thread */
2577 {
2578 ut_ad((node->state == UPD_NODE_UPDATE_ALL_SEC)
2579 || (node->state == UPD_NODE_UPDATE_SOME_SEC));
2580 ut_ad(!dict_index_is_clust(node->index));
2581
2582 if (node->state == UPD_NODE_UPDATE_ALL_SEC
2583 || row_upd_changes_ord_field_binary(node->index, node->update,
2584 thr, node->row, node->ext)) {
2585 return(row_upd_sec_index_entry(node, thr));
2586 }
2587
2588 return(DB_SUCCESS);
2589 }
2590
#ifdef UNIV_DEBUG
/* In debug builds the wrapper also passes the record offsets so that
extra consistency checks can be performed against the old record. */
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update)	\
	row_upd_clust_rec_by_insert_inherit_func(rec,offsets,entry,update)
#else /* UNIV_DEBUG */
# define row_upd_clust_rec_by_insert_inherit(rec,offsets,entry,update)	\
	row_upd_clust_rec_by_insert_inherit_func(rec,entry,update)
#endif /* UNIV_DEBUG */
/*******************************************************************//**
Mark non-updated off-page columns inherited when the primary key is
updated. We must mark them as inherited in entry, so that they are not
freed in a rollback. A limited version of this function used to be
called btr_cur_mark_dtuple_inherited_extern().
@return whether any columns were inherited */
static
bool
row_upd_clust_rec_by_insert_inherit_func(
/*=====================================*/
	const rec_t*	rec,	/*!< in: old record, or NULL */
#ifdef UNIV_DEBUG
	const ulint*	offsets,/*!< in: rec_get_offsets(rec), or NULL */
#endif /* UNIV_DEBUG */
	dtuple_t*	entry,	/*!< in/out: updated entry to be
				inserted into the clustered index */
	const upd_t*	update)	/*!< in: update vector */
{
	bool	inherit	= false;
	ulint	i;

	/* rec and offsets must both be NULL, or both be non-NULL. */
	ut_ad(!rec == !offsets);
	ut_ad(!rec || rec_offs_any_extern(offsets));

	for (i = 0; i < dtuple_get_n_fields(entry); i++) {
		dfield_t*	dfield	= dtuple_get_nth_field(entry, i);
		byte*		data;
		ulint		len;

		ut_ad(!offsets
		      || !rec_offs_nth_extern(offsets, i)
		      == !dfield_is_ext(dfield)
		      || upd_get_field_by_field_no(update, i, false));
		/* Only non-updated externally stored columns are
		inherited from the old record. */
		if (!dfield_is_ext(dfield)
		    || upd_get_field_by_field_no(update, i, false)) {
			continue;
		}

#ifdef UNIV_DEBUG
		if (UNIV_LIKELY(rec != NULL)) {
			const byte* rec_data
				= rec_get_nth_field(rec, offsets, i, &len);
			ut_ad(len == dfield_get_len(dfield));
			ut_ad(len != UNIV_SQL_NULL);
			ut_ad(len >= BTR_EXTERN_FIELD_REF_SIZE);

			rec_data += len - BTR_EXTERN_FIELD_REF_SIZE;

			/* The pointer must not be zero. */
			ut_ad(memcmp(rec_data, field_ref_zero,
				     BTR_EXTERN_FIELD_REF_SIZE));
			/* The BLOB must be owned. */
			ut_ad(!(rec_data[BTR_EXTERN_LEN]
				& BTR_EXTERN_OWNER_FLAG));
		}
#endif /* UNIV_DEBUG */

		len = dfield_get_len(dfield);
		ut_a(len != UNIV_SQL_NULL);
		ut_a(len >= BTR_EXTERN_FIELD_REF_SIZE);

		/* The external field reference is stored in the last
		BTR_EXTERN_FIELD_REF_SIZE bytes of the field data. */
		data = static_cast<byte*>(dfield_get_data(dfield));

		data += len - BTR_EXTERN_FIELD_REF_SIZE;
		/* The pointer must not be zero. */
		ut_a(memcmp(data, field_ref_zero, BTR_EXTERN_FIELD_REF_SIZE));

		/* The BLOB must be owned, unless we are resuming from
		a lock wait and we already had disowned the BLOB. */
		ut_a(rec == NULL
		     || !(data[BTR_EXTERN_LEN] & BTR_EXTERN_OWNER_FLAG));
		data[BTR_EXTERN_LEN] &= ~BTR_EXTERN_OWNER_FLAG;
		data[BTR_EXTERN_LEN] |= BTR_EXTERN_INHERITED_FLAG;
		/* The BTR_EXTERN_INHERITED_FLAG only matters in
		rollback of a fresh insert (insert_undo log).
		Purge (operating on update_undo log) will always free
		the extern fields of a delete-marked row. */

		inherit = true;
	}

	return(inherit);
}
2681
/***********************************************************//**
Marks the clustered index record deleted and inserts the updated version
of the record to the index. This function should be used when the ordering
fields of the clustered index record change. This should be quite rare in
database applications.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
row_upd_clust_rec_by_insert(
/*========================*/
	ulint		flags,  /*!< in: undo logging and locking flags */
	upd_node_t*	node,	/*!< in/out: row update node */
	dict_index_t*	index,	/*!< in: clustered index of the record */
	que_thr_t*	thr,	/*!< in: query thread */
	ibool		referenced,/*!< in: TRUE if index may be referenced in
				a foreign key constraint */
#ifdef WITH_WSREP
	ibool		foreign, /*!< in: TRUE if index is foreign key index */
#endif /* WITH_WSREP */
	mtr_t*		mtr)	/*!< in/out: mtr; gets committed here */
{
	mem_heap_t*	heap;
	btr_pcur_t*	pcur;
	btr_cur_t*	btr_cur;
	trx_t*		trx;
	dict_table_t*	table;
	dtuple_t*	entry;
	dberr_t		err;
	rec_t*		rec;
	ulint*		offsets		= NULL;

#ifdef WITH_WSREP
	que_node_t *parent = que_node_get_parent(node);
#endif /* WITH_WSREP */
	ut_ad(node);
	ut_ad(dict_index_is_clust(index));

	trx = thr_get_trx(thr);
	table = node->table;
	pcur = node->pcur;
	btr_cur	= btr_pcur_get_btr_cur(pcur);

	heap = mem_heap_create(1000);

	/* Build the new clustered index entry from the updated row. */
	entry = row_build_index_entry_low(node->upd_row, node->upd_ext,
					  index, heap, ROW_BUILD_FOR_INSERT);
	ut_ad(dtuple_get_info_bits(entry) == 0);

	row_upd_index_entry_sys_field(entry, index, DATA_TRX_ID, trx->id);

	switch (node->state) {
	default:
		ut_error;
	case UPD_NODE_INSERT_CLUSTERED:
		/* A lock wait occurred in row_ins_clust_index_entry() in
		the previous invocation of this function. The old record
		was already delete-marked; only re-mark the inherited
		off-page columns in the new entry. */
		row_upd_clust_rec_by_insert_inherit(
			NULL, NULL, entry, node->update);
		break;
	case UPD_NODE_UPDATE_CLUSTERED:
		/* This is the first invocation of the function where
		we update the primary key. Delete-mark the old record
		in the clustered index and prepare to insert a new entry. */
		rec = btr_cur_get_rec(btr_cur);
		offsets = rec_get_offsets(rec, index, NULL,
					  ULINT_UNDEFINED, &heap);
		ut_ad(page_rec_is_user_rec(rec));

		if (rec_get_deleted_flag(rec, rec_offs_comp(offsets))) {
			/* If the clustered index record is already delete
			marked, then we are here after a DB_LOCK_WAIT.
			Skip delete marking clustered index and disowning
			its blobs. */
			ut_ad(rec_get_trx_id(rec, index) == trx->id);
			ut_ad(!trx_undo_roll_ptr_is_insert(
				      row_get_rec_roll_ptr(rec, index,
							   offsets)));
			goto check_fk;
		}

		err = btr_cur_del_mark_set_clust_rec(
			flags, btr_cur_get_block(btr_cur), rec, index, offsets,
			thr, node->row, mtr);
		if (err != DB_SUCCESS) {
err_exit:
			mtr_commit(mtr);
			mem_heap_free(heap);
			return(err);
		}

		/* If the new row inherits externally stored
		fields (off-page columns a.k.a. BLOBs) from the
		delete-marked old record, mark them disowned by the
		old record and owned by the new entry. */

		if (rec_offs_any_extern(offsets)) {
			if (row_upd_clust_rec_by_insert_inherit(
				    rec, offsets, entry, node->update)) {
				/* The blobs are disowned here, expecting the
				insert down below to inherit them.  But if the
				insert fails, then this disown will be undone
				when the operation is rolled back. */
				btr_cur_disown_inherited_fields(
					btr_cur_get_page_zip(btr_cur),
					rec, index, offsets, node->update,
					mtr);
			}
		}
check_fk:
		if (referenced) {
			/* NOTE that the following call loses
			the position of pcur ! */

			err = row_upd_check_references_constraints(
				node, pcur, table, index, offsets, thr, mtr);

			if (err != DB_SUCCESS) {
				goto err_exit;
			}
		}
#ifdef WITH_WSREP
		/* Galera: run the FK check for replication purposes
		unless a cascading update is already in progress. */
		else if (wsrep_on(trx->mysql_thd) && foreign &&
			 !row_upd_parent_has_cascade(parent)
		) {
			err = wsrep_row_upd_check_foreign_constraints(
				node, pcur, table, index, offsets, thr, mtr);
			switch (err) {
			case DB_SUCCESS:
			case DB_NO_REFERENCED_ROW:
				err = DB_SUCCESS;
				break;
			case DB_DEADLOCK:
				if (wsrep_debug) {
					ib::warn() << "WSREP: insert FK check fail for deadlock"
						<< " index " << index->name
						<< " table " << index->table->name;
				}
				break;
			default:
				ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err)
					<< " index " << index->name
					<< " table " << index->table->name;
				break;
			}
			if (err != DB_SUCCESS) {
				goto err_exit;
			}
		}
#endif /* WITH_WSREP */
	}

	mtr_commit(mtr);

	/* Insert the new version of the record. On DB_LOCK_WAIT the
	state set below makes the retry take the
	UPD_NODE_INSERT_CLUSTERED branch above. */
	err = row_ins_clust_index_entry(
		index, entry, thr,
		entry->get_n_ext(), false);
	node->state = UPD_NODE_INSERT_CLUSTERED;

	mem_heap_free(heap);

	return(err);
}
2845
/***********************************************************//**
Updates a clustered index record of a row when the ordering fields do
not change. First tries an in-place or optimistic (within-page) update;
if that fails, retries with a pessimistic descent that may reorganize
the tree and store big fields externally.
@return DB_SUCCESS if operation successfully completed, else error
code or DB_LOCK_WAIT */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
row_upd_clust_rec(
/*==============*/
	ulint		flags,  /*!< in: undo logging and locking flags */
	upd_node_t*	node,	/*!< in: row update node */
	dict_index_t*	index,	/*!< in: clustered index */
	ulint*		offsets,/*!< in: rec_get_offsets() on node->pcur */
	mem_heap_t**	offsets_heap,
				/*!< in/out: memory heap, can be emptied */
	que_thr_t*	thr,	/*!< in: query thread */
	mtr_t*		mtr)	/*!< in: mtr; gets committed here */
{
	mem_heap_t*	heap		= NULL;
	big_rec_t*	big_rec		= NULL;
	btr_pcur_t*	pcur;
	btr_cur_t*	btr_cur;
	dberr_t		err;
	const dtuple_t*	rebuilt_old_pk	= NULL;

	ut_ad(node);
	ut_ad(dict_index_is_clust(index));
	ut_ad(!thr_get_trx(thr)->in_rollback);

	pcur = node->pcur;
	btr_cur = btr_pcur_get_btr_cur(pcur);

	ut_ad(btr_cur_get_index(btr_cur) == index);
	ut_ad(!rec_get_deleted_flag(btr_cur_get_rec(btr_cur),
				    dict_table_is_comp(index->table)));
	ut_ad(rec_offs_validate(btr_cur_get_rec(btr_cur), index, offsets));

	/* For online DDL, capture the old PRIMARY KEY value before the
	record is modified, for logging the change to the row log. */
	if (dict_index_is_online_ddl(index)) {
		rebuilt_old_pk = row_log_table_get_pk(
			btr_cur_get_rec(btr_cur), index, offsets, NULL, &heap);
	}

	/* Try optimistic updating of the record, keeping changes within
	the page; we do not check locks because we assume the x-lock on the
	record to update */

	if (node->cmpl_info & UPD_NODE_NO_SIZE_CHANGE) {
		err = btr_cur_update_in_place(
			flags | BTR_NO_LOCKING_FLAG, btr_cur,
			offsets, node->update,
			node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
	} else {
		err = btr_cur_optimistic_update(
			flags | BTR_NO_LOCKING_FLAG, btr_cur,
			&offsets, offsets_heap, node->update,
			node->cmpl_info, thr, thr_get_trx(thr)->id, mtr);
	}

	if (err == DB_SUCCESS) {
		goto success;
	}

	mtr_commit(mtr);

	if (buf_LRU_buf_pool_running_out()) {

		err = DB_LOCK_TABLE_FULL;
		goto func_exit;
	}
	/* We may have to modify the tree structure: do a pessimistic descent
	down the index tree */

	mtr_start(mtr);
	mtr->set_named_space(index->space);

	/* Disable REDO logging as lifetime of temp-tables is limited to
	server or connection lifetime and so REDO information is not needed
	on restart for recovery.
	Disable locking as temp-tables are not shared across connection. */
	if (dict_table_is_temporary(index->table)) {
		flags |= BTR_NO_LOCKING_FLAG;
		mtr->set_log_mode(MTR_LOG_NO_REDO);

		if (dict_table_is_intrinsic(index->table)) {
			flags |= BTR_NO_UNDO_LOG_FLAG;
		}
	}

	/* NOTE: this transaction has an s-lock or x-lock on the record and
	therefore other transactions cannot modify the record when we have no
	latch on the page. In addition, we assume that other query threads of
	the same transaction do not modify the record in the meantime.
	Therefore we can assert that the restoration of the cursor succeeds. */

	ut_a(btr_pcur_restore_position(BTR_MODIFY_TREE, pcur, mtr));

	ut_ad(!rec_get_deleted_flag(btr_pcur_get_rec(pcur),
				    dict_table_is_comp(index->table)));

	if (!heap) {
		heap = mem_heap_create(1024);
	}

	err = btr_cur_pessimistic_update(
		flags | BTR_NO_LOCKING_FLAG | BTR_KEEP_POS_FLAG, btr_cur,
		&offsets, offsets_heap, heap, &big_rec,
		node->update, node->cmpl_info,
		thr, thr_get_trx(thr)->id, mtr);
	if (big_rec) {
		/* Some columns did not fit on the page; store them
		externally within the same mtr. */
		ut_a(err == DB_SUCCESS);

		DEBUG_SYNC_C("before_row_upd_extern");
		err = btr_store_big_rec_extern_fields(
			pcur, node->update, offsets, big_rec, mtr,
			BTR_STORE_UPDATE);
		DEBUG_SYNC_C("after_row_upd_extern");
	}

	if (err == DB_SUCCESS) {
success:
		/* Log the change for any online table rebuild that is
		concurrently scanning this index. */
		if (dict_index_is_online_ddl(index)) {
			dtuple_t*	new_v_row = NULL;
			dtuple_t*	old_v_row = NULL;

			if (!(node->cmpl_info & UPD_NODE_NO_ORD_CHANGE)) {
				new_v_row = node->upd_row;
				old_v_row = node->update->old_vrow;
			}

			row_log_table_update(
				btr_cur_get_rec(btr_cur),
				index, offsets, rebuilt_old_pk, new_v_row,
				old_v_row);
		}
	}

	mtr_commit(mtr);
func_exit:
	if (heap) {
		mem_heap_free(heap);
	}

	if (big_rec) {
		dtuple_big_rec_free(big_rec);
	}

	return(err);
}
2994
/***********************************************************//**
Delete marks a clustered index record. Also stores the row image to
node->heap first, because the secondary index entries of the row must
be built afterwards from it.
@return DB_SUCCESS if operation successfully completed, else error code */
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
row_upd_del_mark_clust_rec(
/*=======================*/
	ulint		flags,  /*!< in: undo logging and locking flags */
	upd_node_t*	node,	/*!< in: row update node */
	dict_index_t*	index,	/*!< in: clustered index */
	ulint*		offsets,/*!< in/out: rec_get_offsets() for the
				record under the cursor */
	que_thr_t*	thr,	/*!< in: query thread */
	ibool		referenced,
				/*!< in: TRUE if index may be referenced in
				a foreign key constraint */
#ifdef WITH_WSREP
	ibool		foreign,/*!< in: TRUE if index is foreign key index */
#endif /* WITH_WSREP */
	mtr_t*		mtr)	/*!< in: mtr; gets committed here */
{
	btr_pcur_t*	pcur;
	btr_cur_t*	btr_cur;
	dberr_t		err;
#ifdef WITH_WSREP
	que_node_t *parent = que_node_get_parent(node);
	trx_t* trx = thr_get_trx(thr);
#endif /* WITH_WSREP */

	ut_ad(node);
	ut_ad(dict_index_is_clust(index));
	ut_ad(node->is_delete);

	pcur = node->pcur;
	btr_cur = btr_pcur_get_btr_cur(pcur);

	/* Store row because we have to build also the secondary index
	entries */

	row_upd_store_row(node, thr_get_trx(thr)->mysql_thd,
			  thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL);

	/* Mark the clustered index record deleted; we do not have to check
	locks, because we assume that we have an x-lock on the record */

	err = btr_cur_del_mark_set_clust_rec(
		flags, btr_cur_get_block(btr_cur), btr_cur_get_rec(btr_cur),
		index, offsets, thr, node->row, mtr);
	if (err == DB_SUCCESS && referenced) {
		/* NOTE that the following call loses the position of pcur ! */

		err = row_upd_check_references_constraints(
			node, pcur, index->table, index, offsets, thr, mtr);
	}
#ifdef WITH_WSREP
	/* Galera: run the FK check for replication purposes unless a
	cascading update is already in progress. */
	else if (trx && wsrep_on(trx->mysql_thd) && err == DB_SUCCESS &&
		 !row_upd_parent_has_cascade(parent)
	) {
		err = wsrep_row_upd_check_foreign_constraints(
			node, pcur, index->table, index, offsets, thr, mtr);
		switch (err) {
		case DB_SUCCESS:
		case DB_NO_REFERENCED_ROW:
			err = DB_SUCCESS;
			break;
		case DB_DEADLOCK:
			if (wsrep_debug) {
				ib::warn() << "WSREP: clust rec FK check fail for deadlock"
					<< " index " << index->name
					<< " table " << index->table->name;
			}
			break;
		default:
			ib::error() << "WSREP: referenced FK check fail: " << ut_strerr(err)
				<< " index " << index->name
				<< " table " << index->table->name;
			break;
		}
	}
#endif /* WITH_WSREP */

	mtr_commit(mtr);

	return(err);
}
3081 /***********************************************************//**
3082 Updates the clustered index record.
3083 @return DB_SUCCESS if operation successfully completed, DB_LOCK_WAIT
3084 in case of a lock wait, else error code */
3085 static MY_ATTRIBUTE((warn_unused_result))
3086 dberr_t
row_upd_clust_step(upd_node_t * node,que_thr_t * thr)3087 row_upd_clust_step(
3088 /*===============*/
3089 upd_node_t* node, /*!< in: row update node */
3090 que_thr_t* thr) /*!< in: query thread */
3091 {
3092 dict_index_t* index;
3093 btr_pcur_t* pcur;
3094 ibool success;
3095 dberr_t err;
3096 mtr_t mtr;
3097 rec_t* rec;
3098 mem_heap_t* heap = NULL;
3099 ulint offsets_[REC_OFFS_NORMAL_SIZE];
3100 ulint* offsets;
3101 ibool referenced;
3102 ulint flags = 0;
3103 trx_t* trx = thr_get_trx(thr);
3104 rec_offs_init(offsets_);
3105
3106 index = dict_table_get_first_index(node->table);
3107
3108 referenced = row_upd_index_is_referenced(index, trx);
3109 #ifdef WITH_WSREP
3110 ibool foreign = wsrep_row_upd_index_is_foreign(
3111 index, thr_get_trx(thr));
3112 #endif /* WITH_WSREP */
3113
3114 pcur = node->pcur;
3115
3116 /* We have to restore the cursor to its position */
3117
3118 mtr_start(&mtr);
3119 mtr.set_named_space(index->space);
3120
3121 /* Disable REDO logging as lifetime of temp-tables is limited to
3122 server or connection lifetime and so REDO information is not needed
3123 on restart for recovery.
3124 Disable locking as temp-tables are not shared across connection. */
3125 if (dict_table_is_temporary(index->table)) {
3126 flags |= BTR_NO_LOCKING_FLAG;
3127 mtr.set_log_mode(MTR_LOG_NO_REDO);
3128
3129 if (dict_table_is_intrinsic(index->table)) {
3130 flags |= BTR_NO_UNDO_LOG_FLAG;
3131 }
3132 }
3133
3134 /* If the restoration does not succeed, then the same
3135 transaction has deleted the record on which the cursor was,
3136 and that is an SQL error. If the restoration succeeds, it may
3137 still be that the same transaction has successively deleted
3138 and inserted a record with the same ordering fields, but in
3139 that case we know that the transaction has at least an
3140 implicit x-lock on the record. */
3141
3142 ut_a(pcur->rel_pos == BTR_PCUR_ON);
3143
3144 ulint mode;
3145
3146 DEBUG_SYNC_C_IF_THD(
3147 thr_get_trx(thr)->mysql_thd,
3148 "innodb_row_upd_clust_step_enter");
3149
3150 if (dict_index_is_online_ddl(index)) {
3151 ut_ad(node->table->id != DICT_INDEXES_ID);
3152 mode = BTR_MODIFY_LEAF | BTR_ALREADY_S_LATCHED;
3153 mtr_s_lock(dict_index_get_lock(index), &mtr);
3154 } else {
3155 mode = BTR_MODIFY_LEAF;
3156 }
3157
3158 success = btr_pcur_restore_position(mode, pcur, &mtr);
3159
3160 if (!success) {
3161 err = DB_RECORD_NOT_FOUND;
3162
3163 mtr_commit(&mtr);
3164
3165 return(err);
3166 }
3167
3168 /* If this is a row in SYS_INDEXES table of the data dictionary,
3169 then we have to free the file segments of the index tree associated
3170 with the index */
3171
3172 if (node->is_delete && node->table->id == DICT_INDEXES_ID) {
3173
3174 ut_ad(!dict_index_is_online_ddl(index));
3175
3176 dict_drop_index_tree(
3177 btr_pcur_get_rec(pcur), pcur, &mtr);
3178
3179 mtr_commit(&mtr);
3180
3181 mtr_start(&mtr);
3182 mtr.set_named_space(index->space);
3183
3184 success = btr_pcur_restore_position(BTR_MODIFY_LEAF, pcur,
3185 &mtr);
3186 if (!success) {
3187 err = DB_ERROR;
3188
3189 mtr_commit(&mtr);
3190
3191 return(err);
3192 }
3193 }
3194
3195 rec = btr_pcur_get_rec(pcur);
3196 offsets = rec_get_offsets(rec, index, offsets_,
3197 ULINT_UNDEFINED, &heap);
3198
3199 if (!node->has_clust_rec_x_lock) {
3200 err = lock_clust_rec_modify_check_and_lock(
3201 flags, btr_pcur_get_block(pcur),
3202 rec, index, offsets, thr);
3203 if (err != DB_SUCCESS) {
3204 mtr_commit(&mtr);
3205 goto exit_func;
3206 }
3207 }
3208
3209 ut_ad(lock_trx_has_rec_x_lock(thr_get_trx(thr), index->table,
3210 btr_pcur_get_block(pcur),
3211 page_rec_get_heap_no(rec)));
3212
3213 /* NOTE: the following function calls will also commit mtr */
3214
3215 if (node->is_delete) {
3216
3217 err = row_upd_del_mark_clust_rec(
3218 #ifdef WITH_WSREP
3219 flags, node, index, offsets, thr, referenced, foreign, &mtr);
3220 #else
3221 flags, node, index, offsets, thr, referenced, &mtr);
3222 #endif /* WITH_WSREP */
3223
3224 if (err == DB_SUCCESS) {
3225 node->state = UPD_NODE_UPDATE_ALL_SEC;
3226 node->index = dict_table_get_next_index(index);
3227 }
3228
3229 goto exit_func;
3230 }
3231
3232 /* If the update is made for MySQL, we already have the update vector
3233 ready, else we have to do some evaluation: */
3234
3235 if (UNIV_UNLIKELY(!node->in_mysql_interface)) {
3236 /* Copy the necessary columns from clust_rec and calculate the
3237 new values to set */
3238 row_upd_copy_columns(rec, offsets,
3239 UT_LIST_GET_FIRST(node->columns));
3240 row_upd_eval_new_vals(node->update);
3241 }
3242
3243 if (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE) {
3244
3245 err = row_upd_clust_rec(
3246 flags, node, index, offsets, &heap, thr, &mtr);
3247 goto exit_func;
3248 }
3249
3250 row_upd_store_row(node, trx->mysql_thd,
3251 thr->prebuilt ? thr->prebuilt->m_mysql_table : NULL);
3252
3253 if (row_upd_changes_ord_field_binary(index, node->update, thr,
3254 node->row, node->ext)) {
3255
3256 /* Update causes an ordering field (ordering fields within
3257 the B-tree) of the clustered index record to change: perform
3258 the update by delete marking and inserting.
3259
3260 TODO! What to do to the 'Halloween problem', where an update
3261 moves the record forward in index so that it is again
3262 updated when the cursor arrives there? Solution: the
3263 read operation must check the undo record undo number when
3264 choosing records to update. MySQL solves now the problem
3265 externally! */
3266
3267 err = row_upd_clust_rec_by_insert(
3268 #ifdef WITH_WSREP
3269 flags, node, index, thr, referenced, foreign, &mtr);
3270 #else
3271 flags, node, index, thr, referenced, &mtr);
3272 #endif /* WITH_WSREP */
3273
3274 if (err != DB_SUCCESS) {
3275
3276 goto exit_func;
3277 }
3278
3279 node->state = UPD_NODE_UPDATE_ALL_SEC;
3280 } else {
3281 err = row_upd_clust_rec(
3282 flags, node, index, offsets, &heap, thr, &mtr);
3283
3284 if (err != DB_SUCCESS) {
3285
3286 goto exit_func;
3287 }
3288
3289 node->state = UPD_NODE_UPDATE_SOME_SEC;
3290 }
3291
3292 node->index = dict_table_get_next_index(index);
3293
3294 exit_func:
3295 if (heap) {
3296 mem_heap_free(heap);
3297 }
3298 return(err);
3299 }
3301 /***********************************************************//**
3302 Updates the affected index records of a row. When the control is transferred
3303 to this node, we assume that we have a persistent cursor which was on a
3304 record, and the position of the cursor is stored in the cursor.
3305 @return DB_SUCCESS if operation successfully completed, else error
3306 code or DB_LOCK_WAIT */
3307 dberr_t
row_upd(upd_node_t * node,que_thr_t * thr)3308 row_upd(
3309 /*====*/
3310 upd_node_t* node, /*!< in: row update node */
3311 que_thr_t* thr) /*!< in: query thread */
3312 {
3313 dberr_t err = DB_SUCCESS;
3314 DBUG_ENTER("row_upd");
3315
3316 ut_ad(node != NULL);
3317 ut_ad(thr != NULL);
3318 ut_ad(!thr_get_trx(thr)->in_rollback);
3319
3320 DBUG_PRINT("row_upd", ("table: %s", node->table->name.m_name));
3321 DBUG_PRINT("row_upd", ("info bits in update vector: 0x%lx",
3322 node->update ? node->update->info_bits: 0));
3323 DBUG_PRINT("row_upd", ("foreign_id: %s",
3324 node->foreign ? node->foreign->id: "NULL"));
3325
3326 if (UNIV_LIKELY(node->in_mysql_interface)) {
3327
3328 /* We do not get the cmpl_info value from the MySQL
3329 interpreter: we must calculate it on the fly: */
3330
3331 if (node->is_delete
3332 || row_upd_changes_some_index_ord_field_binary(
3333 node->table, node->update)) {
3334 node->cmpl_info = 0;
3335 } else {
3336 node->cmpl_info = UPD_NODE_NO_ORD_CHANGE;
3337 }
3338 }
3339
3340 switch (node->state) {
3341 case UPD_NODE_UPDATE_CLUSTERED:
3342 case UPD_NODE_INSERT_CLUSTERED:
3343 if (!dict_table_is_intrinsic(node->table)) {
3344 log_free_check();
3345 }
3346 err = row_upd_clust_step(node, thr);
3347
3348 if (err != DB_SUCCESS) {
3349
3350 DBUG_RETURN(err);
3351 }
3352 }
3353
3354 DEBUG_SYNC_C_IF_THD(thr_get_trx(thr)->mysql_thd,
3355 "after_row_upd_clust");
3356
3357 if (node->index == NULL
3358 || (!node->is_delete
3359 && (node->cmpl_info & UPD_NODE_NO_ORD_CHANGE))) {
3360
3361 DBUG_RETURN(DB_SUCCESS);
3362 }
3363
3364 DBUG_EXECUTE_IF("row_upd_skip_sec", node->index = NULL;);
3365
3366 do {
3367 /* Skip corrupted index */
3368 dict_table_skip_corrupt_index(node->index);
3369
3370 if (!node->index) {
3371 break;
3372 }
3373
3374 if (node->index->type != DICT_FTS) {
3375 err = row_upd_sec_step(node, thr);
3376
3377 if (err != DB_SUCCESS) {
3378
3379 DBUG_RETURN(err);
3380 }
3381 }
3382
3383 node->index = dict_table_get_next_index(node->index);
3384 } while (node->index != NULL);
3385
3386 ut_ad(err == DB_SUCCESS);
3387
3388 /* Do some cleanup */
3389
3390 if (node->row != NULL) {
3391 node->row = NULL;
3392 node->ext = NULL;
3393 node->upd_row = NULL;
3394 node->upd_ext = NULL;
3395 mem_heap_empty(node->heap);
3396 }
3397
3398 node->state = UPD_NODE_UPDATE_CLUSTERED;
3399
3400 DBUG_RETURN(err);
3401 }
3403 /***********************************************************//**
3404 Updates a row in a table. This is a high-level function used in SQL execution
3405 graphs.
3406 @return query thread to run next or NULL */
3407 que_thr_t*
row_upd_step(que_thr_t * thr)3408 row_upd_step(
3409 /*=========*/
3410 que_thr_t* thr) /*!< in: query thread */
3411 {
3412 upd_node_t* node;
3413 sel_node_t* sel_node;
3414 que_node_t* parent;
3415 dberr_t err = DB_SUCCESS;
3416 trx_t* trx;
3417 DBUG_ENTER("row_upd_step");
3418
3419 ut_ad(thr);
3420
3421 trx = thr_get_trx(thr);
3422
3423 trx_start_if_not_started_xa(trx, true);
3424
3425 node = static_cast<upd_node_t*>(thr->run_node);
3426
3427 sel_node = node->select;
3428
3429 parent = que_node_get_parent(node);
3430
3431 ut_ad(que_node_get_type(node) == QUE_NODE_UPDATE);
3432
3433 if (thr->prev_node == parent) {
3434 node->state = UPD_NODE_SET_IX_LOCK;
3435 }
3436
3437 if (node->state == UPD_NODE_SET_IX_LOCK) {
3438
3439 if (!node->has_clust_rec_x_lock) {
3440 /* It may be that the current session has not yet
3441 started its transaction, or it has been committed: */
3442
3443 err = lock_table(0, node->table, LOCK_IX, thr);
3444
3445 if (err != DB_SUCCESS) {
3446
3447 goto error_handling;
3448 }
3449 }
3450
3451 node->state = UPD_NODE_UPDATE_CLUSTERED;
3452
3453 if (node->searched_update) {
3454 /* Reset the cursor */
3455 sel_node->state = SEL_NODE_OPEN;
3456
3457 /* Fetch a row to update */
3458
3459 thr->run_node = sel_node;
3460
3461 DBUG_RETURN(thr);
3462 }
3463 }
3464
3465 /* sel_node is NULL if we are in the MySQL interface */
3466
3467 if (sel_node && (sel_node->state != SEL_NODE_FETCH)) {
3468
3469 if (!node->searched_update) {
3470 /* An explicit cursor should be positioned on a row
3471 to update */
3472
3473 ut_error;
3474
3475 err = DB_ERROR;
3476
3477 goto error_handling;
3478 }
3479
3480 ut_ad(sel_node->state == SEL_NODE_NO_MORE_ROWS);
3481
3482 /* No more rows to update, or the select node performed the
3483 updates directly in-place */
3484
3485 thr->run_node = parent;
3486
3487 DBUG_RETURN(thr);
3488 }
3489
3490 /* DO THE CHECKS OF THE CONSISTENCY CONSTRAINTS HERE */
3491
3492 err = row_upd(node, thr);
3493
3494 error_handling:
3495 trx->error_state = err;
3496
3497 if (err != DB_SUCCESS) {
3498 DBUG_RETURN(NULL);
3499 }
3500
3501 /* DO THE TRIGGER ACTIONS HERE */
3502
3503 if (node->searched_update) {
3504 /* Fetch next row to update */
3505
3506 thr->run_node = sel_node;
3507 } else {
3508 /* It was an explicit cursor update */
3509
3510 thr->run_node = parent;
3511 }
3512
3513 node->state = UPD_NODE_UPDATE_CLUSTERED;
3514
3515 DBUG_RETURN(thr);
3516 }

#endif /* !UNIV_HOTBACKUP */
