1 /* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
2 Copyright (c) 2011, 2021, MariaDB
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; version 2 of the License.
7
8 This program is distributed in the hope that it will be useful,
9 but WITHOUT ANY WARRANTY; without even the implied warranty of
10 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 GNU General Public License for more details.
12
13 You should have received a copy of the GNU General Public License
14 along with this program; if not, write to the Free Software
15 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
16
17
18 /*
19 Single table and multi table updates of tables.
20 Multi-table updates were introduced by Sinisa & Monty
21 */
22
23 #include "mariadb.h" /* NO_EMBEDDED_ACCESS_CHECKS */
24 #include "sql_priv.h"
25 #include "sql_update.h"
26 #include "sql_cache.h" // query_cache_*
27 #include "sql_base.h" // close_tables_for_reopen
28 #include "sql_parse.h" // cleanup_items
29 #include "sql_partition.h" // partition_key_modified
30 #include "sql_select.h"
31 #include "sql_view.h" // check_key_in_view
32 #include "sp_head.h"
33 #include "sql_trigger.h"
34 #include "sql_statistics.h"
35 #include "probes_mysql.h"
36 #include "debug_sync.h"
37 #include "key.h" // is_key_used
38 #include "records.h" // init_read_record,
39 // end_read_record
40 #include "filesort.h" // filesort
41 #include "sql_derived.h" // mysql_derived_prepare,
42 // mysql_handle_derived,
43 // mysql_derived_filling
44
45
46 #include "sql_insert.h" // For vers_insert_history_row() that may be
47 // needed for System Versioning.
48
49 /**
50 True if the table's input and output record buffers are comparable using
51 compare_record(TABLE*).
52 */
records_are_comparable(const TABLE * table)53 bool records_are_comparable(const TABLE *table) {
54 return !table->versioned(VERS_TRX_ID) &&
55 (((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
56 bitmap_is_subset(table->write_set, table->read_set));
57 }
58
59
60 /**
  Compares the input and output record buffers of the table to see if a row
62 has changed.
63
64 @return true if row has changed.
65 @return false otherwise.
66 */
67
bool compare_record(const TABLE *table)
{
  DBUG_ASSERT(records_are_comparable(table));

  if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ ||
      table->s->has_update_default_function)
  {
    /*
      Storage engine may not have read all columns of the record. Fields
      (including NULL bits) not in the write_set may not have been read and
      can therefore not be compared.
      Or ON UPDATE DEFAULT NOW() could've changed field values, including
      NULL bits.
    */
    for (Field **ptr= table->field ; *ptr != NULL; ptr++)
    {
      Field *field= *ptr;
      /*
        Only columns explicitly assigned by this statement (and that are not
        virtual columns) can meaningfully differ between the two buffers.
      */
      if (field->has_explicit_value() && !field->vcol_info)
      {
        if (field->real_maybe_null())
        {
          /* Byte offset of this field's NULL flag inside the record buffer. */
          uchar null_byte_index= (uchar)(field->null_ptr - table->record[0]);

          /* Compare just this field's NULL bit in record[0] vs record[1]. */
          if (((table->record[0][null_byte_index]) & field->null_bit) !=
              ((table->record[1][null_byte_index]) & field->null_bit))
            return TRUE;
        }
        /*
          rec_buff_length is used throughout this function as the offset
          from record[0] to record[1] (see the null_flags memcmp below).
        */
        if (field->cmp_binary_offset(table->s->rec_buff_length))
          return TRUE;
      }
    }
    return FALSE;
  }

  /*
    The storage engine has read all columns, so it's safe to compare all bits
    including those not in the write_set. This is cheaper than the
    field-by-field comparison done above.
  */
  if (table->s->can_cmp_whole_record)
    return cmp_record(table,record[1]);
  /* Compare null bits */
  if (memcmp(table->null_flags,
             table->null_flags+table->s->rec_buff_length,
             table->s->null_bytes_for_compare))
    return TRUE;				// Diff in NULL value
  /* Compare updated fields */
  for (Field **ptr= table->field ; *ptr ; ptr++)
  {
    Field *field= *ptr;
    /* Skip columns not assigned by the statement and virtual columns. */
    if (field->has_explicit_value() && !field->vcol_info &&
	field->cmp_binary_offset(table->s->rec_buff_length))
      return TRUE;
  }
  return FALSE;
}
124
125
126 /*
127 check that all fields are real fields
128
129 SYNOPSIS
130 check_fields()
131 thd thread handler
132 items Items for check
133
134 RETURN
135 TRUE Items can't be used in UPDATE
136 FALSE Items are OK
137 */
138
static bool check_fields(THD *thd, TABLE_LIST *table, List<Item> &items,
                         bool update_view)
{
  Item *item;
  if (update_view)
  {
    /*
      For views every SET target must resolve to a real underlying column;
      anything else (expression, literal) is not updatable.
    */
    List_iterator<Item> it(items);
    Item_field *field;
    while ((item= it++))
    {
      if (!(field= item->field_for_view_update()))
      {
        /* item has name, because it comes from VIEW SELECT list */
        my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->name.str);
        return TRUE;
      }
      /*
        we make temporary copy of Item_field, to avoid influence of changing
        result_field on Item_ref which refer on this field
      */
      thd->change_item_tree(it.ref(),
                            new (thd->mem_root) Item_field(thd, field));
    }
  }

  if (thd->variables.sql_mode & MODE_SIMULTANEOUS_ASSIGNMENT)
  {
    // Make sure that a column is updated only once
    /*
      Two passes over the SET list: first clear the "explicit value" marker
      on every target column, then re-set it while checking that no column
      is seen twice.
    */
    List_iterator_fast<Item> it(items);
    while ((item= it++))
    {
      item->field_for_view_update()->field->clear_has_explicit_value();
    }
    it.rewind();
    while ((item= it++))
    {
      Field *f= item->field_for_view_update()->field;
      if (f->has_explicit_value())
      {
        /* Marker already set: this column appears twice in the SET list. */
        my_error(ER_UPDATED_COLUMN_ONLY_ONCE, MYF(0),
                 *(f->table_name), f->field_name.str);
        return TRUE;
      }
      f->set_has_explicit_value();
    }
  }

  if (table->has_period())
  {
    /* UPDATE ... FOR PORTION OF is only supported on base tables. */
    if (table->is_view_or_derived())
    {
      my_error(ER_IT_IS_A_VIEW, MYF(0), table->table_name.str);
      return TRUE;
    }
    if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI)
    {
      my_error(ER_NOT_SUPPORTED_YET, MYF(0),
               "updating and querying the same temporal periods table");

      return true;
    }
    DBUG_ASSERT(thd->lex->sql_command == SQLCOM_UPDATE);
    /* The period's own boundary columns must not be assigned directly. */
    for (List_iterator_fast<Item> it(items); (item=it++);)
    {
      Field *f= item->field_for_view_update()->field;
      vers_select_conds_t &period= table->period_conditions;
      if (period.field_start->field == f || period.field_end->field == f)
      {
        my_error(ER_PERIOD_COLUMNS_UPDATED, MYF(0),
                 item->name.str, period.name.str);
        return true;
      }
    }
  }
  return FALSE;
}
215
vers_check_update(List<Item> & items)216 bool TABLE::vers_check_update(List<Item> &items)
217 {
218 List_iterator<Item> it(items);
219 if (!versioned_write())
220 return false;
221
222 while (Item *item= it++)
223 {
224 if (Item_field *item_field= item->field_for_view_update())
225 {
226 Field *field= item_field->field;
227 if (field->table == this && !field->vers_update_unversioned())
228 {
229 no_cache= true;
230 return true;
231 }
232 }
233 }
234 return false;
235 }
236
237 /**
238 Re-read record if more columns are needed for error message.
239
240 If we got a duplicate key error, we want to write an error
241 message containing the value of the duplicate key. If we do not have
242 all fields of the key value in record[0], we need to re-read the
243 record with a proper read_set.
244
245 @param[in] error error number
246 @param[in] table table
247 */
248
static void prepare_record_for_error_message(int error, TABLE *table)
{
  Field **field_p;
  Field *field;
  uint keynr;
  MY_BITMAP unique_map; /* Fields in offended unique. */
  my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)];
  DBUG_ENTER("prepare_record_for_error_message");

  /*
    Only duplicate key errors print the key value.
    If storage engine does always read all columns, we have the value already.
  */
  if ((error != HA_ERR_FOUND_DUPP_KEY) ||
      !(table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ))
    DBUG_VOID_RETURN;

  /*
    Get the number of the offended index.
    We will see MAX_KEY if the engine cannot determine the affected index.
  */
  if (unlikely((keynr= table->file->get_dup_key(error)) >= MAX_KEY))
    DBUG_VOID_RETURN;

  /* Create unique_map with all fields used by that index. */
  my_bitmap_init(&unique_map, unique_map_buf, table->s->fields, FALSE);
  table->mark_index_columns(keynr, &unique_map);

  /* Subtract read_set and write_set. */
  bitmap_subtract(&unique_map, table->read_set);
  bitmap_subtract(&unique_map, table->write_set);

  /*
    If the unique index uses columns that are neither in read_set
    nor in write_set, we must re-read the record.
    Otherwise no need to do anything.
  */
  if (bitmap_is_clear_all(&unique_map))
    DBUG_VOID_RETURN;

  /* Get identifier of last read record into table->file->ref. */
  table->file->position(table->record[0]);
  /* Add all fields used by unique index to read_set. */
  bitmap_union(table->read_set, &unique_map);
  /* Tell the engine about the new set. */
  table->file->column_bitmaps_signal();

  /* End any active scan and init for positioned reads (ha_rnd_pos). */
  if ((error= table->file->ha_index_or_rnd_end()) ||
      (error= table->file->ha_rnd_init(0)))
  {
    table->file->print_error(error, MYF(0));
    DBUG_VOID_RETURN;
  }

  /* Read record that is identified by table->file->ref. */
  (void) table->file->ha_rnd_pos(table->record[1], table->file->ref);
  /* Copy the newly read columns into the new record. */
  for (field_p= table->field; (field= *field_p); field_p++)
    if (bitmap_is_set(&unique_map, field->field_index))
      field->copy_from_tmp(table->s->rec_buff_length);

  DBUG_VOID_RETURN;
}
312
313
314 static
cut_fields_for_portion_of_time(THD * thd,TABLE * table,const vers_select_conds_t & period_conds)315 int cut_fields_for_portion_of_time(THD *thd, TABLE *table,
316 const vers_select_conds_t &period_conds)
317 {
318 bool lcond= period_conds.field_start->val_datetime_packed(thd)
319 < period_conds.start.item->val_datetime_packed(thd);
320 bool rcond= period_conds.field_end->val_datetime_packed(thd)
321 > period_conds.end.item->val_datetime_packed(thd);
322
323 Field *start_field= table->field[table->s->period.start_fieldno];
324 Field *end_field= table->field[table->s->period.end_fieldno];
325
326 int res= 0;
327 if (lcond)
328 {
329 res= period_conds.start.item->save_in_field(start_field, true);
330 start_field->set_has_explicit_value();
331 }
332
333 if (likely(!res) && rcond)
334 {
335 res= period_conds.end.item->save_in_field(end_field, true);
336 end_field->set_has_explicit_value();
337 }
338
339 return res;
340 }
341
342 /*
343 Process usual UPDATE
344
345 SYNOPSIS
346 mysql_update()
347 thd thread handler
348 fields fields for update
349 values values of fields for update
350 conds WHERE clause expression
    order_num		number of elements in ORDER BY clause
352 order ORDER BY clause list
353 limit limit clause
354
355 RETURN
356 0 - OK
    2  - privilege check and opening table passed, but we need to convert to
358 multi-update because of view substitution
359 1 - error
360 */
361
mysql_update(THD * thd,TABLE_LIST * table_list,List<Item> & fields,List<Item> & values,COND * conds,uint order_num,ORDER * order,ha_rows limit,bool ignore,ha_rows * found_return,ha_rows * updated_return)362 int mysql_update(THD *thd,
363 TABLE_LIST *table_list,
364 List<Item> &fields,
365 List<Item> &values,
366 COND *conds,
367 uint order_num, ORDER *order,
368 ha_rows limit,
369 bool ignore,
370 ha_rows *found_return, ha_rows *updated_return)
371 {
372 bool using_limit= limit != HA_POS_ERROR;
373 bool safe_update= thd->variables.option_bits & OPTION_SAFE_UPDATES;
374 bool used_key_is_modified= FALSE, transactional_table;
375 bool will_batch= FALSE;
376 bool can_compare_record;
377 int res;
378 int error, loc_error;
379 ha_rows dup_key_found;
380 bool need_sort= TRUE;
381 bool reverse= FALSE;
382 #ifndef NO_EMBEDDED_ACCESS_CHECKS
383 privilege_t want_privilege(NO_ACL);
384 #endif
385 uint table_count= 0;
386 ha_rows updated, found;
387 key_map old_covering_keys;
388 TABLE *table;
389 SQL_SELECT *select= NULL;
390 SORT_INFO *file_sort= 0;
391 READ_RECORD info;
392 SELECT_LEX *select_lex= thd->lex->first_select_lex();
393 ulonglong id;
394 List<Item> all_fields;
395 killed_state killed_status= NOT_KILLED;
396 bool has_triggers, binlog_is_row, do_direct_update= FALSE;
397 Update_plan query_plan(thd->mem_root);
398 Explain_update *explain;
399 TABLE_LIST *update_source_table;
400 query_plan.index= MAX_KEY;
401 query_plan.using_filesort= FALSE;
402
403 // For System Versioning (may need to insert new fields to a table).
404 ha_rows rows_inserted= 0;
405
406 DBUG_ENTER("mysql_update");
407
408 create_explain_query(thd->lex, thd->mem_root);
409 if (open_tables(thd, &table_list, &table_count, 0))
410 DBUG_RETURN(1);
411
412 /* Prepare views so they are handled correctly */
413 if (mysql_handle_derived(thd->lex, DT_INIT))
414 DBUG_RETURN(1);
415
416 if (table_list->has_period() && table_list->is_view_or_derived())
417 {
418 my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
419 DBUG_RETURN(TRUE);
420 }
421
422 if (((update_source_table=unique_table(thd, table_list,
423 table_list->next_global, 0)) ||
424 table_list->is_multitable()))
425 {
426 DBUG_ASSERT(update_source_table || table_list->view != 0);
427 DBUG_PRINT("info", ("Switch to multi-update"));
428 /* pass counter value */
429 thd->lex->table_count= table_count;
430 if (thd->lex->period_conditions.is_set())
431 {
432 my_error(ER_NOT_SUPPORTED_YET, MYF(0),
433 "updating and querying the same temporal periods table");
434
435 DBUG_RETURN(1);
436 }
437
438 /* convert to multiupdate */
439 DBUG_RETURN(2);
440 }
441 if (lock_tables(thd, table_list, table_count, 0))
442 DBUG_RETURN(1);
443
444 (void) read_statistics_for_tables_if_needed(thd, table_list);
445
446 THD_STAGE_INFO(thd, stage_init_update);
447 if (table_list->handle_derived(thd->lex, DT_MERGE_FOR_INSERT))
448 DBUG_RETURN(1);
449 if (table_list->handle_derived(thd->lex, DT_PREPARE))
450 DBUG_RETURN(1);
451
452 table= table_list->table;
453
454 if (!table_list->single_table_updatable())
455 {
456 my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "UPDATE");
457 DBUG_RETURN(1);
458 }
459
460 /* Calculate "table->covering_keys" based on the WHERE */
461 table->covering_keys= table->s->keys_in_use;
462 table->opt_range_keys.clear_all();
463
464 query_plan.select_lex= thd->lex->first_select_lex();
465 query_plan.table= table;
466 #ifndef NO_EMBEDDED_ACCESS_CHECKS
467 /* Force privilege re-checking for views after they have been opened. */
468 want_privilege= (table_list->view ? UPDATE_ACL :
469 table_list->grant.want_privilege);
470 #endif
471 promote_select_describe_flag_if_needed(thd->lex);
472
473 if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
474 DBUG_RETURN(1);
475
476 if (table_list->has_period())
477 {
478 if (!table_list->period_conditions.start.item->const_item()
479 || !table_list->period_conditions.end.item->const_item())
480 {
481 my_error(ER_NOT_CONSTANT_EXPRESSION, MYF(0), "FOR PORTION OF");
482 DBUG_RETURN(true);
483 }
484 table->no_cache= true;
485 }
486
487 old_covering_keys= table->covering_keys; // Keys used in WHERE
488 /* Check the fields we are going to modify */
489 #ifndef NO_EMBEDDED_ACCESS_CHECKS
490 table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
491 table_list->register_want_access(want_privilege);
492 #endif
493 /* 'Unfix' fields to allow correct marking by the setup_fields function. */
494 if (table_list->is_view())
495 unfix_fields(fields);
496
497 if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
498 fields, MARK_COLUMNS_WRITE, 0, 0))
499 DBUG_RETURN(1); /* purecov: inspected */
500 if (check_fields(thd, table_list, fields, table_list->view))
501 {
502 DBUG_RETURN(1);
503 }
504 bool has_vers_fields= table->vers_check_update(fields);
505 if (check_key_in_view(thd, table_list))
506 {
507 my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "UPDATE");
508 DBUG_RETURN(1);
509 }
510
511 if (table->default_field)
512 table->mark_default_fields_for_write(false);
513
514 #ifndef NO_EMBEDDED_ACCESS_CHECKS
515 /* Check values */
516 table_list->grant.want_privilege= table->grant.want_privilege=
517 (SELECT_ACL & ~table->grant.privilege);
518 #endif
519 if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, NULL, 0))
520 {
521 free_underlaid_joins(thd, select_lex);
522 DBUG_RETURN(1); /* purecov: inspected */
523 }
524
525 if (check_unique_table(thd, table_list))
526 DBUG_RETURN(TRUE);
527
528 switch_to_nullable_trigger_fields(fields, table);
529 switch_to_nullable_trigger_fields(values, table);
530
531 /* Apply the IN=>EXISTS transformation to all subqueries and optimize them */
532 if (select_lex->optimize_unflattened_subqueries(false))
533 DBUG_RETURN(TRUE);
534
535 if (select_lex->inner_refs_list.elements &&
536 fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
537 DBUG_RETURN(1);
538
539 if (conds)
540 {
541 Item::cond_result cond_value;
542 conds= conds->remove_eq_conds(thd, &cond_value, true);
543 if (cond_value == Item::COND_FALSE)
544 {
545 limit= 0; // Impossible WHERE
546 query_plan.set_impossible_where();
547 if (thd->lex->describe || thd->lex->analyze_stmt)
548 goto produce_explain_and_leave;
549 }
550 }
551
552 // Don't count on usage of 'only index' when calculating which key to use
553 table->covering_keys.clear_all();
554
555 #ifdef WITH_PARTITION_STORAGE_ENGINE
556 if (prune_partitions(thd, table, conds))
557 {
558 free_underlaid_joins(thd, select_lex);
559
560 query_plan.set_no_partitions();
561 if (thd->lex->describe || thd->lex->analyze_stmt)
562 goto produce_explain_and_leave;
563 if (thd->is_error())
564 DBUG_RETURN(1);
565
566 my_ok(thd); // No matching records
567 DBUG_RETURN(0);
568 }
569 #endif
570 /* Update the table->file->stats.records number */
571 table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
572 set_statistics_for_table(thd, table);
573
574 select= make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
575 if (unlikely(error || !limit || thd->is_error() ||
576 (select && select->check_quick(thd, safe_update, limit))))
577 {
578 query_plan.set_impossible_where();
579 if (thd->lex->describe || thd->lex->analyze_stmt)
580 goto produce_explain_and_leave;
581
582 delete select;
583 free_underlaid_joins(thd, select_lex);
584 /*
585 There was an error or the error was already sent by
586 the quick select evaluation.
587 TODO: Add error code output parameter to Item::val_xxx() methods.
588 Currently they rely on the user checking DA for
589 errors when unwinding the stack after calling Item::val_xxx().
590 */
591 if (error || thd->is_error())
592 {
593 DBUG_RETURN(1); // Error in where
594 }
595 my_ok(thd); // No matching records
596 DBUG_RETURN(0);
597 }
598
599 /* If running in safe sql mode, don't allow updates without keys */
600 if (table->opt_range_keys.is_clear_all())
601 {
602 thd->set_status_no_index_used();
603 if (safe_update && !using_limit)
604 {
605 my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
606 ER_THD(thd, ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
607 goto err;
608 }
609 }
610 if (unlikely(init_ftfuncs(thd, select_lex, 1)))
611 goto err;
612
613 if (table_list->has_period())
614 {
615 table->use_all_columns();
616 table->rpl_write_set= table->write_set;
617 }
618 else
619 {
620 table->mark_columns_needed_for_update();
621 }
622
623 table->update_const_key_parts(conds);
624 order= simple_remove_const(order, conds);
625 query_plan.scanned_rows= select? select->records: table->file->stats.records;
626
627 if (select && select->quick && select->quick->unique_key_range())
628 {
629 /* Single row select (always "ordered"): Ok to use with key field UPDATE */
630 need_sort= FALSE;
631 query_plan.index= MAX_KEY;
632 used_key_is_modified= FALSE;
633 }
634 else
635 {
636 ha_rows scanned_limit= query_plan.scanned_rows;
637 table->no_keyread= 1;
638 query_plan.index= get_index_for_order(order, table, select, limit,
639 &scanned_limit, &need_sort,
640 &reverse);
641 table->no_keyread= 0;
642 if (!need_sort)
643 query_plan.scanned_rows= scanned_limit;
644
645 if (select && select->quick)
646 {
647 DBUG_ASSERT(need_sort || query_plan.index == select->quick->index);
648 used_key_is_modified= (!select->quick->unique_key_range() &&
649 select->quick->is_keys_used(table->write_set));
650 }
651 else
652 {
653 if (need_sort)
654 {
655 /* Assign table scan index to check below for modified key fields: */
656 query_plan.index= table->file->key_used_on_scan;
657 }
658 if (query_plan.index != MAX_KEY)
659 {
660 /* Check if we are modifying a key that we are used to search with: */
661 used_key_is_modified= is_key_used(table, query_plan.index,
662 table->write_set);
663 }
664 }
665 }
666
667 /*
668 Query optimization is finished at this point.
669 - Save the decisions in the query plan
670 - if we're running EXPLAIN UPDATE, get out
671 */
672 query_plan.select= select;
673 query_plan.possible_keys= select? select->possible_keys: key_map(0);
674
675 if (used_key_is_modified || order ||
676 partition_key_modified(table, table->write_set))
677 {
678 if (order && need_sort)
679 query_plan.using_filesort= true;
680 else
681 query_plan.using_io_buffer= true;
682 }
683
684 /*
685 Ok, we have generated a query plan for the UPDATE.
686 - if we're running EXPLAIN UPDATE, goto produce explain output
687 - otherwise, execute the query plan
688 */
689 if (thd->lex->describe)
690 goto produce_explain_and_leave;
691 if (!(explain= query_plan.save_explain_update_data(query_plan.mem_root, thd)))
692 goto err;
693
694 ANALYZE_START_TRACKING(thd, &explain->command_tracker);
695
696 DBUG_EXECUTE_IF("show_explain_probe_update_exec_start",
697 dbug_serve_apcs(thd, 1););
698
699 has_triggers= (table->triggers &&
700 (table->triggers->has_triggers(TRG_EVENT_UPDATE,
701 TRG_ACTION_BEFORE) ||
702 table->triggers->has_triggers(TRG_EVENT_UPDATE,
703 TRG_ACTION_AFTER)));
704
705 if (table_list->has_period())
706 has_triggers= table->triggers &&
707 (table->triggers->has_triggers(TRG_EVENT_INSERT,
708 TRG_ACTION_BEFORE)
709 || table->triggers->has_triggers(TRG_EVENT_INSERT,
710 TRG_ACTION_AFTER)
711 || has_triggers);
712 DBUG_PRINT("info", ("has_triggers: %s", has_triggers ? "TRUE" : "FALSE"));
713 binlog_is_row= thd->is_current_stmt_binlog_format_row();
714 DBUG_PRINT("info", ("binlog_is_row: %s", binlog_is_row ? "TRUE" : "FALSE"));
715
716 if (!(select && select->quick))
717 status_var_increment(thd->status_var.update_scan_count);
718
719 /*
720 We can use direct update (update that is done silently in the handler)
721 if none of the following conditions are true:
722 - There are triggers
723 - There is binary logging
724 - using_io_buffer
725 - This means that the partition changed or the key we want
726 to use for scanning the table is changed
727 - ignore is set
728 - Direct updates don't return the number of ignored rows
729 - There is a virtual not stored column in the WHERE clause
730 - Changing a field used by a stored virtual column, which
731 would require the column to be recalculated.
732 - ORDER BY or LIMIT
733 - As this requires the rows to be updated in a specific order
734 - Note that Spider can handle ORDER BY and LIMIT in a cluster with
735 one data node. These conditions are therefore checked in
736 direct_update_rows_init().
737 - Update fields include a unique timestamp field
738 - The storage engine may not be able to avoid false duplicate key
739 errors. This condition is checked in direct_update_rows_init().
740
741 Direct update does not require a WHERE clause
742
743 Later we also ensure that we are only using one table (no sub queries)
744 */
745 DBUG_PRINT("info", ("HA_CAN_DIRECT_UPDATE_AND_DELETE: %s", (table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) ? "TRUE" : "FALSE"));
746 DBUG_PRINT("info", ("using_io_buffer: %s", query_plan.using_io_buffer ? "TRUE" : "FALSE"));
747 DBUG_PRINT("info", ("ignore: %s", ignore ? "TRUE" : "FALSE"));
748 DBUG_PRINT("info", ("virtual_columns_marked_for_read: %s", table->check_virtual_columns_marked_for_read() ? "TRUE" : "FALSE"));
749 DBUG_PRINT("info", ("virtual_columns_marked_for_write: %s", table->check_virtual_columns_marked_for_write() ? "TRUE" : "FALSE"));
750 if ((table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) &&
751 !has_triggers && !binlog_is_row &&
752 !query_plan.using_io_buffer && !ignore &&
753 !table->check_virtual_columns_marked_for_read() &&
754 !table->check_virtual_columns_marked_for_write())
755 {
756 DBUG_PRINT("info", ("Trying direct update"));
757 bool use_direct_update= !select || !select->cond;
758 if (!use_direct_update &&
759 (select->cond->used_tables() & ~RAND_TABLE_BIT) == table->map)
760 {
761 DBUG_ASSERT(!table->file->pushed_cond);
762 if (!table->file->cond_push(select->cond))
763 {
764 use_direct_update= TRUE;
765 table->file->pushed_cond= select->cond;
766 }
767 }
768
769 if (use_direct_update &&
770 !table->file->info_push(INFO_KIND_UPDATE_FIELDS, &fields) &&
771 !table->file->info_push(INFO_KIND_UPDATE_VALUES, &values) &&
772 !table->file->direct_update_rows_init(&fields))
773 {
774 do_direct_update= TRUE;
775
776 /* Direct update is not using_filesort and is not using_io_buffer */
777 goto update_begin;
778 }
779 }
780
781 if (query_plan.using_filesort || query_plan.using_io_buffer)
782 {
783 /*
784 We can't update table directly; We must first search after all
785 matching rows before updating the table!
786
787 note: We avoid sorting if we sort on the used index
788 */
789 if (query_plan.using_filesort)
790 {
791 /*
792 Doing an ORDER BY; Let filesort find and sort the rows we are going
793 to update
794 NOTE: filesort will call table->prepare_for_position()
795 */
796 Filesort fsort(order, limit, true, select);
797
798 Filesort_tracker *fs_tracker=
799 thd->lex->explain->get_upd_del_plan()->filesort_tracker;
800
801 if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
802 goto err;
803 thd->inc_examined_row_count(file_sort->examined_rows);
804
805 /*
806 Filesort has already found and selected the rows we want to update,
807 so we don't need the where clause
808 */
809 delete select;
810 select= 0;
811 }
812 else
813 {
814 MY_BITMAP *save_read_set= table->read_set;
815 MY_BITMAP *save_write_set= table->write_set;
816
817 if (query_plan.index < MAX_KEY && old_covering_keys.is_set(query_plan.index))
818 table->prepare_for_keyread(query_plan.index);
819 else
820 table->use_all_columns();
821
822 /*
823 We are doing a search on a key that is updated. In this case
824 we go trough the matching rows, save a pointer to them and
825 update these in a separate loop based on the pointer.
826 */
827 explain->buf_tracker.on_scan_init();
828 IO_CACHE tempfile;
829 if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
830 DISK_BUFFER_SIZE, MYF(MY_WME)))
831 goto err;
832
833 /* If quick select is used, initialize it before retrieving rows. */
834 if (select && select->quick && select->quick->reset())
835 {
836 close_cached_file(&tempfile);
837 goto err;
838 }
839
840 table->file->try_semi_consistent_read(1);
841
842 /*
843 When we get here, we have one of the following options:
844 A. query_plan.index == MAX_KEY
845 This means we should use full table scan, and start it with
846 init_read_record call
847 B. query_plan.index != MAX_KEY
848 B.1 quick select is used, start the scan with init_read_record
849 B.2 quick select is not used, this is full index scan (with LIMIT)
850 Full index scan must be started with init_read_record_idx
851 */
852
853 if (query_plan.index == MAX_KEY || (select && select->quick))
854 error= init_read_record(&info, thd, table, select, NULL, 0, 1, FALSE);
855 else
856 error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
857 reverse);
858
859 if (unlikely(error))
860 {
861 close_cached_file(&tempfile);
862 goto err;
863 }
864
865 THD_STAGE_INFO(thd, stage_searching_rows_for_update);
866 ha_rows tmp_limit= limit;
867
868 while (likely(!(error=info.read_record())) && likely(!thd->killed))
869 {
870 explain->buf_tracker.on_record_read();
871 thd->inc_examined_row_count(1);
872 if (!select || (error= select->skip_record(thd)) > 0)
873 {
874 if (table->file->ha_was_semi_consistent_read())
875 continue; /* repeat the read of the same row if it still exists */
876
877 explain->buf_tracker.on_record_after_where();
878 table->file->position(table->record[0]);
879 if (unlikely(my_b_write(&tempfile,table->file->ref,
880 table->file->ref_length)))
881 {
882 error=1; /* purecov: inspected */
883 break; /* purecov: inspected */
884 }
885 if (!--limit && using_limit)
886 {
887 error= -1;
888 break;
889 }
890 }
891 else
892 {
893 /*
894 Don't try unlocking the row if skip_record reported an
895 error since in this case the transaction might have been
896 rolled back already.
897 */
898 if (unlikely(error < 0))
899 {
900 /* Fatal error from select->skip_record() */
901 error= 1;
902 break;
903 }
904 else
905 table->file->unlock_row();
906 }
907 }
908 if (unlikely(thd->killed) && !error)
909 error= 1; // Aborted
910 limit= tmp_limit;
911 table->file->try_semi_consistent_read(0);
912 end_read_record(&info);
913
914 /* Change select to use tempfile */
915 if (select)
916 {
917 delete select->quick;
918 if (select->free_cond)
919 delete select->cond;
920 select->quick=0;
921 select->cond=0;
922 }
923 else
924 {
925 if (!(select= new SQL_SELECT))
926 goto err;
927 select->head=table;
928 }
929
930 if (unlikely(reinit_io_cache(&tempfile,READ_CACHE,0L,0,0)))
931 error= 1; /* purecov: inspected */
932 select->file= tempfile; // Read row ptrs from this file
933 if (unlikely(error >= 0))
934 goto err;
935
936 table->file->ha_end_keyread();
937 table->column_bitmaps_set(save_read_set, save_write_set);
938 }
939 }
940
941 update_begin:
942 if (ignore)
943 table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
944
945 if (select && select->quick && select->quick->reset())
946 goto err;
947 table->file->try_semi_consistent_read(1);
948 if (init_read_record(&info, thd, table, select, file_sort, 0, 1, FALSE))
949 goto err;
950
951 updated= found= 0;
952 /*
953 Generate an error (in TRADITIONAL mode) or warning
954 when trying to set a NOT NULL field to NULL.
955 */
956 thd->count_cuted_fields= CHECK_FIELD_WARN;
957 thd->cuted_fields=0L;
958
959 transactional_table= table->file->has_transactions_and_rollback();
960 thd->abort_on_warning= !ignore && thd->is_strict_mode();
961
962 if (do_direct_update)
963 {
964 /* Direct updating is supported */
965 ha_rows update_rows= 0, found_rows= 0;
966 DBUG_PRINT("info", ("Using direct update"));
967 table->reset_default_fields();
968 if (unlikely(!(error= table->file->ha_direct_update_rows(&update_rows,
969 &found_rows))))
970 error= -1;
971 updated= update_rows;
972 found= found_rows;
973 if (found < updated)
974 found= updated;
975 goto update_end;
976 }
977
978 if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_UPDATE) &&
979 !table->prepare_triggers_for_update_stmt_or_event())
980 will_batch= !table->file->start_bulk_update();
981
982 /*
983 Assure that we can use position()
984 if we need to create an error message.
985 */
986 if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
987 table->prepare_for_position();
988
989 table->reset_default_fields();
990
991 /*
992 We can use compare_record() to optimize away updates if
993 the table handler is returning all columns OR if
994 if all updated columns are read
995 */
996 can_compare_record= records_are_comparable(table);
997 explain->tracker.on_scan_init();
998
999 table->file->prepare_for_insert(1);
1000 DBUG_ASSERT(table->file->inited != handler::NONE);
1001
1002 THD_STAGE_INFO(thd, stage_updating);
1003 while (!(error=info.read_record()) && !thd->killed)
1004 {
1005 explain->tracker.on_record_read();
1006 thd->inc_examined_row_count(1);
1007 if (!select || select->skip_record(thd) > 0)
1008 {
1009 if (table->file->ha_was_semi_consistent_read())
1010 continue; /* repeat the read of the same row if it still exists */
1011
1012 explain->tracker.on_record_after_where();
1013 store_record(table,record[1]);
1014
1015 if (table_list->has_period())
1016 cut_fields_for_portion_of_time(thd, table,
1017 table_list->period_conditions);
1018
1019 if (fill_record_n_invoke_before_triggers(thd, table, fields, values, 0,
1020 TRG_EVENT_UPDATE))
1021 break; /* purecov: inspected */
1022
1023 found++;
1024
1025 bool record_was_same= false;
1026 bool need_update= !can_compare_record || compare_record(table);
1027
1028 if (need_update)
1029 {
1030 if (table->versioned(VERS_TIMESTAMP) &&
1031 thd->lex->sql_command == SQLCOM_DELETE)
1032 table->vers_update_end();
1033
1034 if ((res= table_list->view_check_option(thd, ignore)) !=
1035 VIEW_CHECK_OK)
1036 {
1037 found--;
1038 if (res == VIEW_CHECK_SKIP)
1039 continue;
1040 else if (res == VIEW_CHECK_ERROR)
1041 {
1042 error= 1;
1043 break;
1044 }
1045 }
1046 if (will_batch)
1047 {
1048 /*
1049 Typically a batched handler can execute the batched jobs when:
1050 1) When specifically told to do so
1051 2) When it is not a good idea to batch anymore
1052 3) When it is necessary to send batch for other reasons
1053 (One such reason is when READ's must be performed)
1054
1055 1) is covered by exec_bulk_update calls.
1056 2) and 3) is handled by the bulk_update_row method.
1057
1058 bulk_update_row can execute the updates including the one
1059 defined in the bulk_update_row or not including the row
1060 in the call. This is up to the handler implementation and can
1061 vary from call to call.
1062
1063 The dup_key_found reports the number of duplicate keys found
1064 in those updates actually executed. It only reports those if
1065 the extra call with HA_EXTRA_IGNORE_DUP_KEY have been issued.
1066 If this hasn't been issued it returns an error code and can
1067 ignore this number. Thus any handler that implements batching
1068 for UPDATE IGNORE must also handle this extra call properly.
1069
1070 If a duplicate key is found on the record included in this
1071 call then it should be included in the count of dup_key_found
1072 and error should be set to 0 (only if these errors are ignored).
1073 */
1074 DBUG_PRINT("info", ("Batched update"));
1075 error= table->file->ha_bulk_update_row(table->record[1],
1076 table->record[0],
1077 &dup_key_found);
1078 limit+= dup_key_found;
1079 updated-= dup_key_found;
1080 }
1081 else
1082 {
1083 /* Non-batched update */
1084 error= table->file->ha_update_row(table->record[1],
1085 table->record[0]);
1086 }
1087
1088 record_was_same= error == HA_ERR_RECORD_IS_THE_SAME;
1089 if (unlikely(record_was_same))
1090 {
1091 error= 0;
1092 }
1093 else if (likely(!error))
1094 {
1095 if (has_vers_fields && table->versioned(VERS_TRX_ID))
1096 rows_inserted++;
1097 updated++;
1098 }
1099
1100 if (likely(!error) && !record_was_same && table_list->has_period())
1101 {
1102 store_record(table, record[2]);
1103 restore_record(table, record[1]);
1104 error= table->insert_portion_of_time(thd,
1105 table_list->period_conditions,
1106 &rows_inserted);
1107 restore_record(table, record[2]);
1108 }
1109
1110 if (unlikely(error) &&
1111 (!ignore || table->file->is_fatal_error(error, HA_CHECK_ALL)))
1112 {
1113 goto error;
1114 }
1115 }
1116
1117 if (likely(!error) && has_vers_fields && table->versioned(VERS_TIMESTAMP))
1118 {
1119 store_record(table, record[2]);
1120 table->mark_columns_per_binlog_row_image();
1121 error= vers_insert_history_row(table);
1122 restore_record(table, record[2]);
1123 if (unlikely(error))
1124 {
1125 error:
1126 /*
1127 If (ignore && error is ignorable) we don't have to
1128 do anything; otherwise...
1129 */
1130 myf flags= 0;
1131
1132 if (table->file->is_fatal_error(error, HA_CHECK_ALL))
1133 flags|= ME_FATAL; /* Other handler errors are fatal */
1134
1135 prepare_record_for_error_message(error, table);
1136 table->file->print_error(error,MYF(flags));
1137 error= 1;
1138 break;
1139 }
1140 rows_inserted++;
1141 }
1142
1143 if (table->triggers &&
1144 unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
1145 TRG_ACTION_AFTER, TRUE)))
1146 {
1147 error= 1;
1148 break;
1149 }
1150
1151 if (!--limit && using_limit)
1152 {
1153 /*
1154 We have reached end-of-file in most common situations where no
1155 batching has occurred and if batching was supposed to occur but
1156 no updates were made and finally when the batch execution was
1157 performed without error and without finding any duplicate keys.
1158 If the batched updates were performed with errors we need to
1159 check and if no error but duplicate key's found we need to
1160 continue since those are not counted for in limit.
1161 */
1162 if (will_batch &&
1163 ((error= table->file->exec_bulk_update(&dup_key_found)) ||
1164 dup_key_found))
1165 {
1166 if (error)
1167 {
1168 /* purecov: begin inspected */
1169 /*
1170 The handler should not report error of duplicate keys if they
1171 are ignored. This is a requirement on batching handlers.
1172 */
1173 prepare_record_for_error_message(error, table);
1174 table->file->print_error(error,MYF(0));
1175 error= 1;
1176 break;
1177 /* purecov: end */
1178 }
1179 /*
1180 Either an error was found and we are ignoring errors or there
1181 were duplicate keys found. In both cases we need to correct
1182 the counters and continue the loop.
1183 */
1184 limit= dup_key_found; //limit is 0 when we get here so need to +
1185 updated-= dup_key_found;
1186 }
1187 else
1188 {
1189 error= -1; // Simulate end of file
1190 break;
1191 }
1192 }
1193 }
1194 /*
1195 Don't try unlocking the row if skip_record reported an error since in
1196 this case the transaction might have been rolled back already.
1197 */
1198 else if (likely(!thd->is_error()))
1199 table->file->unlock_row();
1200 else
1201 {
1202 error= 1;
1203 break;
1204 }
1205 thd->get_stmt_da()->inc_current_row_for_warning();
1206 if (unlikely(thd->is_error()))
1207 {
1208 error= 1;
1209 break;
1210 }
1211 }
1212 ANALYZE_STOP_TRACKING(thd, &explain->command_tracker);
1213 table->auto_increment_field_not_null= FALSE;
1214 dup_key_found= 0;
1215 /*
    Caching the killed status to pass as the arg to query event constructor;
1217 The cached value can not change whereas the killed status can
1218 (externally) since this point and change of the latter won't affect
1219 binlogging.
1220 It's assumed that if an error was set in combination with an effective
1221 killed status then the error is due to killing.
1222 */
1223 killed_status= thd->killed; // get the status of the volatile
1224 // simulated killing after the loop must be ineffective for binlogging
1225 DBUG_EXECUTE_IF("simulate_kill_bug27571",
1226 {
1227 thd->set_killed(KILL_QUERY);
1228 };);
1229 error= (killed_status == NOT_KILLED)? error : 1;
1230
1231 if (likely(error) &&
1232 will_batch &&
1233 (loc_error= table->file->exec_bulk_update(&dup_key_found)))
1234 /*
1235 An error has occurred when a batched update was performed and returned
1236 an error indication. It cannot be an allowed duplicate key error since
1237 we require the batching handler to treat this as a normal behavior.
1238
1239 Otherwise we simply remove the number of duplicate keys records found
1240 in the batched update.
1241 */
1242 {
1243 /* purecov: begin inspected */
1244 prepare_record_for_error_message(loc_error, table);
1245 table->file->print_error(loc_error,MYF(ME_FATAL));
1246 error= 1;
1247 /* purecov: end */
1248 }
1249 else
1250 updated-= dup_key_found;
1251 if (will_batch)
1252 table->file->end_bulk_update();
1253
1254 update_end:
1255 table->file->try_semi_consistent_read(0);
1256
1257 if (!transactional_table && updated > 0)
1258 thd->transaction->stmt.modified_non_trans_table= TRUE;
1259
1260 end_read_record(&info);
1261 delete select;
1262 select= NULL;
1263 THD_STAGE_INFO(thd, stage_end);
1264 if (table_list->has_period())
1265 table->file->ha_release_auto_increment();
1266 (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
1267
1268 /*
1269 Invalidate the table in the query cache if something changed.
1270 This must be before binlog writing and ha_autocommit_...
1271 */
1272 if (updated)
1273 {
1274 query_cache_invalidate3(thd, table_list, 1);
1275 }
1276
1277 if (thd->transaction->stmt.modified_non_trans_table)
1278 thd->transaction->all.modified_non_trans_table= TRUE;
1279 thd->transaction->all.m_unsafe_rollback_flags|=
1280 (thd->transaction->stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
1281
1282 /*
1283 error < 0 means really no error at all: we processed all rows until the
1284 last one without error. error > 0 means an error (e.g. unique key
1285 violation and no IGNORE or REPLACE). error == 0 is also an error (if
1286 preparing the record or invoking before triggers fails). See
1287 ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
1288 Sometimes we want to binlog even if we updated no rows, in case user used
1289 it to be sure master and slave are in same state.
1290 */
1291 if (likely(error < 0) || thd->transaction->stmt.modified_non_trans_table)
1292 {
1293 if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
1294 {
1295 int errcode= 0;
1296 if (likely(error < 0))
1297 thd->clear_error();
1298 else
1299 errcode= query_error_code(thd, killed_status == NOT_KILLED);
1300
1301 ScopedStatementReplication scoped_stmt_rpl(
1302 table->versioned(VERS_TRX_ID) ? thd : NULL);
1303
1304 if (thd->binlog_query(THD::ROW_QUERY_TYPE,
1305 thd->query(), thd->query_length(),
1306 transactional_table, FALSE, FALSE, errcode) > 0)
1307 {
1308 error=1; // Rollback update
1309 }
1310 }
1311 }
1312 DBUG_ASSERT(transactional_table || !updated || thd->transaction->stmt.modified_non_trans_table);
1313 free_underlaid_joins(thd, select_lex);
1314 delete file_sort;
1315 if (table->file->pushed_cond)
1316 {
1317 table->file->pushed_cond= 0;
1318 table->file->cond_pop();
1319 }
1320
1321 /* If LAST_INSERT_ID(X) was used, report X */
1322 id= thd->arg_of_last_insert_id_function ?
1323 thd->first_successful_insert_id_in_prev_stmt : 0;
1324
1325 if (likely(error < 0) && likely(!thd->lex->analyze_stmt))
1326 {
1327 char buff[MYSQL_ERRMSG_SIZE];
1328 if (!table->versioned(VERS_TIMESTAMP) && !table_list->has_period())
1329 my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO), (ulong) found,
1330 (ulong) updated,
1331 (ulong) thd->get_stmt_da()->current_statement_warn_count());
1332 else
1333 my_snprintf(buff, sizeof(buff),
1334 ER_THD(thd, ER_UPDATE_INFO_WITH_SYSTEM_VERSIONING),
1335 (ulong) found, (ulong) updated, (ulong) rows_inserted,
1336 (ulong) thd->get_stmt_da()->current_statement_warn_count());
1337 my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
1338 id, buff);
1339 DBUG_PRINT("info",("%ld records updated", (long) updated));
1340 }
1341 thd->count_cuted_fields= CHECK_FIELD_IGNORE; /* calc cuted fields */
1342 thd->abort_on_warning= 0;
1343 if (thd->lex->current_select->first_cond_optimization)
1344 {
1345 thd->lex->current_select->save_leaf_tables(thd);
1346 thd->lex->current_select->first_cond_optimization= 0;
1347 }
1348 *found_return= found;
1349 *updated_return= updated;
1350
1351 if (unlikely(thd->lex->analyze_stmt))
1352 goto emit_explain_and_leave;
1353
1354 DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
1355
1356 err:
1357 delete select;
1358 delete file_sort;
1359 free_underlaid_joins(thd, select_lex);
1360 table->file->ha_end_keyread();
1361 if (table->file->pushed_cond)
1362 table->file->cond_pop();
1363 thd->abort_on_warning= 0;
1364 DBUG_RETURN(1);
1365
1366 produce_explain_and_leave:
1367 /*
1368 We come here for various "degenerate" query plans: impossible WHERE,
1369 no-partitions-used, impossible-range, etc.
1370 */
1371 if (unlikely(!query_plan.save_explain_update_data(query_plan.mem_root, thd)))
1372 goto err;
1373
1374 emit_explain_and_leave:
1375 int err2= thd->lex->explain->send_explain(thd);
1376
1377 delete select;
1378 free_underlaid_joins(thd, select_lex);
1379 DBUG_RETURN((err2 || thd->is_error()) ? 1 : 0);
1380 }
1381
1382 /*
1383 Prepare items in UPDATE statement
1384
1385 SYNOPSIS
1386 mysql_prepare_update()
1387 thd - thread handler
1388 table_list - global/local table list
1389 conds - conditions
1390 order_num - number of ORDER BY list entries
1391 order - ORDER BY clause list
1392
1393 RETURN VALUE
1394 FALSE OK
1395 TRUE error
1396 */
bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
                          Item **conds, uint order_num, ORDER *order)
{
  Item *fake_conds= 0;
#ifndef NO_EMBEDDED_ACCESS_CHECKS
  TABLE *table= table_list->table;
#endif
  List<Item> all_fields;
  SELECT_LEX *select_lex= thd->lex->first_select_lex();
  DBUG_ENTER("mysql_prepare_update");

#ifndef NO_EMBEDDED_ACCESS_CHECKS
  /*
    Columns that are only read by the statement need SELECT privilege;
    request whatever part of SELECT_ACL is not already granted.
  */
  table_list->grant.want_privilege= table->grant.want_privilege=
    (SELECT_ACL & ~table->grant.privilege);
  table_list->register_want_access(SELECT_ACL);
#endif

  /* Aggregate functions are not allowed anywhere in a single-table UPDATE. */
  thd->lex->allow_sum_func.clear_all();

  /* UPDATE ... FOR PORTION OF: turn the period clause into WHERE conditions. */
  if (table_list->has_period() &&
      select_lex->period_setup_conds(thd, table_list))
    DBUG_RETURN(true);

  DBUG_ASSERT(table_list->table);
  // conds could be cached from previous SP call
  DBUG_ASSERT(!table_list->vers_conditions.need_setup() ||
              !*conds || thd->stmt_arena->is_stmt_execute());
  /* Add system-versioning (AS OF / history) conditions, if any. */
  if (select_lex->vers_setup_conds(thd, table_list))
    DBUG_RETURN(TRUE);

  /* Period/versioning setup may have rewritten the WHERE clause. */
  *conds= select_lex->where;

  /*
    We do not call DT_MERGE_FOR_INSERT because it has no sense for simple
    (not multi-) update
  */
  if (mysql_handle_derived(thd->lex, DT_PREPARE))
    DBUG_RETURN(TRUE);

  /*
    Resolve tables, the WHERE condition and the ORDER BY list; any failure
    aborts the statement.
  */
  if (setup_tables_and_check_access(thd, &select_lex->context,
                                    &select_lex->top_join_list, table_list,
                                    select_lex->leaf_tables,
                                    FALSE, UPDATE_ACL, SELECT_ACL, TRUE) ||
      setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
      select_lex->setup_ref_array(thd, order_num) ||
      setup_order(thd, select_lex->ref_pointer_array,
                  table_list, all_fields, all_fields, order) ||
      setup_ftfuncs(select_lex))
    DBUG_RETURN(TRUE);


  /* Remember resolved items for re-execution of prepared statements/SPs. */
  select_lex->fix_prepare_information(thd, conds, &fake_conds);
  DBUG_RETURN(FALSE);
}
1451
1452 /**
1453 Check that we are not using table that we are updating in a sub select
1454
1455 @param thd Thread handle
1456 @param table_list List of table with first to check
1457
1458 @retval TRUE Error
1459 @retval FALSE OK
1460 */
check_unique_table(THD * thd,TABLE_LIST * table_list)1461 bool check_unique_table(THD *thd, TABLE_LIST *table_list)
1462 {
1463 TABLE_LIST *duplicate;
1464 DBUG_ENTER("check_unique_table");
1465 if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
1466 {
1467 update_non_unique_table_error(table_list, "UPDATE", duplicate);
1468 DBUG_RETURN(TRUE);
1469 }
1470 DBUG_RETURN(FALSE);
1471 }
1472
1473 /***************************************************************************
1474 Update multiple tables from join
1475 ***************************************************************************/
1476
1477 /*
1478 Get table map for list of Item_field
1479 */
1480
get_table_map(List<Item> * items)1481 static table_map get_table_map(List<Item> *items)
1482 {
1483 List_iterator_fast<Item> item_it(*items);
1484 Item_field *item;
1485 table_map map= 0;
1486
1487 while ((item= (Item_field *) item_it++))
1488 map|= item->all_used_tables();
1489 DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
1490 return map;
1491 }
1492
1493 /**
1494 If one row is updated through two different aliases and the first
1495 update physically moves the row, the second update will error
1496 because the row is no longer located where expected. This function
1497 checks if the multiple-table update is about to do that and if so
1498 returns with an error.
1499
1500 The following update operations physically moves rows:
1501 1) Update of a column in a clustered primary key
1502 2) Update of a column used to calculate which partition the row belongs to
1503
1504 This function returns with an error if both of the following are
1505 true:
1506
1507 a) A table in the multiple-table update statement is updated
1508 through multiple aliases (including views)
1509 b) At least one of the updates on the table from a) may physically
1510 moves the row. Note: Updating a column used to calculate which
1511 partition a row belongs to does not necessarily mean that the
1512 row is moved. The new value may or may not belong to the same
1513 partition.
1514
1515 @param leaves First leaf table
1516 @param tables_for_update Map of tables that are updated
1517
1518 @return
1519 true if the update is unsafe, in which case an error message is also set,
1520 false otherwise.
1521 */
static
bool unsafe_key_update(List<TABLE_LIST> leaves, table_map tables_for_update)
{
  List_iterator_fast<TABLE_LIST> it(leaves), it2(leaves);
  TABLE_LIST *tl, *tl2;

  while ((tl= it++))
  {
    if (!tl->is_jtbm() && (tl->table->map & tables_for_update))
    {
      TABLE *table1= tl->table;
      bool primkey_clustered= (table1->file->
                               pk_is_clustering_key(table1->s->primary_key));

      bool table_partitioned= false;
#ifdef WITH_PARTITION_STORAGE_ENGINE
      table_partitioned= (table1->part_info != NULL);
#endif

      /*
        Only a clustered PK update or a partition-key update can physically
        move a row, so other tables need no second-alias check.
      */
      if (!table_partitioned && !primkey_clustered)
        continue;

      it2.rewind();
      while ((tl2= it2++))
      {
        if (tl2->is_jtbm())
          continue;
        /*
          Look at "next" tables only since all previous tables have
          already been checked
        */
        TABLE *table2= tl2->table;
        if (tl2 != tl &&
            table2->map & tables_for_update && table1->s == table2->s)
        {
          // A table is updated through two aliases
          if (table_partitioned &&
              (partition_key_modified(table1, table1->write_set) ||
               partition_key_modified(table2, table2->write_set)))
          {
            // Partitioned key is updated
            my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
                     tl->top_table()->alias.str,
                     tl2->top_table()->alias.str);
            return true;
          }

          if (primkey_clustered)
          {
            // The primary key can cover multiple columns
            KEY key_info= table1->key_info[table1->s->primary_key];
            KEY_PART_INFO *key_part= key_info.key_part;
            KEY_PART_INFO *key_part_end= key_part + key_info.user_defined_key_parts;

            /* Error out if either alias writes to any PK column. */
            for (;key_part != key_part_end; ++key_part)
            {
              if (bitmap_is_set(table1->write_set, key_part->fieldnr-1) ||
                  bitmap_is_set(table2->write_set, key_part->fieldnr-1))
              {
                // Clustered primary key is updated
                my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
                         tl->top_table()->alias.str,
                         tl2->top_table()->alias.str);
                return true;
              }
            }
          }
        }
      }
    }
  }
  return false;
}
1595
1596 /**
1597 Check if there is enough privilege on specific table used by the
1598 main select list of multi-update directly or indirectly (through
1599 a view).
1600
1601 @param[in] thd Thread context.
1602 @param[in] table Table list element for the table.
1603 @param[in] tables_for_update Bitmap with tables being updated.
1604 @param[in/out] updated_arg Set to true if table in question is
1605 updated, also set to true if it is
1606 a view and one of its underlying
1607 tables is updated. Should be
1608 initialized to false by the caller
1609 before a sequence of calls to this
1610 function.
1611
1612 @note To determine which tables/views are updated we have to go from
1613 leaves to root since tables_for_update contains map of leaf
1614 tables being updated and doesn't include non-leaf tables
1615 (fields are already resolved to leaf tables).
1616
1617 @retval false - Success, all necessary privileges on all tables are
1618 present or might be present on column-level.
1619 @retval true - Failure, some necessary privilege on some table is
1620 missing.
1621 */
1622
static bool multi_update_check_table_access(THD *thd, TABLE_LIST *table,
                                            table_map tables_for_update,
                                            bool *updated_arg)
{
  if (table->view)
  {
    bool updated= false;
    /*
      If it is a mergeable view then we need to check privileges on its
      underlying tables being merged (including views). We also need to
      check if any of them is updated in order to find if this view is
      updated.
      If it is a non-mergeable view then it can't be updated.
    */
    DBUG_ASSERT(table->merge_underlying_list ||
                (!table->updatable &&
                 !(table->table->map & tables_for_update)));

    /* Recurse into every merged underlying table/view. */
    for (TABLE_LIST *tbl= table->merge_underlying_list; tbl;
         tbl= tbl->next_local)
    {
      if (multi_update_check_table_access(thd, tbl, tables_for_update,
                                          &updated))
      {
        /* Hide details of the underlying object from the user's error. */
        tbl->hide_view_error(thd);
        return true;
      }
    }
    /* The view itself needs UPDATE only if one of its parts is updated. */
    if (check_table_access(thd, updated ? UPDATE_ACL: SELECT_ACL, table,
                           FALSE, 1, FALSE))
      return true;
    *updated_arg|= updated;
    /* We only need SELECT privilege for columns in the values list. */
    table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
  }
  else
  {
    /* Must be a base or derived table. */
    const bool updated= table->table->map & tables_for_update;
    if (check_table_access(thd, updated ? UPDATE_ACL : SELECT_ACL, table,
                           FALSE, 1, FALSE))
      return true;
    *updated_arg|= updated;
    /* We only need SELECT privilege for columns in the values list. */
    if (!table->derived)
    {
      table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
      table->table->grant.want_privilege= (SELECT_ACL &
                                           ~table->table->grant.privilege);
    }
  }
  return false;
}
1676
1677
/**
  Prelocking strategy used while opening tables for multi-table UPDATE.
  handle_end() determines which tables are actually updated and extends
  the table list (triggers etc.) accordingly.
*/
class Multiupdate_prelocking_strategy : public DML_prelocking_strategy
{
  bool done;                 // true once handle_end() has run for this statement
  bool has_prelocking_list;  // statement already required prelocking at reset()
public:
  void reset(THD *thd);
  bool handle_end(THD *thd);
};
1686
reset(THD * thd)1687 void Multiupdate_prelocking_strategy::reset(THD *thd)
1688 {
1689 done= false;
1690 has_prelocking_list= thd->lex->requires_prelocking();
1691 }
1692
1693 /**
1694 Determine what tables could be updated in the multi-update
1695
1696 For these tables we'll need to open triggers and continue prelocking
1697 until all is open.
1698 */
bool Multiupdate_prelocking_strategy::handle_end(THD *thd)
{
  DBUG_ENTER("Multiupdate_prelocking_strategy::handle_end");
  /* Run only once per statement; reset() re-arms for re-execution. */
  if (done)
    DBUG_RETURN(0);

  LEX *lex= thd->lex;
  SELECT_LEX *select_lex= lex->first_select_lex();
  TABLE_LIST *table_list= lex->query_tables, *tl;

  done= true;

  /* Initialize, merge and prepare derived tables/views before resolution. */
  if (mysql_handle_derived(lex, DT_INIT) ||
      mysql_handle_derived(lex, DT_MERGE_FOR_INSERT) ||
      mysql_handle_derived(lex, DT_PREPARE))
    DBUG_RETURN(1);

  /*
    setup_tables() need for VIEWs. JOIN::prepare() will call setup_tables()
    second time, but this call will do nothing (there are check for second
    call in setup_tables()).
  */

  if (setup_tables_and_check_access(thd, &select_lex->context,
      &select_lex->top_join_list, table_list, select_lex->leaf_tables,
      FALSE, UPDATE_ACL, SELECT_ACL, TRUE))
    DBUG_RETURN(1);

  /* Resolve the SET-side column list; these columns will be written. */
  List<Item> *fields= &lex->first_select_lex()->item_list;
  if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
                                *fields, MARK_COLUMNS_WRITE, 0, 0))
    DBUG_RETURN(1);

  // Check if we have a view in the list ...
  for (tl= table_list; tl ; tl= tl->next_local)
    if (tl->view)
      break;
  // ... and pass this knowledge in check_fields call
  if (check_fields(thd, table_list, *fields, tl != NULL ))
    DBUG_RETURN(1);

  /* Map of the leaf tables actually written to by the SET clause. */
  table_map tables_for_update= thd->table_map_for_update= get_table_map(fields);

  /* Refuse updates that may physically move a row seen via two aliases. */
  if (unsafe_key_update(select_lex->leaf_tables, tables_for_update))
    DBUG_RETURN(1);

  /*
    Setup timestamp handling and locking mode
  */
  List_iterator<TABLE_LIST> ti(lex->first_select_lex()->leaf_tables);
  const bool using_lock_tables= thd->locked_tables_mode != LTM_NONE;
  while ((tl= ti++))
  {
    TABLE *table= tl->table;

    if (tl->is_jtbm())
      continue;

    /* if table will be updated then check that it is unique */
    if (table->map & tables_for_update)
    {
      if (!tl->single_table_updatable() || check_key_in_view(thd, tl))
      {
        my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
                 tl->top_table()->alias.str, "UPDATE");
        DBUG_RETURN(1);
      }

      DBUG_PRINT("info",("setting table `%s` for update",
                         tl->top_table()->alias.str));
      /*
        If table will be updated we should not downgrade lock for it and
        leave it as is.
      */
      tl->updating= 1;
      if (tl->belong_to_view)
        tl->belong_to_view->updating= 1;
      /* Continue prelocking (e.g. open triggers) for the updated table. */
      if (extend_table_list(thd, tl, this, has_prelocking_list))
        DBUG_RETURN(1);
    }
    else
    {
      DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias.str));
      /*
        If we are using the binary log, we need TL_READ_NO_INSERT to get
        correct order of statements. Otherwise, we use a TL_READ lock to
        improve performance.
        We don't downgrade metadata lock from SW to SR in this case as
        there is no guarantee that the same ticket is not used by
        another table instance used by this statement which is going to
        be write-locked (for example, trigger to be invoked might try
        to update this table).
        Last argument routine_modifies_data for read_lock_type_for_table()
        is ignored, as prelocking placeholder will never be set here.
      */
      DBUG_ASSERT(tl->prelocking_placeholder == false);
      thr_lock_type lock_type= read_lock_type_for_table(thd, lex, tl, true);
      if (using_lock_tables)
        tl->lock_type= lock_type;
      else
        tl->set_lock_type(thd, lock_type);
    }
  }

  /*
    Check access privileges for tables being updated or read.
    Note that unlike in the above loop we need to iterate here not only
    through all leaf tables but also through all view hierarchy.
  */

  for (tl= table_list; tl; tl= tl->next_local)
  {
    bool not_used= false;
    if (tl->is_jtbm())
      continue;
    if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
      DBUG_RETURN(TRUE);
  }

  /* check single table update for view compound from several tables */
  for (tl= table_list; tl; tl= tl->next_local)
  {
    TABLE_LIST *for_update= 0;
    if (tl->is_jtbm())
      continue;
    if (tl->is_merged_derived() &&
        tl->check_single_table(&for_update, tables_for_update, tl))
    {
      my_error(ER_VIEW_MULTIUPDATE, MYF(0), tl->view_db.str, tl->view_name.str);
      DBUG_RETURN(1);
    }
  }

  DBUG_RETURN(0);
}
1834
1835 /*
1836 make update specific preparation and checks after opening tables
1837
1838 SYNOPSIS
1839 mysql_multi_update_prepare()
1840 thd thread handler
1841
1842 RETURN
1843 FALSE OK
1844 TRUE Error
1845 */
1846
int mysql_multi_update_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *table_list= lex->query_tables;
  TABLE_LIST *tl;
  Multiupdate_prelocking_strategy prelocking_strategy;
  uint table_count= lex->table_count;
  DBUG_ENTER("mysql_multi_update_prepare");

  /*
    Open tables and create derived ones, but do not lock and fill them yet.

    During prepare phase acquire only S metadata locks instead of SW locks to
    keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
    and global read lock.

    Don't evaluate any subqueries even if constant, because
    tables aren't locked yet.
  */
  lex->context_analysis_only|= CONTEXT_ANALYSIS_ONLY_DERIVED;
  if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI)
  {
    if (open_tables(thd, &table_list, &table_count,
        thd->stmt_arena->is_stmt_prepare() ? MYSQL_OPEN_FORCE_SHARED_MDL : 0,
        &prelocking_strategy))
      DBUG_RETURN(TRUE);
  }
  else
  {
    /* following need for prepared statements, to run next time multi-update */
    thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
    /* Tables are already open; run only the strategy's finishing step. */
    prelocking_strategy.reset(thd);
    if (prelocking_strategy.handle_end(thd))
      DBUG_RETURN(TRUE);
  }

  /* now lock and fill tables */
  if (!thd->stmt_arena->is_stmt_prepare() &&
      lock_tables(thd, table_list, table_count, 0))
    DBUG_RETURN(TRUE);

  lex->context_analysis_only&= ~CONTEXT_ANALYSIS_ONLY_DERIVED;

  /* Best-effort: result intentionally ignored. */
  (void) read_statistics_for_tables_if_needed(thd, table_list);
  /* @todo: downgrade the metadata locks here. */

  /*
    Check that we are not using table that we are updating, but we should
    skip all tables of UPDATE SELECT itself
  */
  lex->first_select_lex()->exclude_from_table_unique_test= TRUE;
  /* We only need SELECT privilege for columns in the values list */
  List_iterator<TABLE_LIST> ti(lex->first_select_lex()->leaf_tables);
  while ((tl= ti++))
  {
    if (tl->is_jtbm())
      continue;
    TABLE *table= tl->table;
    TABLE_LIST *tlist;
    if (!(tlist= tl->top_table())->derived)
    {
      tlist->grant.want_privilege=
        (SELECT_ACL & ~tlist->grant.privilege);
      table->grant.want_privilege= (SELECT_ACL & ~table->grant.privilege);
    }
    DBUG_PRINT("info", ("table: %s want_privilege: %llx", tl->alias.str,
                        (longlong) table->grant.want_privilege));
  }
  /*
    Set exclude_from_table_unique_test value back to FALSE. It is needed for
    further check in multi_update::prepare whether to use record cache.
  */
  lex->first_select_lex()->exclude_from_table_unique_test= FALSE;

  /* Remember leaf tables for re-execution of prepared statements. */
  if (lex->save_prep_leaf_tables())
    DBUG_RETURN(TRUE);

  DBUG_RETURN (FALSE);
}
1926
1927
1928 /*
1929 Setup multi-update handling and call SELECT to do the join
1930 */
1931
/*
  Set up and execute a multi-table UPDATE.

  Creates the multi_update result sink, resolves the updated tables,
  and runs the join through mysql_select(), which feeds each matched
  row combination into the sink (multi_update::send_data).

  @param thd                thread handler
  @param table_list         tables referenced by the statement
  @param fields             columns assigned to (left side of SET)
  @param values             expressions assigned (right side of SET)
  @param conds              WHERE condition, may be NULL
  @param options            query option flags
  @param handle_duplicates  duplicate-handling mode
  @param ignore             true for UPDATE IGNORE
  @param unit               unit the statement belongs to
  @param select_lex         select providing the join
  @param result             [out] the created multi_update object; handed
                            back so the caller can clean it up even when
                            this function returns an error

  @retval FALSE ok
  @retval TRUE  error
*/
mysql_multi_update(THD * thd,TABLE_LIST * table_list,List<Item> * fields,List<Item> * values,COND * conds,ulonglong options,enum enum_duplicates handle_duplicates,bool ignore,SELECT_LEX_UNIT * unit,SELECT_LEX * select_lex,multi_update ** result)1932 bool mysql_multi_update(THD *thd, TABLE_LIST *table_list, List<Item> *fields,
1933 List<Item> *values, COND *conds, ulonglong options,
1934 enum enum_duplicates handle_duplicates,
1935 bool ignore, SELECT_LEX_UNIT *unit,
1936 SELECT_LEX *select_lex, multi_update **result)
1937 {
1938 bool res;
1939 DBUG_ENTER("mysql_multi_update");
1940
1941 if (!(*result= new (thd->mem_root) multi_update(thd, table_list,
1942 &thd->lex->first_select_lex()->leaf_tables,
1943 fields, values, handle_duplicates, ignore)))
1944 {
1945 DBUG_RETURN(TRUE);
1946 }
1947
/* Collect the leaf tables that are actually updated (see multi_update::init) */
1948 if ((*result)->init(thd))
1949 DBUG_RETURN(1);
1950
/* In strict mode (and not IGNORE), data-conversion warnings become errors */
1951 thd->abort_on_warning= !ignore && thd->is_strict_mode();
1952 List<Item> total_list;
1953
1954 if (setup_tables(thd, &select_lex->context, &select_lex->top_join_list,
1955 table_list, select_lex->leaf_tables, FALSE, FALSE))
1956 DBUG_RETURN(1);
1957
/* Add implicit row-period conditions for system-versioned tables */
1958 if (select_lex->vers_setup_conds(thd, table_list))
1959 DBUG_RETURN(1);
1960
/*
  OPTION_SETUP_TABLES_DONE: tables were already set up above, so
  mysql_select() must not repeat that step.
*/
1961 res= mysql_select(thd,
1962 table_list, total_list, conds,
1963 select_lex->order_list.elements,
1964 select_lex->order_list.first, NULL, NULL, NULL,
1965 options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1966 OPTION_SETUP_TABLES_DONE,
1967 *result, unit, select_lex);
1968
1969 DBUG_PRINT("info",("res: %d report_error: %d", res, (int) thd->is_error()));
/* A stored diagnostics error counts as failure even if mysql_select() didn't */
1970 res|= thd->is_error();
1971 if (unlikely(res))
1972 (*result)->abort_result_set();
1973 else
1974 {
1975 if (thd->lex->describe || thd->lex->analyze_stmt)
1976 res= thd->lex->explain->send_explain(thd);
1977 }
/* Restore: warnings-as-errors applies only for the duration of this statement */
1978 thd->abort_on_warning= 0;
1979 DBUG_RETURN(res);
1980 }
1981
1982
/*
  Construct the result sink for a multi-table UPDATE.

  Only stores the argument lists; per-statement work happens later in
  init() and prepare().  trans_safe starts as TRUE and is cleared as
  soon as a non-transactional table is modified.
*/
multi_update(THD * thd_arg,TABLE_LIST * table_list,List<TABLE_LIST> * leaves_list,List<Item> * field_list,List<Item> * value_list,enum enum_duplicates handle_duplicates_arg,bool ignore_arg)1983 multi_update::multi_update(THD *thd_arg, TABLE_LIST *table_list,
1984 List<TABLE_LIST> *leaves_list,
1985 List<Item> *field_list, List<Item> *value_list,
1986 enum enum_duplicates handle_duplicates_arg,
1987 bool ignore_arg):
1988 select_result_interceptor(thd_arg),
1989 all_tables(table_list), leaves(leaves_list), update_tables(0),
1990 tmp_tables(0), updated(0), found(0), fields(field_list),
1991 values(value_list), table_count(0), copy_field(0),
1992 handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
1993 transactional_tables(0), ignore(ignore_arg), error_handled(0), prepared(0),
1994 updated_sys_ver(0)
1995 {
1996 }
1997
1998
init(THD * thd)1999 bool multi_update::init(THD *thd)
2000 {
2001 table_map tables_to_update= get_table_map(fields);
2002 List_iterator_fast<TABLE_LIST> li(*leaves);
2003 TABLE_LIST *tbl;
2004 while ((tbl =li++))
2005 {
2006 if (tbl->is_jtbm())
2007 continue;
2008 if (!(tbl->table->map & tables_to_update))
2009 continue;
2010 if (updated_leaves.push_back(tbl, thd->mem_root))
2011 return true;
2012 }
2013 return false;
2014 }
2015
2016
2017 /*
2018 Connect fields with tables and create list of tables that are updated
2019 */
2020
/*
  Per-statement preparation: connect the SET columns with their target
  tables, compute the read/write column bitmaps, and allocate one slot
  (field list, value list, tmp-table parameters) per updated table.
  Runs only once per execution (guarded by 'prepared').

  NOTE(review): the first parameter name appears mojibake'd in this
  dump ("¬_used_values" for "&not_used_values"); left byte-identical.

  @retval 0 ok
  @retval 1 error
*/
prepare(List<Item> & not_used_values,SELECT_LEX_UNIT * lex_unit)2021 int multi_update::prepare(List<Item> ¬_used_values,
2022 SELECT_LEX_UNIT *lex_unit)
2023
2024 {
2025 TABLE_LIST *table_ref;
2026 SQL_I_List<TABLE_LIST> update;
2027 table_map tables_to_update;
2028 Item_field *item;
2029 List_iterator_fast<Item> field_it(*fields);
2030 List_iterator_fast<Item> value_it(*values);
2031 uint i, max_fields;
2032 uint leaf_table_count= 0;
2033 List_iterator<TABLE_LIST> ti(updated_leaves);
2034 DBUG_ENTER("multi_update::prepare");
2035
2036 if (prepared)
2037 DBUG_RETURN(0);
2038 prepared= true;
2039
/* Report truncated/out-of-range assigned values as warnings; reset counter */
2040 thd->count_cuted_fields= CHECK_FIELD_WARN;
2041 thd->cuted_fields=0L;
2042 THD_STAGE_INFO(thd, stage_updating_main_table);
2043
2044 tables_to_update= get_table_map(fields);
2045
2046 if (!tables_to_update)
2047 {
2048 my_message(ER_NO_TABLES_USED, ER_THD(thd, ER_NO_TABLES_USED), MYF(0));
2049 DBUG_RETURN(1);
2050 }
2051
2052 /*
2053 We gather the set of columns read during evaluation of SET expression in
2054 TABLE::tmp_set by pointing TABLE::read_set to it and then restore it after
2055 setup_fields().
2056 */
2057 while ((table_ref= ti++))
2058 {
2059 if (table_ref->is_jtbm())
2060 continue;
2061
2062 TABLE *table= table_ref->table;
2063 if (tables_to_update & table->map)
2064 {
2065 DBUG_ASSERT(table->read_set == &table->def_read_set);
2066 table->read_set= &table->tmp_set;
2067 bitmap_clear_all(table->read_set);
2068 }
2069 }
2070
2071 /*
2072 We have to check values after setup_tables to get covering_keys right in
2073 reference tables
2074 */
2075
/*
  The result is checked only AFTER the loop below, so the read_set
  pointers are restored even when setup_fields() fails.
*/
2076 int error= setup_fields(thd, Ref_ptr_array(),
2077 *values, MARK_COLUMNS_READ, 0, NULL, 0);
2078
2079 ti.rewind();
2080 while ((table_ref= ti++))
2081 {
2082 if (table_ref->is_jtbm())
2083 continue;
2084
2085 TABLE *table= table_ref->table;
2086 if (tables_to_update & table->map)
2087 {
/* Restore the regular read_set and merge in the columns read by SET */
2088 table->read_set= &table->def_read_set;
2089 bitmap_union(table->read_set, &table->tmp_set);
2090 table->file->prepare_for_insert(1);
2091 }
2092 }
2093 if (unlikely(error))
2094 DBUG_RETURN(1);
2095
2096 /*
2097 Save tables being updated in update_tables
2098 update_table->shared is position for table
2099 Don't use key read on tables that are updated
2100 */
2101
2102 update.empty();
2103 ti.rewind();
2104 while ((table_ref= ti++))
2105 {
2106 /* TODO: add support of view of join support */
2107 if (table_ref->is_jtbm())
2108 continue;
2109 TABLE *table=table_ref->table;
2110 leaf_table_count++;
2111 if (tables_to_update & table->map)
2112 {
/* Copy the TABLE_LIST so the update list can be linked independently */
2113 TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref,
2114 sizeof(*tl));
2115 if (!tl)
2116 DBUG_RETURN(1);
2117 update.link_in_list(tl, &tl->next_local);
/* tl->shared is the table's index into the per-table arrays below */
2118 tl->shared= table_count++;
2119 table->no_keyread=1;
2120 table->covering_keys.clear_all();
2121 table->pos_in_table_list= tl;
2122 table->prepare_triggers_for_update_stmt_or_event();
2123 table->reset_default_fields();
2124 }
2125 }
2126
2127 table_count= update.elements;
2128 update_tables= update.first;
2129
/* Per-updated-table arrays, indexed by TABLE_LIST::shared (calloc zero-fills) */
2130 tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
2131 tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
2132 table_count);
2133 fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
2134 table_count);
2135 values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
2136 table_count);
2137 if (unlikely(thd->is_fatal_error))
2138 DBUG_RETURN(1);
2139 for (i=0 ; i < table_count ; i++)
2140 {
2141 fields_for_table[i]= new List_item;
2142 values_for_table[i]= new List_item;
2143 }
2144 if (unlikely(thd->is_fatal_error))
2145 DBUG_RETURN(1);
2146
2147 /* Split fields into fields_for_table[] and values_by_table[] */
2148
2149 while ((item= (Item_field *) field_it++))
2150 {
2151 Item *value= value_it++;
2152 uint offset= item->field->table->pos_in_table_list->shared;
2153 fields_for_table[offset]->push_back(item, thd->mem_root);
2154 values_for_table[offset]->push_back(value, thd->mem_root);
2155 }
2156 if (unlikely(thd->is_fatal_error))
2157 DBUG_RETURN(1);
2158
2159 /* Allocate copy fields */
/*
  copy_field must be able to hold the largest per-table SET list plus
  one rowid entry per leaf table (see initialize_tables/do_updates).
*/
2160 max_fields=0;
2161 for (i=0 ; i < table_count ; i++)
2162 {
2163 set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
2164 if (fields_for_table[i]->elements)
2165 {
2166 TABLE *table= ((Item_field*)(fields_for_table[i]->head()))->field->table;
2167 switch_to_nullable_trigger_fields(*fields_for_table[i], table);
2168 switch_to_nullable_trigger_fields(*values_for_table[i], table);
2169 }
2170 }
2171 copy_field= new (thd->mem_root) Copy_field[max_fields];
2172 DBUG_RETURN(thd->is_fatal_error != 0);
2173 }
2174
update_used_tables()2175 void multi_update::update_used_tables()
2176 {
2177 Item *item;
2178 List_iterator_fast<Item> it(*values);
2179 while ((item= it++))
2180 {
2181 item->update_used_tables();
2182 }
2183 }
2184
prepare_to_read_rows()2185 void multi_update::prepare_to_read_rows()
2186 {
2187 /*
2188 update column maps now. it cannot be done in ::prepare() before the
2189 optimizer, because the optimize might reset them (in
2190 SELECT_LEX::update_used_tables()), it cannot be done in
2191 ::initialize_tables() after the optimizer, because the optimizer
2192 might read rows from const tables
2193 */
2194
2195 for (TABLE_LIST *tl= update_tables; tl; tl= tl->next_local)
2196 tl->table->mark_columns_needed_for_update();
2197 }
2198
2199
2200 /*
2201 Check if table is safe to update on fly
2202
2203 SYNOPSIS
2204 safe_update_on_fly()
2205 thd Thread handler
2206 join_tab How table is used in join
2207 all_tables List of tables
2208
2209 NOTES
2210 We can update the first table in join on the fly if we know that
2211 a row in this table will never be read twice. This is true under
2212 the following conditions:
2213
2214 - No column is both written to and read in SET expressions.
2215
2216 - We are doing a table scan and the data is in a separate file (MyISAM) or
2217 if we don't update a clustered key.
2218
2219 - We are doing a range scan and we don't update the scan key or
2220 the primary key for a clustered table handler.
2221
2222 - Table is not joined to itself.
2223
2224 This function gets information about fields to be updated from
2225 the TABLE::write_set bitmap.
2226
2227 WARNING
2228 This code is a bit dependent of how make_join_readinfo() works.
2229
2230 The field table->tmp_set is used for keeping track of which fields are
2231 read during evaluation of the SET expression. See multi_update::prepare.
2232
2233 RETURN
2234 0 Not safe to update
2235 1 Safe to update
2236 */
2237
safe_update_on_fly(THD * thd,JOIN_TAB * join_tab,TABLE_LIST * table_ref,TABLE_LIST * all_tables)2238 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
2239 TABLE_LIST *table_ref, TABLE_LIST *all_tables)
2240 {
2241 TABLE *table= join_tab->table;
2242 if (unique_table(thd, table_ref, all_tables, 0))
2243 return 0;
2244 if (join_tab->join->order) // FIXME this is probably too strong
2245 return 0;
2246 switch (join_tab->type) {
2247 case JT_SYSTEM:
2248 case JT_CONST:
2249 case JT_EQ_REF:
2250 return TRUE; // At most one matching row
2251 case JT_REF:
2252 case JT_REF_OR_NULL:
2253 return !is_key_used(table, join_tab->ref.key, table->write_set);
2254 case JT_ALL:
2255 if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
2256 return FALSE;
2257 /* If range search on index */
2258 if (join_tab->quick)
2259 return !join_tab->quick->is_keys_used(table->write_set);
2260 /* If scanning in clustered key */
2261 if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
2262 table->s->primary_key < MAX_KEY)
2263 return !is_key_used(table, table->s->primary_key, table->write_set);
2264 return TRUE;
2265 default:
2266 break; // Avoid compiler warning
2267 }
2268 return FALSE;
2269
2270 }
2271
2272
2273 /*
2274 Initialize table for multi table
2275
2276 IMPLEMENTATION
2277 - Update first table in join on the fly, if possible
2278 - Create temporary tables to store changed values for all other tables
2279 that are updated (and main_table if the above doesn't hold).
2280 */
2281
2282 bool
initialize_tables(JOIN * join)2283 multi_update::initialize_tables(JOIN *join)
2284 {
2285 TABLE_LIST *table_ref;
2286 DBUG_ENTER("initialize_tables");
2287
/* With SQL_SAFE_UPDATES set, refuse plans that scan a table without keys */
2288 if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
2289 error_if_full_join(join)))
2290 DBUG_RETURN(1);
/* First table in the chosen join order */
2291 main_table=join->join_tab->table;
2292 table_to_update= 0;
2293
2294 /* Any update has at least one pair (field, value) */
2295 DBUG_ASSERT(fields->elements);
2296 /*
2297 Only one table may be modified by UPDATE of an updatable view.
2298 For an updatable view first_table_for_update indicates this
2299 table.
2300 For a regular multi-update it refers to some updated table.
2301 */
2302 TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
2303
2304 /* Create a temporary table for keys to all tables, except main table */
2305 for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
2306 {
2307 TABLE *table=table_ref->table;
2308 uint cnt= table_ref->shared;
2309 List<Item> temp_fields;
2310 ORDER group;
2311 TMP_TABLE_PARAM *tmp_param;
2312
2313 if (ignore)
2314 table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
2315 if (table == main_table) // First table in join
2316 {
2317 if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
2318 {
/* No buffering needed for this table; rows are changed as they are read */
2319 table_to_update= table; // Update table on the fly
2320 has_vers_fields= table->vers_check_update(*fields);
2321 continue;
2322 }
2323 }
/* Rows will be re-fetched by rowid in do_updates(); remember positions */
2324 table->prepare_for_position();
2325 join->map2table[table->tablenr]->keep_current_rowid= true;
2326
2327 /*
2328 enable uncacheable flag if we update a view with check option
2329 and check option has a subselect, otherwise, the check option
2330 can be evaluated after the subselect was freed as independent
2331 (See full_local in JOIN::join_free()).
2332 */
2333 if (table_ref->check_option && !join->select_lex->uncacheable)
2334 {
2335 SELECT_LEX_UNIT *tmp_unit;
2336 SELECT_LEX *sl;
2337 for (tmp_unit= join->select_lex->first_inner_unit();
2338 tmp_unit;
2339 tmp_unit= tmp_unit->next_unit())
2340 {
2341 for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
2342 {
2343 if (sl->master_unit()->item)
2344 {
2345 join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
2346 goto loop_end;
2347 }
2348 }
2349 }
2350 }
2351 loop_end:
2352
2353 if (table == first_table_for_update && table_ref->check_option)
2354 {
/*
  Tables referenced by the CHECK OPTION but not updated themselves:
  their rowids must also be stored so the option can be re-evaluated
  in do_updates().
*/
2355 table_map unupdated_tables= table_ref->check_option->used_tables() &
2356 ~first_table_for_update->map;
2357 List_iterator<TABLE_LIST> ti(*leaves);
2358 TABLE_LIST *tbl_ref;
2359 while ((tbl_ref= ti++) && unupdated_tables)
2360 {
2361 if (unupdated_tables & tbl_ref->table->map)
2362 unupdated_tables&= ~tbl_ref->table->map;
2363 else
2364 continue;
2365 if (unupdated_check_opt_tables.push_back(tbl_ref->table))
2366 DBUG_RETURN(1);
2367 }
2368 }
2369
2370 tmp_param= tmp_table_param+cnt;
2371
2372 /*
2373 Create a temporary table to store all fields that are changed for this
2374 table. The first field in the temporary table is a pointer to the
2375 original row so that we can find and update it. For the updatable
2376 VIEW a few following fields are rowids of tables used in the CHECK
2377 OPTION condition.
2378 */
2379
2380 List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2381 TABLE *tbl= table;
2382 do
2383 {
2384 LEX_CSTRING field_name;
2385 field_name.str= tbl->alias.c_ptr();
2386 field_name.length= strlen(field_name.str);
2387 /*
2388 Signal each table (including tables referenced by WITH CHECK OPTION
2389 clause) for which we will store row position in the temporary table
2390 that we need a position to be read first.
2391 */
2392 tbl->prepare_for_position();
2393 join->map2table[tbl->tablenr]->keep_current_rowid= true;
2394
2395 Item_temptable_rowid *item=
2396 new (thd->mem_root) Item_temptable_rowid(tbl);
2397 if (!item)
2398 DBUG_RETURN(1);
2399 item->fix_fields(thd, 0);
2400 if (temp_fields.push_back(item, thd->mem_root))
2401 DBUG_RETURN(1);
2402 } while ((tbl= tbl_it++));
2403
/* After the rowid columns come the new values for the updated columns */
2404 temp_fields.append(fields_for_table[cnt]);
2405
2406 /* Make an unique key over the first field to avoid duplicated updates */
2407 bzero((char*) &group, sizeof(group));
2408 group.direction= ORDER::ORDER_ASC;
2409 group.item= (Item**) temp_fields.head_ref();
2410
2411 tmp_param->quick_group= 1;
2412 tmp_param->field_count= temp_fields.elements;
2413 tmp_param->func_count= temp_fields.elements - 1;
2414 calc_group_buffer(tmp_param, &group);
2415 /* small table, ignore @@big_tables */
2416 my_bool save_big_tables= thd->variables.big_tables;
2417 thd->variables.big_tables= FALSE;
2418 tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
2419 (ORDER*) &group, 0, 0,
2420 TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, &empty_clex_str);
2421 thd->variables.big_tables= save_big_tables;
2422 if (!tmp_tables[cnt])
2423 DBUG_RETURN(1);
2424 tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
2425 }
2426 join->tmp_table_keep_current_rowid= TRUE;
2427 DBUG_RETURN(0);
2428 }
2429
2430
item_rowid_table(Item * item)2431 static TABLE *item_rowid_table(Item *item)
2432 {
2433 if (item->type() != Item::FUNC_ITEM)
2434 return NULL;
2435 Item_func *func= (Item_func *)item;
2436 if (func->functype() != Item_func::TEMPTABLE_ROWID)
2437 return NULL;
2438 Item_temptable_rowid *itr= (Item_temptable_rowid *)func;
2439 return itr->table;
2440 }
2441
2442
2443 /*
2444 multi_update stores a rowid and new field values for every updated row in a
2445 temporary table (one temporary table per updated table). These rowids are
2446 obtained via Item_temptable_rowid's by calling handler::position(). But if
2447 the join is resolved via a temp table, rowids cannot be obtained from
2448 handler::position() in the multi_update::send_data(). So, they're stored in
2449 the join's temp table (JOIN::add_fields_for_current_rowid()) and here we
2450 replace Item_temptable_rowid's (that would've done handler::position()) with
2451 Item_field's (that will simply take the corresponding field value from the
2452 temp table).
2453 */
/*
  See the comment block above: when the join goes through a temp table,
  rewire Item_temptable_rowid entries in our per-table copy lists to
  Item_field's reading the rowid column stored in the join's temp table.

  @retval 0 ok
  @retval 1 out of memory
*/
prepare2(JOIN * join)2454 int multi_update::prepare2(JOIN *join)
2455 {
/* Nothing to do unless the join materializes into a rowid-keeping tmp table */
2456 if (!join->need_tmp || !join->tmp_table_keep_current_rowid)
2457 return 0;
2458
2459 // there cannot be many tmp tables in multi-update
2460 JOIN_TAB *tmptab= join->join_tab + join->exec_join_tab_cnt();
2461
2462 for (Item **it= tmptab->tmp_table_param->items_to_copy; *it ; it++)
2463 {
2464 TABLE *tbl= item_rowid_table(*it);
2465 if (!tbl)
2466 continue;
/* Find every rowid item for this table in our per-table copy lists */
2467 for (uint i= 0; i < table_count; i++)
2468 {
2469 for (Item **it2= tmp_table_param[i].items_to_copy; *it2; it2++)
2470 {
2471 if (item_rowid_table(*it2) != tbl)
2472 continue;
/* Read the rowid from the join's tmp table instead of handler::position() */
2473 Item_field *fld= new (thd->mem_root)
2474 Item_field(thd, (*it)->get_tmp_table_field());
2475 if (!fld)
2476 return 1;
2477 fld->result_field= (*it2)->get_tmp_table_field();
2478 *it2= fld;
2479 }
2480 }
2481 }
2482 return 0;
2483 }
2484
2485
~multi_update()2486 multi_update::~multi_update()
2487 {
2488 TABLE_LIST *table;
2489 for (table= update_tables ; table; table= table->next_local)
2490 {
2491 table->table->no_keyread= 0;
2492 if (ignore)
2493 table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
2494 }
2495
2496 if (tmp_tables)
2497 {
2498 for (uint cnt = 0; cnt < table_count; cnt++)
2499 {
2500 if (tmp_tables[cnt])
2501 {
2502 free_tmp_table(thd, tmp_tables[cnt]);
2503 tmp_table_param[cnt].cleanup();
2504 }
2505 }
2506 }
2507 if (copy_field)
2508 delete [] copy_field;
2509 thd->count_cuted_fields= CHECK_FIELD_IGNORE; // Restore this setting
2510 DBUG_ASSERT(trans_safe || !updated ||
2511 thd->transaction->all.modified_non_trans_table);
2512 }
2513
2514
/*
  Handle one joined row combination.

  The table chosen in initialize_tables() (table_to_update) is updated
  immediately; for every other updated table the row's rowid and the new
  column values are buffered in that table's temporary table, to be
  applied later by do_updates().

  NOTE(review): the parameter name appears mojibake'd in this dump
  ("¬_used_values" for "&not_used_values"); left byte-identical.

  @retval 0 ok
  @retval 1 error (already reported)
*/
send_data(List<Item> & not_used_values)2515 int multi_update::send_data(List<Item> ¬_used_values)
2516 {
2517 TABLE_LIST *cur_table;
2518 DBUG_ENTER("multi_update::send_data");
2519
2520 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2521 {
2522 int error= 0;
2523 TABLE *table= cur_table->table;
2524 uint offset= cur_table->shared;
2525 /*
2526 Check if we are using outer join and we didn't find the row
2527 or if we have already updated this row in the previous call to this
2528 function.
2529
2530 The same row may be presented here several times in a join of type
2531 UPDATE t1 FROM t1,t2 SET t1.a=t2.a
2532
2533 In this case we will do the update for the first found row combination.
2534 The join algorithm guarantees that we will not find the a row in
2535 t1 several times.
2536 */
2537 if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
2538 continue;
2539
2540 if (table == table_to_update)
2541 {
2542 /*
2543 We can use compare_record() to optimize away updates if
2544 the table handler is returning all columns OR if
2545 if all updated columns are read
2546 */
2547 bool can_compare_record;
2548 can_compare_record= records_are_comparable(table);
2549
2550 table->status|= STATUS_UPDATED;
/* Keep the old row in record[1] for ha_update_row()/compare_record() */
2551 store_record(table,record[1]);
2552
2553 if (fill_record_n_invoke_before_triggers(thd, table,
2554 *fields_for_table[offset],
2555 *values_for_table[offset], 0,
2556 TRG_EVENT_UPDATE))
2557 DBUG_RETURN(1);
2558 /*
2559 Reset the table->auto_increment_field_not_null as it is valid for
2560 only one row.
2561 */
2562 table->auto_increment_field_not_null= FALSE;
2563 found++;
/* Skip the engine update when we can prove the row did not change */
2564 if (!can_compare_record || compare_record(table))
2565 {
2566
2567 if ((error= cur_table->view_check_option(thd, ignore)) !=
2568 VIEW_CHECK_OK)
2569 {
2570 found--;
2571 if (error == VIEW_CHECK_SKIP)
2572 continue;
2573 else if (unlikely(error == VIEW_CHECK_ERROR))
2574 DBUG_RETURN(1);
2575 }
2576 if (unlikely(!updated++))
2577 {
2578 /*
2579 Inform the main table that we are going to update the table even
2580 while we may be scanning it. This will flush the read cache
2581 if it's used.
2582 */
2583 main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
2584 }
2585 if (unlikely((error=table->file->ha_update_row(table->record[1],
2586 table->record[0]))) &&
2587 error != HA_ERR_RECORD_IS_THE_SAME)
2588 {
2589 updated--;
/* With IGNORE, non-fatal engine errors are swallowed */
2590 if (!ignore ||
2591 table->file->is_fatal_error(error, HA_CHECK_ALL))
2592 goto error;
2593 }
2594 else
2595 {
2596 if (unlikely(error == HA_ERR_RECORD_IS_THE_SAME))
2597 {
2598 error= 0;
2599 updated--;
2600 }
2601 else if (has_vers_fields && table->versioned(VERS_TRX_ID))
2602 {
2603 updated_sys_ver++;
2604 }
2605 /* non-transactional or transactional table got modified */
2606 /* either multi_update class' flag is raised in its branch */
2607 if (table->file->has_transactions_and_rollback())
2608 transactional_tables= TRUE;
2609 else
2610 {
2611 trans_safe= FALSE;
2612 thd->transaction->stmt.modified_non_trans_table= TRUE;
2613 }
2614 }
2615 }
/* Timestamp-based system versioning: archive the old row version */
2616 if (has_vers_fields && table->versioned(VERS_TIMESTAMP))
2617 {
2618 store_record(table, record[2]);
2619 if (unlikely(error= vers_insert_history_row(table)))
2620 {
2621 restore_record(table, record[2]);
2622 goto error;
2623 }
2624 restore_record(table, record[2]);
2625 updated_sys_ver++;
2626 }
2627 if (table->triggers &&
2628 unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2629 TRG_ACTION_AFTER, TRUE)))
2630 DBUG_RETURN(1);
2631 }
2632 else
2633 {
/* Buffer rowid + new values in this table's temporary table */
2634 TABLE *tmp_table= tmp_tables[offset];
2635 if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
2636 DBUG_RETURN(1);
2637 /* rowid field is NULL if join tmp table has null row from outer join */
2638 if (tmp_table->field[0]->is_null())
2639 continue;
2640 /* Store regular updated fields in the row. */
2641 DBUG_ASSERT(1 + unupdated_check_opt_tables.elements ==
2642 tmp_table_param[offset].func_count);
2643 fill_record(thd, tmp_table,
2644 tmp_table->field + 1 + unupdated_check_opt_tables.elements,
2645 *values_for_table[offset], TRUE, FALSE);
2646
2647 /* Write row, ignoring duplicated updates to a row */
2648 error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
2649 found++;
2650 if (unlikely(error))
2651 {
2652 found--;
2653 if (error != HA_ERR_FOUND_DUPP_KEY &&
2654 error != HA_ERR_FOUND_DUPP_UNIQUE)
2655 {
/* Heap tmp table is full: convert it to an on-disk table and retry */
2656 if (create_internal_tmp_table_from_heap(thd, tmp_table,
2657 tmp_table_param[offset].start_recinfo,
2658 &tmp_table_param[offset].recinfo,
2659 error, 1, NULL))
2660 {
2661 do_update= 0;
2662 DBUG_RETURN(1); // Not a table_is_full error
2663 }
2664 found++;
2665 }
2666 }
2667 }
2668 continue;
2669 error:
2670 DBUG_ASSERT(error > 0);
2671 /*
2672 If (ignore && error == is ignorable) we don't have to
2673 do anything; otherwise...
2674 */
2675 myf flags= 0;
2676
2677 if (table->file->is_fatal_error(error, HA_CHECK_ALL))
2678 flags|= ME_FATAL; /* Other handler errors are fatal */
2679
2680 prepare_record_for_error_message(error, table);
2681 table->file->print_error(error,MYF(flags));
2682 DBUG_RETURN(1);
2683 } // for (cur_table)
2684 DBUG_RETURN(0);
2685 }
2686
2687
/*
  Abort handling after an error during the multi-update.

  If non-transactional tables were already modified, the buffered
  updates for the remaining tables are still applied (a rollback could
  not undo the changes anyway) and the statement is written to the
  binary log so slaves stay consistent.
*/
abort_result_set()2688 void multi_update::abort_result_set()
2689 {
2690 /* the error was handled or nothing deleted and no side effects return */
2691 if (unlikely(error_handled ||
2692 (!thd->transaction->stmt.modified_non_trans_table && !updated)))
2693 return;
2694
2695 /* Something already updated so we have to invalidate cache */
2696 if (updated)
2697 query_cache_invalidate3(thd, update_tables, 1);
2698 /*
2699 If all tables that has been updated are trans safe then just do rollback.
2700 If not attempt to do remaining updates.
2701 */
2702
2703 if (! trans_safe)
2704 {
2705 DBUG_ASSERT(thd->transaction->stmt.modified_non_trans_table);
2706 if (do_update && table_count > 1)
2707 {
2708 /* Add warning here */
/* Apply the already-buffered updates to the remaining tables */
2709 (void) do_updates();
2710 }
2711 }
2712 if (thd->transaction->stmt.modified_non_trans_table)
2713 {
2714 /*
2715 The query has to binlog because there's a modified non-transactional table
2716 either from the query's list or via a stored routine: bug#13270,23333
2717 */
2718 if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
2719 {
2720 /*
2721 THD::killed status might not have been set ON at time of an error
2722 got caught and if happens later the killed error is written
2723 into repl event.
2724 */
2725 int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
2726 /* the error of binary logging is ignored */
2727 (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
2728 thd->query(), thd->query_length(),
2729 transactional_tables, FALSE, FALSE, errcode);
2730 }
2731 thd->transaction->all.modified_non_trans_table= TRUE;
2732 }
/* Propagate unsafe-rollback flags from statement to whole transaction */
2733 thd->transaction->all.m_unsafe_rollback_flags|=
2734 (thd->transaction->stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
2735 DBUG_ASSERT(trans_safe || !updated || thd->transaction->stmt.modified_non_trans_table);
2736 }
2737
2738
/*
  Apply the buffered updates to all tables except the one that was
  updated on the fly (table_to_update).

  For every other updated table, scan its temporary table: each row
  holds the target row's rowid (plus rowids of tables referenced by
  WITH CHECK OPTION) followed by the new column values.  Re-fetch the
  row by rowid, copy the new values in, run triggers and versioning
  handling, and call ha_update_row().

  @retval 0 ok
  @retval 1 error (already reported)
*/
do_updates()2739 int multi_update::do_updates()
2740 {
2741 TABLE_LIST *cur_table;
2742 int local_error= 0;
2743 ha_rows org_updated;
2744 TABLE *table, *tmp_table, *err_table;
2745 List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
2746 DBUG_ENTER("multi_update::do_updates");
2747
2748 do_update= 0; // Don't retry this function
2749 if (!found)
2750 DBUG_RETURN(0);
2751
2752 /*
2753 Update read_set to include all fields that virtual columns may depend on.
2754 Usually they're already in the read_set, but if the previous access
2755 method was keyread, only the virtual column itself will be in read_set,
2756 not its dependencies
2757 */
2758 while(TABLE *tbl= check_opt_it++)
2759 if (Field **vf= tbl->vfield)
2760 for (; *vf; vf++)
2761 if (bitmap_is_set(tbl->read_set, (*vf)->field_index))
2762 (*vf)->vcol_info->expr->walk(&Item::register_field_in_read_map, 1, 0);
2763
2764 for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2765 {
2766 bool can_compare_record;
2767 uint offset= cur_table->shared;
2768
2769 table = cur_table->table;
2770 if (table == table_to_update)
2771 continue; // Already updated
/* Track per-table whether any row was changed (for trans_safe below) */
2772 org_updated= updated;
2773 tmp_table= tmp_tables[cur_table->shared];
2774 tmp_table->file->extra(HA_EXTRA_CACHE); // Change to read cache
/* Init for rowid-based fetches (ha_rnd_pos); argument 0 = no scan */
2775 if (unlikely((local_error= table->file->ha_rnd_init(0))))
2776 {
2777 err_table= table;
2778 goto err;
2779 }
2780 table->file->extra(HA_EXTRA_NO_CACHE);
2781 /*
2782 We have to clear the base record, if we have virtual indexed
2783 blob fields, as some storage engines will access the blob fields
2784 to calculate the keys to see if they have changed. Without
2785 clearing the blob pointers will contain random values which can
2786 cause a crash.
2787 This is a workaround for engines that access columns not present in
2788 either read or write set.
2789 */
2790 if (table->vfield)
2791 empty_record(table);
2792
2793 has_vers_fields= table->vers_check_update(*fields);
2794
2795 check_opt_it.rewind();
2796 while(TABLE *tbl= check_opt_it++)
2797 {
2798 if (unlikely((local_error= tbl->file->ha_rnd_init(0))))
2799 {
2800 err_table= tbl;
2801 goto err;
2802 }
2803 tbl->file->extra(HA_EXTRA_CACHE);
2804 }
2805
2806 /*
2807 Setup copy functions to copy fields from temporary table
2808 */
2809 List_iterator_fast<Item> field_it(*fields_for_table[offset]);
2810 Field **field;
2811 Copy_field *copy_field_ptr= copy_field, *copy_field_end;
2812
2813 /* Skip row pointers */
2814 field= tmp_table->field + 1 + unupdated_check_opt_tables.elements;
2815 for ( ; *field ; field++)
2816 {
2817 Item_field *item= (Item_field* ) field_it++;
2818 (copy_field_ptr++)->set(item->field, *field, 0);
2819 }
2820 copy_field_end=copy_field_ptr;
2821
/* Sequential scan over the buffered rows */
2822 if (unlikely((local_error= tmp_table->file->ha_rnd_init(1))))
2823 {
2824 err_table= tmp_table;
2825 goto err;
2826 }
2827
2828 can_compare_record= records_are_comparable(table);
2829
2830 for (;;)
2831 {
/* Only abort on kill while a clean rollback is still possible */
2832 if (thd->killed && trans_safe)
2833 {
2834 thd->fatal_error();
2835 goto err2;
2836 }
2837 if (unlikely((local_error=
2838 tmp_table->file->ha_rnd_next(tmp_table->record[0]))))
2839 {
2840 if (local_error == HA_ERR_END_OF_FILE)
2841 break;
2842 err_table= tmp_table;
2843 goto err;
2844 }
2845
2846 /* call rnd_pos() using rowids from temporary table */
2847 check_opt_it.rewind();
2848 TABLE *tbl= table;
2849 uint field_num= 0;
2850 do
2851 {
2852 DBUG_ASSERT(!tmp_table->field[field_num]->is_null());
2853 String rowid;
2854 tmp_table->field[field_num]->val_str(&rowid);
2855 if (unlikely((local_error= tbl->file->ha_rnd_pos(tbl->record[0],
2856 (uchar*)rowid.ptr()))))
2857 {
2858 err_table= tbl;
2859 goto err;
2860 }
2861 field_num++;
2862 } while ((tbl= check_opt_it++));
2863
2864 if (table->vfield &&
2865 unlikely(table->update_virtual_fields(table->file,
2866 VCOL_UPDATE_INDEXED_FOR_UPDATE)))
2867 goto err2;
2868
2869 table->status|= STATUS_UPDATED;
/* Keep the old row in record[1] for ha_update_row()/compare_record() */
2870 store_record(table,record[1]);
2871
2872 /* Copy data from temporary table to current table */
2873 for (copy_field_ptr=copy_field;
2874 copy_field_ptr != copy_field_end;
2875 copy_field_ptr++)
2876 {
2877 (*copy_field_ptr->do_copy)(copy_field_ptr);
2878 copy_field_ptr->to_field->set_has_explicit_value();
2879 }
2880
2881 table->evaluate_update_default_function();
2882 if (table->vfield &&
2883 table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_WRITE))
2884 goto err2;
2885 if (table->triggers &&
2886 table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2887 TRG_ACTION_BEFORE, TRUE))
2888 goto err2;
2889
/* Skip the engine update when we can prove the row did not change */
2890 if (!can_compare_record || compare_record(table))
2891 {
2892 int error;
2893 if ((error= cur_table->view_check_option(thd, ignore)) !=
2894 VIEW_CHECK_OK)
2895 {
2896 if (error == VIEW_CHECK_SKIP)
2897 continue;
2898 else if (unlikely(error == VIEW_CHECK_ERROR))
2899 {
2900 thd->fatal_error();
2901 goto err2;
2902 }
2903 }
2904 if (has_vers_fields && table->versioned())
2905 table->vers_update_fields();
2906
2907 if (unlikely((local_error=
2908 table->file->ha_update_row(table->record[1],
2909 table->record[0]))) &&
2910 local_error != HA_ERR_RECORD_IS_THE_SAME)
2911 {
/* With IGNORE, non-fatal engine errors are swallowed */
2912 if (!ignore ||
2913 table->file->is_fatal_error(local_error, HA_CHECK_ALL))
2914 {
2915 err_table= table;
2916 goto err;
2917 }
2918 }
2919 if (local_error != HA_ERR_RECORD_IS_THE_SAME)
2920 {
2921 updated++;
2922
/* Timestamp-based system versioning: archive the old row version */
2923 if (has_vers_fields && table->versioned())
2924 {
2925 if (table->versioned(VERS_TIMESTAMP))
2926 {
2927 store_record(table, record[2]);
2928 if ((local_error= vers_insert_history_row(table)))
2929 {
2930 restore_record(table, record[2]);
2931 err_table = table;
2932 goto err;
2933 }
2934 restore_record(table, record[2]);
2935 }
2936 updated_sys_ver++;
2937 }
2938 }
2939 else
2940 local_error= 0;
2941 }
2942
2943 if (table->triggers &&
2944 unlikely(table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2945 TRG_ACTION_AFTER, TRUE)))
2946 goto err2;
2947 }
2948
/* Record transaction-safety impact if this table got modified */
2949 if (updated != org_updated)
2950 {
2951 if (table->file->has_transactions_and_rollback())
2952 transactional_tables= TRUE;
2953 else
2954 {
2955 trans_safe= FALSE; // Can't do safe rollback
2956 thd->transaction->stmt.modified_non_trans_table= TRUE;
2957 }
2958 }
2959 (void) table->file->ha_rnd_end();
2960 (void) tmp_table->file->ha_rnd_end();
2961 check_opt_it.rewind();
2962 while (TABLE *tbl= check_opt_it++)
2963 tbl->file->ha_rnd_end();
2964
2965 }
2966 DBUG_RETURN(0);
2967
/* Report the handler error on the table that failed, then clean up */
2968 err:
2969 {
2970 prepare_record_for_error_message(local_error, err_table);
2971 err_table->file->print_error(local_error,MYF(ME_FATAL));
2972 }
2973
/* Common cleanup: close every cursor that may still be open */
2974 err2:
2975 if (table->file->inited)
2976 (void) table->file->ha_rnd_end();
2977 if (tmp_table->file->inited)
2978 (void) tmp_table->file->ha_rnd_end();
2979 check_opt_it.rewind();
2980 while (TABLE *tbl= check_opt_it++)
2981 {
2982 if (tbl->file->inited)
2983 (void) tbl->file->ha_rnd_end();
2984 }
2985
2986 if (updated != org_updated)
2987 {
2988 if (table->file->has_transactions_and_rollback())
2989 transactional_tables= TRUE;
2990 else
2991 {
2992 trans_safe= FALSE;
2993 thd->transaction->stmt.modified_non_trans_table= TRUE;
2994 }
2995 }
2996 DBUG_RETURN(1);
2997 }
2998
2999
3000 /* out: 1 if error, 0 if success */
3001
send_eof()3002 bool multi_update::send_eof()
3003 {
3004 char buff[STRING_BUFFER_USUAL_SIZE];
3005 ulonglong id;
3006 killed_state killed_status= NOT_KILLED;
3007 DBUG_ENTER("multi_update::send_eof");
3008 THD_STAGE_INFO(thd, stage_updating_reference_tables);
3009
3010 /*
3011 Does updates for the last n - 1 tables, returns 0 if ok;
3012 error takes into account killed status gained in do_updates()
3013 */
3014 int local_error= thd->is_error();
3015 if (likely(!local_error))
3016 local_error = (table_count) ? do_updates() : 0;
3017 /*
3018 if local_error is not set ON until after do_updates() then
3019 later carried out killing should not affect binlogging.
3020 */
3021 killed_status= (local_error == 0) ? NOT_KILLED : thd->killed;
3022 THD_STAGE_INFO(thd, stage_end);
3023
3024 /* We must invalidate the query cache before binlog writing and
3025 ha_autocommit_... */
3026
3027 if (updated)
3028 {
3029 query_cache_invalidate3(thd, update_tables, 1);
3030 }
3031 /*
3032 Write the SQL statement to the binlog if we updated
3033 rows and we succeeded or if we updated some non
3034 transactional tables.
3035
3036 The query has to binlog because there's a modified non-transactional table
3037 either from the query's list or via a stored routine: bug#13270,23333
3038 */
3039
3040 if (thd->transaction->stmt.modified_non_trans_table)
3041 thd->transaction->all.modified_non_trans_table= TRUE;
3042 thd->transaction->all.m_unsafe_rollback_flags|=
3043 (thd->transaction->stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);
3044
3045 if (likely(local_error == 0 ||
3046 thd->transaction->stmt.modified_non_trans_table))
3047 {
3048 if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
3049 {
3050 int errcode= 0;
3051 if (likely(local_error == 0))
3052 thd->clear_error();
3053 else
3054 errcode= query_error_code(thd, killed_status == NOT_KILLED);
3055
3056 bool force_stmt= false;
3057 for (TABLE *table= all_tables->table; table; table= table->next)
3058 {
3059 if (table->versioned(VERS_TRX_ID))
3060 {
3061 force_stmt= true;
3062 break;
3063 }
3064 }
3065 enum_binlog_format save_binlog_format;
3066 save_binlog_format= thd->get_current_stmt_binlog_format();
3067 if (force_stmt)
3068 thd->set_current_stmt_binlog_format_stmt();
3069
3070 if (thd->binlog_query(THD::ROW_QUERY_TYPE, thd->query(),
3071 thd->query_length(), transactional_tables, FALSE,
3072 FALSE, errcode) > 0)
3073 local_error= 1; // Rollback update
3074 thd->set_current_stmt_binlog_format(save_binlog_format);
3075 }
3076 }
3077 DBUG_ASSERT(trans_safe || !updated ||
3078 thd->transaction->stmt.modified_non_trans_table);
3079
3080 if (unlikely(local_error))
3081 {
3082 error_handled= TRUE; // to force early leave from ::abort_result_set()
3083 if (thd->killed == NOT_KILLED && !thd->get_stmt_da()->is_set())
3084 {
3085 /*
3086 No error message was sent and query was not killed (in which case
3087 mysql_execute_command() will send the error mesage).
3088 */
3089 my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
3090 MYF(0));
3091 }
3092 DBUG_RETURN(TRUE);
3093 }
3094
3095 if (!thd->lex->analyze_stmt)
3096 {
3097 id= thd->arg_of_last_insert_id_function ?
3098 thd->first_successful_insert_id_in_prev_stmt : 0;
3099 my_snprintf(buff, sizeof(buff), ER_THD(thd, ER_UPDATE_INFO),
3100 (ulong) found, (ulong) updated, (ulong) thd->cuted_fields);
3101 ::my_ok(thd, (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated,
3102 id, buff);
3103 }
3104 DBUG_RETURN(FALSE);
3105 }
3106