1 /* Copyright (c) 2000, 2021, Oracle and/or its affiliates.
2 
3    This program is free software; you can redistribute it and/or modify
4    it under the terms of the GNU General Public License, version 2.0,
5    as published by the Free Software Foundation.
6 
7    This program is also distributed with certain software (including
8    but not limited to OpenSSL) that is licensed under separate terms,
9    as designated in a particular file or component or in included license
10    documentation.  The authors of MySQL hereby grant you an additional
11    permission to link the program and your derivative works with the
12    separately licensed software that they have included with MySQL.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License, version 2.0, for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software Foundation,
21    51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22 
23 
24 /*
25   Single-table and multi-table updates of tables.
26   Multi-table updates were introduced by Sinisa & Monty
27 */
28 
29 #include "sql_update.h"
30 
31 #include "auth_common.h"              // check_table_access
32 #include "binlog.h"                   // mysql_bin_log
33 #include "debug_sync.h"               // DEBUG_SYNC
34 #include "field.h"                    // Field
35 #include "item.h"                     // Item
36 #include "key.h"                      // is_key_used
37 #include "opt_explain.h"              // Modification_plan
38 #include "opt_trace.h"                // Opt_trace_object
39 #include "records.h"                  // READ_RECORD
40 #include "sql_base.h"                 // open_tables_for_query
41 #include "sql_optimizer.h"            // build_equal_items, substitute_gc
42 #include "sql_resolver.h"             // setup_order
43 #include "sql_select.h"               // free_underlaid_joins
44 #include "sql_tmp_table.h"            // create_tmp_table
45 #include "sql_view.h"                 // check_key_in_view
46 #include "table.h"                    // TABLE
47 #include "table_trigger_dispatcher.h" // Table_trigger_dispatcher
48 #include "sql_partition.h"            // partition_key_modified
49 #include "sql_prepare.h"              // select_like_stmt_cmd_test
50 #include "probes_mysql.h"             // MYSQL_UPDATE_START
51 #include "sql_parse.h"                // all_tables_not_ok
52 
53 /**
54    True if the table's input and output record buffers are comparable using
55    compare_records(TABLE*).
56  */
57 bool records_are_comparable(const TABLE *table) {
58   return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
59     bitmap_is_subset(table->write_set, table->read_set);
60 }
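
/*
  Note: callers check records_are_comparable() before calling
  compare_records(); when the buffers are not comparable, the row is
  conservatively treated as changed (see the update loop in mysql_update()).
*/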
61 
62 
63 /**
64    Compares the input and output record buffers of the table to see if a row
65    has changed. The algorithm iterates over updated columns and if they are
66    nullable compares NULL bits in the buffer before comparing actual
67    data. Special care must be taken to compare only the relevant NULL bits and
68    mask out all others as they may be undefined. The storage engine will not
69    and should not touch them.
70 
71    @param table The table to evaluate.
72 
73    @return true if row has changed.
74    @return false otherwise.
75 */
76 bool compare_records(const TABLE *table)
77 {
78   assert(records_are_comparable(table));
79 
80   if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
81   {
82     /*
83       Storage engine may not have read all columns of the record.  Fields
84       (including NULL bits) not in the write_set may not have been read and
85       can therefore not be compared.
86     */
87     for (Field **ptr= table->field ; *ptr != NULL; ptr++)
88     {
89       Field *field= *ptr;
90       if (bitmap_is_set(table->write_set, field->field_index))
91       {
92         if (field->real_maybe_null())
93         {
94           uchar null_byte_index= field->null_offset();
95 
96           if (((table->record[0][null_byte_index]) & field->null_bit) !=
97               ((table->record[1][null_byte_index]) & field->null_bit))
98             return TRUE;
99         }
100         if (field->cmp_binary_offset(table->s->rec_buff_length))
101           return TRUE;
102       }
103     }
104     return FALSE;
105   }
106 
107   /*
108      The storage engine has read all columns, so it's safe to compare all bits
109      including those not in the write_set. This is cheaper than the field-by-field
110      comparison done above.
111   */
112   if (table->s->blob_fields + table->s->varchar_fields == 0)
113     // Fixed-size record: do bitwise comparison of the records
114     return cmp_record(table,record[1]);
115   /* Compare null bits */
116   if (memcmp(table->null_flags,
117 	     table->null_flags+table->s->rec_buff_length,
118 	     table->s->null_bytes))
119     return TRUE;				// Diff in NULL value
120   /* Compare updated fields */
121   for (Field **ptr= table->field ; *ptr ; ptr++)
122   {
123     if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
124 	(*ptr)->cmp_binary_offset(table->s->rec_buff_length))
125       return TRUE;
126   }
127   return FALSE;
128 }
129 
130 
131 /**
132   Check that all fields are base table columns.
133   Replace columns from views with base table columns, and save original items in
134   a list for later privilege checking.
135 
136   @param      thd              thread handler
137   @param      items            Items for check
138   @param[out] original_columns Saved list of items which are the original
139                                resolved columns (if not NULL)
140 
141   @return false if success, true if error (Items not updatable columns or OOM)
142 */
143 
144 static bool check_fields(THD *thd, List<Item> &items,
145                          List<Item> *original_columns)
146 {
147   List_iterator<Item> it(items);
148   Item *item;
149 
150   while ((item= it++))
151   {
152     // Save original item for later privilege checking
153     if (original_columns && original_columns->push_back(item))
154       return true;                   /* purecov: inspected */
155 
156     /*
157       We make a temporary copy of the Item_field, to avoid the influence of
158       changing result_field on Item_ref objects which refer to this field.
159     */
160     Item_field *const base_table_field= item->field_for_view_update();
161     assert(base_table_field != NULL);
162 
163     Item_field *const cloned_field= new Item_field(thd, base_table_field);
164     if (!cloned_field)
165       return true;                  /* purecov: inspected */
166 
167     thd->change_item_tree(it.ref(), cloned_field);
168   }
169   return false;
170 }
171 
172 
173 /**
174   Check if all expressions in list are constant expressions
175 
176   @param[in] values List of expressions
177 
178   @retval true Only constant expressions
179   @retval false At least one non-constant expression
180 */
181 
182 static bool check_constant_expressions(List<Item> &values)
183 {
184   Item *value;
185   List_iterator_fast<Item> v(values);
186   DBUG_ENTER("check_constant_expressions");
187 
188   while ((value= v++))
189   {
190     if (!value->const_item())
191     {
192       DBUG_PRINT("exit", ("expression is not constant"));
193       DBUG_RETURN(false);
194     }
195   }
196   DBUG_PRINT("exit", ("expression is constant"));
197   DBUG_RETURN(true);
198 }
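
/*
  Note: used by mysql_update() when deciding whether read-before-write
  removal can be attempted; only updates that assign constant values qualify.
*/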
199 
200 
201 /**
202   Prepare a table for an UPDATE operation
203 
204   @param thd     Thread pointer
205   @param select  Query block
206 
207   @returns false if success, true if error
208 */
209 
210 bool mysql_update_prepare_table(THD *thd, SELECT_LEX *select)
211 {
212   TABLE_LIST *const tr= select->table_list.first;
213 
214   if (!tr->is_view())
215     return false;
216 
217   // Semi-join is not possible, as single-table UPDATE does not support joins
218   if (tr->resolve_derived(thd, false))
219     return true;
220 
221   if (select->merge_derived(thd, tr))
222     return true;                 /* purecov: inspected */
223 
224   if (!tr->is_updatable())
225   {
226     my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tr->alias, "UPDATE");
227     return true;
228   }
229 
230   return false;
231 }
232 
233 /*
234   Process usual UPDATE
235 
236   SYNOPSIS
237     mysql_update()
238     thd			thread handler
239     fields		fields for update
240     values		values of fields for update
241     limit		limit clause
242     handle_duplicates	how to handle duplicates
243 
244   RETURN
245     false - OK
246     true  - error
247 */
248 
249 bool mysql_update(THD *thd,
250                   List<Item> &fields,
251                   List<Item> &values,
252                   ha_rows limit,
253                   enum enum_duplicates handle_duplicates,
254                   ha_rows *found_return, ha_rows *updated_return)
255 {
256   DBUG_ENTER("mysql_update");
257 
258   myf           error_flags= MYF(0);            /**< Flag for fatal errors */
259   const bool    using_limit= limit != HA_POS_ERROR;
260   bool          used_key_is_modified= false;
261   bool          transactional_table, will_batch;
262   int           res;
263   int           error= 1;
264   int           loc_error;
265   uint          used_index, dup_key_found;
266   bool          need_sort= true;
267   bool          reverse= false;
268   bool          using_filesort;
269   bool          read_removal= false;
270   ha_rows       updated, found;
271   READ_RECORD   info;
272   ulonglong     id;
273   THD::killed_state killed_status= THD::NOT_KILLED;
274   COPY_INFO update(COPY_INFO::UPDATE_OPERATION, &fields, &values);
275 
276   SELECT_LEX   *const select_lex= thd->lex->select_lex;
277   TABLE_LIST   *const table_list= select_lex->get_table_list();
278 
279   select_lex->make_active_options(0, 0);
280 
281   const bool safe_update= thd->variables.option_bits & OPTION_SAFE_UPDATES;
282 
283   THD_STAGE_INFO(thd, stage_init);
284 
285   if (!table_list->is_updatable())
286   {
287     my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
288     DBUG_RETURN(true);
289   }
290 
291   TABLE_LIST *const update_table_ref= table_list->updatable_base_table();
292   TABLE      *const table= update_table_ref->table;
293 
294   /* Calculate "table->covering_keys" based on the WHERE */
295   table->covering_keys= table->s->keys_in_use;
296   table->quick_keys.clear_all();
297   table->possible_quick_keys.clear_all();
298 
299   key_map covering_keys_for_cond;
300   if (mysql_prepare_update(thd, update_table_ref, &covering_keys_for_cond,
301                            values))
302     DBUG_RETURN(1);
303 
304   Item *conds;
305   if (select_lex->get_optimizable_conditions(thd, &conds, NULL))
306     DBUG_RETURN(1);
307 
308   if (update.add_function_default_columns(table, table->write_set))
309     DBUG_RETURN(1);
310 
311   ORDER *order= select_lex->order_list.first;
312 
313   /*
314     See if we can substitute expressions with equivalent generated
315     columns in the WHERE and ORDER BY clauses of the UPDATE statement.
316     It is unclear if this is best to do before or after the other
317     substitutions performed by substitute_for_best_equal_field(). Do
318     it here for now, to keep it consistent with how multi-table
319     updates are optimized in JOIN::optimize().
320   */
321   if (conds || order)
322     static_cast<void>(substitute_gc(thd, select_lex, conds, NULL, order));
323 
324   if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
325       update.function_defaults_apply(table))
326     /*
327       A column is to be set to its ON UPDATE function default only if other
328       columns of the row are changing. To know this, we must be able to
329       compare the "before" and "after" value of those columns
330       (i.e. records_are_comparable() must be true below). Thus, we must read
331       those columns:
332     */
333     bitmap_union(table->read_set, table->write_set);
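  /*
    Illustrative example: a column declared as
      ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
    must be set only if some other column of the row actually changes value,
    which requires reading the columns being compared.
  */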
334 
335   // Don't count on usage of 'only index' when calculating which key to use
336   table->covering_keys.clear_all();
337 
338   /*
339     This must be done before partition pruning, since prune_partitions()
340     uses table->write_set to determine whether locks may be pruned too.
341   */
342   if (table->triggers && table->triggers->mark_fields(TRG_EVENT_UPDATE))
343     DBUG_RETURN(true);
344 
345   QEP_TAB_standalone qep_tab_st;
346   QEP_TAB &qep_tab= qep_tab_st.as_QEP_TAB();
347 
348   if (table->part_info)
349   {
350     if (prune_partitions(thd, table, conds))
351       DBUG_RETURN(1);
352     if (table->all_partitions_pruned_away)
353     {
354       /* No matching records */
355       if (thd->lex->describe)
356       {
357         /*
358           Initialize the plan only for regular EXPLAIN. Don't do it for
359           EXPLAIN FOR CONNECTION, as the plan would exist for a very short
360           period of time but would cost taking/releasing a mutex, so it's
361           not worth bothering with. Same for similar cases below.
362         */
363         Modification_plan plan(thd, MT_UPDATE, table,
364                                "No matching rows after partition pruning",
365                                true, 0);
366         error= explain_single_table_modification(thd, &plan, select_lex);
367         goto exit_without_my_ok;
368       }
369       my_ok(thd);
370       DBUG_RETURN(0);
371     }
372   }
373   if (lock_tables(thd, table_list, thd->lex->table_count, 0))
374     DBUG_RETURN(1);
375 
376   // Must be done after lock_tables()
377   if (conds)
378   {
379     COND_EQUAL *cond_equal= NULL;
380     Item::cond_result result;
381     if (table_list->check_option)
382     {
383       /*
384         If this UPDATE is on a view with CHECK OPTION, Item_fields
385         must not be replaced by constants. The reason is that when
386         'conds' is optimized, 'check_option' is also optimized (it is
387         part of 'conds'). Const replacement is fine for 'conds'
388         because it is evaluated on a read row, but 'check_option' is
389         evaluated on a row with updated fields and needs those updated
390         values to be correct.
391 
392         Example:
393         CREATE VIEW v1 ... WHERE fld < 2 WITH CHECK_OPTION
394         UPDATE v1 SET fld=4 WHERE fld=1
395 
396         check_option is  "(fld < 2)"
397         conds is         "(fld < 2) and (fld = 1)"
398 
399         optimize_cond() would propagate fld=1 to the first argument of
400         the AND to create "(1 < 2) AND (fld = 1)". After this,
401         check_option would be "(1 < 2)". But for check_option to work
402         it must be evaluated with the *updated* value of fld: 4.
403         Otherwise it will evaluate to true even when it should be
404         false, which is the case for the UPDATE statement above.
405 
406         Thus, if there is a check_option, we do only the "safe" parts
407         of optimize_cond(): Item_row -> Item_func_eq conversion (to
408         enable range access) and removal of always true/always false
409         predicates.
410 
411         An alternative to restricting this optimization of 'conds' in
412         the presense of check_option: the Item-tree of 'check_option'
413         could be cloned before optimizing 'conds' and thereby avoid
414         const replacement. However, at the moment there is no such
415         thing as Item::clone().
416       */
417       if (build_equal_items(thd, conds, &conds, NULL, false,
418                             select_lex->join_list, &cond_equal))
419         goto exit_without_my_ok;
420       if (remove_eq_conds(thd, conds, &conds, &result))
421         goto exit_without_my_ok;
422     }
423     else
424     {
425       if (optimize_cond(thd, &conds, &cond_equal, select_lex->join_list,
426                         &result))
427         goto exit_without_my_ok;
428     }
429 
430     if (result == Item::COND_FALSE)
431     {
432       limit= 0;                                   // Impossible WHERE
433       if (thd->lex->describe)
434       {
435         Modification_plan plan(thd, MT_UPDATE, table,
436                                "Impossible WHERE", true, 0);
437         error= explain_single_table_modification(thd, &plan, select_lex);
438         goto exit_without_my_ok;
439       }
440     }
441     if (conds)
442     {
443       conds= substitute_for_best_equal_field(conds, cond_equal, 0);
444       if (conds == NULL)
445       {
446         error= true;
447         goto exit_without_my_ok;
448       }
449       conds->update_used_tables();
450     }
451   }
452 
453   /*
454     Also try a second time after locking, to prune when subqueries and
455     stored programs can be evaluated.
456   */
457   if (table->part_info)
458   {
459     if (prune_partitions(thd, table, conds))
460       DBUG_RETURN(1);
461     if (table->all_partitions_pruned_away)
462     {
463       if (thd->lex->describe)
464       {
465         Modification_plan plan(thd, MT_UPDATE, table,
466                                "No matching rows after partition pruning",
467                                true, 0);
468         error= explain_single_table_modification(thd, &plan, select_lex);
469         goto exit_without_my_ok;
470       }
471       my_ok(thd);
472       DBUG_RETURN(0);
473     }
474   }
475   // Initialize the cost model that will be used for this table
476   table->init_cost_model(thd->cost_model());
477 
478   /* Update the table->file->stats.records number */
479   table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
480 
481   table->mark_columns_needed_for_update(false/*mark_binlog_columns=false*/);
482   if (table->vfield &&
483       validate_gc_assignment(thd, &fields, &values, table))
484     DBUG_RETURN(0);
485 
486   error= 0;
487   qep_tab.set_table(table);
488   qep_tab.set_condition(conds);
489 
490   { // Enter scope for optimizer trace wrapper
491     Opt_trace_object wrapper(&thd->opt_trace);
492     wrapper.add_utf8_table(update_table_ref);
493 
494     bool impossible= false;
495     if (error || limit == 0)
496       impossible= true;
497     else if (conds != NULL)
498     {
499       key_map keys_to_use(key_map::ALL_BITS), needed_reg_dummy;
500       QUICK_SELECT_I *qck;
501       impossible= test_quick_select(thd, keys_to_use, 0, limit, safe_update,
502                                     ORDER::ORDER_NOT_RELEVANT, &qep_tab,
503                                     conds, &needed_reg_dummy, &qck,
504                                     qep_tab.table()->force_index) < 0;
505       qep_tab.set_quick(qck);
506     }
507     if (impossible)
508     {
509       if (thd->lex->describe && !error && !thd->is_error())
510       {
511         Modification_plan plan(thd, MT_UPDATE, table,
512                                "Impossible WHERE", true, 0);
513         error= explain_single_table_modification(thd, &plan, select_lex);
514         goto exit_without_my_ok;
515       }
516       free_underlaid_joins(thd, select_lex);
517       /*
518         There was an error or the error was already sent by
519         the quick select evaluation.
520         TODO: Add error code output parameter to Item::val_xxx() methods.
521         Currently they rely on the user checking DA for
522         errors when unwinding the stack after calling Item::val_xxx().
523       */
524       if (error || thd->is_error())
525       {
526         DBUG_RETURN(1);				// Error in where
527       }
528 
529       char buff[MYSQL_ERRMSG_SIZE];
530       my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), 0, 0,
531                   (long) thd->get_stmt_da()->current_statement_cond_count());
532       my_ok(thd, 0, 0, buff);
533 
534       DBUG_PRINT("info",("0 records updated"));
535       DBUG_RETURN(0);
536     }
537   } // Ends scope for optimizer trace wrapper
538 
539   /* If running in safe sql mode, don't allow updates without keys */
540   if (table->quick_keys.is_clear_all())
541   {
542     thd->server_status|= SERVER_QUERY_NO_INDEX_USED;
543 
544     /*
545       No safe update error will be returned if:
546       1) the statement is an EXPLAIN, or
547       2) a LIMIT is present.
548 
549       Append the first warning (if any) to the error message. Allows the user
550       to understand why index access couldn't be chosen.
551     */
552     if (!thd->lex->is_explain() && safe_update && !using_limit)
553     {
554       my_error(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, MYF(0),
555                thd->get_stmt_da()->get_first_condition_message());
556       DBUG_RETURN(true);
557     }
558   }
559   if (select_lex->has_ft_funcs() && init_ftfuncs(thd, select_lex))
560     goto exit_without_my_ok;
561 
562   table->update_const_key_parts(conds);
563   order= simple_remove_const(order, conds);
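  /*
    Illustrative example: for "WHERE key_col = 5 ORDER BY key_col" the ORDER BY
    element is constant under the WHERE clause and is removed above, so no
    sorting is needed for it.
  */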
564 
565   used_index= get_index_for_order(order, &qep_tab, limit,
566                                   &need_sort, &reverse);
567   if (need_sort)
568   { // Assign table scan index to check below for modified key fields:
569     used_index= table->file->key_used_on_scan;
570   }
571   if (used_index != MAX_KEY)
572   { // Check if we are modifying a key that we use to search with:
573     used_key_is_modified= is_key_used(table, used_index, table->write_set);
574   }
575   else if (qep_tab.quick())
576   {
577     /*
578       select->quick != NULL and used_index == MAX_KEY happens for index
579       merge and should be handled in a different way.
580     */
581     used_key_is_modified= (!qep_tab.quick()->unique_key_range() &&
582                            qep_tab.quick()->is_keys_used(table->write_set));
583   }
584 
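  /*
    Updating a partitioning column may move the row to another partition, so
    it is treated like updating a used key: matching rows are collected first
    and updated in a separate pass.
  */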
585   used_key_is_modified|= partition_key_modified(table, table->write_set);
586   table->mark_columns_per_binlog_row_image();
587 
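  // Filesort is needed only if there is an ORDER BY and no usable index
  // delivers the rows in that order (need_sort was set above).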
588   using_filesort= order && need_sort;
589 
590   {
591     ha_rows rows;
592     if (qep_tab.quick())
593       rows= qep_tab.quick()->records;
594     else if (!conds && !need_sort && limit != HA_POS_ERROR)
595       rows= limit;
596     else
597     {
598       assert(table->pos_in_table_list == update_table_ref);
599       update_table_ref->fetch_number_of_rows();
600       rows= table->file->stats.records;
601     }
602     qep_tab.set_quick_optim();
603     qep_tab.set_condition_optim();
604     DEBUG_SYNC(thd, "before_single_update");
605     Modification_plan plan(thd, MT_UPDATE, &qep_tab,
606                            used_index, limit,
607                            (!using_filesort && (used_key_is_modified || order)),
608                            using_filesort, used_key_is_modified, rows);
609     DEBUG_SYNC(thd, "planned_single_update");
610     if (thd->lex->describe)
611     {
612       error= explain_single_table_modification(thd, &plan, select_lex);
613       goto exit_without_my_ok;
614     }
615 
616     if (used_key_is_modified || order)
617     {
618       /*
619         We can't update the table directly; we must first find all
620         matching rows before updating the table!
621       */
622 
623       if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
624         table->set_keyread(true);
625 
626       /* note: We avoid sorting if we sort on the used index */
627       if (using_filesort)
628       {
629         /*
630           Doing an ORDER BY;  Let filesort find and sort the rows we are going
631           to update
632           NOTE: filesort will call table->prepare_for_position()
633         */
634         ha_rows examined_rows, found_rows, returned_rows;
635         Filesort fsort(&qep_tab, order, limit);
636 
637         assert(table->sort.io_cache == NULL);
638         table->sort.io_cache= (IO_CACHE*) my_malloc(key_memory_TABLE_sort_io_cache,
639                                                     sizeof(IO_CACHE),
640                                                     MYF(MY_FAE | MY_ZEROFILL));
641 
642         if (filesort(thd, &fsort, true,
643                      &examined_rows, &found_rows, &returned_rows))
644           goto exit_without_my_ok;
645 
646         table->sort.found_records= returned_rows;
647         thd->inc_examined_row_count(examined_rows);
648         /*
649           Filesort has already found and selected the rows we want to update,
650           so we don't need the where clause
651         */
652         qep_tab.set_quick(NULL);
653         qep_tab.set_condition(NULL);
654       }
655       else
656       {
657         /*
658           We are doing a search on a key that is updated. In this case
659           we go through the matching rows, save pointers to them and
660           update the rows in a separate loop based on those pointers.
661         */
662         table->prepare_for_position();
663 
664         /* If quick select is used, initialize it before retrieving rows. */
665         if (qep_tab.quick() && (error= qep_tab.quick()->reset()))
666         {
667           if (table->file->is_fatal_error(error))
668             error_flags|= ME_FATALERROR;
669 
670           table->file->print_error(error, error_flags);
671           goto exit_without_my_ok;
672         }
673         table->file->try_semi_consistent_read(1);
674 
675         /*
676           When we get here, we have one of the following options:
677           A. used_index == MAX_KEY
678           This means we should use full table scan, and start it with
679           init_read_record call
680           B. used_index != MAX_KEY
681           B.1 quick select is used, start the scan with init_read_record
682           B.2 quick select is not used, this is full index scan (with LIMIT)
683           Full index scan must be started with init_read_record_idx
684         */
685 
686         if (used_index == MAX_KEY || (qep_tab.quick()))
687           error= init_read_record(&info, thd, NULL, &qep_tab, 0, 1, FALSE);
688         else
689           error= init_read_record_idx(&info, thd, table, 1, used_index, reverse);
690 
691         if (error)
692           goto exit_without_my_ok;
693 
694         THD_STAGE_INFO(thd, stage_searching_rows_for_update);
695         ha_rows tmp_limit= limit;
696 
697         IO_CACHE *tempfile= (IO_CACHE*) my_malloc(key_memory_TABLE_sort_io_cache,
698                                                   sizeof(IO_CACHE),
699                                                   MYF(MY_FAE | MY_ZEROFILL));
700 
701         if (open_cached_file(tempfile, mysql_tmpdir,TEMP_PREFIX,
702                              DISK_BUFFER_SIZE, MYF(MY_WME)))
703         {
704           my_free(tempfile);
705           goto exit_without_my_ok;
706         }
707 
708         while (!(error=info.read_record(&info)) && !thd->killed)
709         {
710           thd->inc_examined_row_count(1);
711           bool skip_record= FALSE;
712           if (qep_tab.skip_record(thd, &skip_record))
713           {
714             error= 1;
715             /*
716              Don't try unlocking the row if skip_record reported an error since
717              in this case the transaction might have been rolled back already.
718             */
719             break;
720           }
721           if (!skip_record)
722           {
723             if (table->file->was_semi_consistent_read())
724               continue;  /* repeat the read of the same row if it still exists */
725 
726             table->file->position(table->record[0]);
727             if (my_b_write(tempfile, table->file->ref,
728                            table->file->ref_length))
729             {
730               error=1; /* purecov: inspected */
731               break; /* purecov: inspected */
732             }
733             if (!--limit && using_limit)
734             {
735               error= -1;
736               break;
737             }
738           }
739           else
740             table->file->unlock_row();
741         }
742         if (thd->killed && !error)				// Aborted
743           error= 1; /* purecov: inspected */
744         limit= tmp_limit;
745         table->file->try_semi_consistent_read(0);
746         end_read_record(&info);
747         /* Change select to use tempfile */
748         if (reinit_io_cache(tempfile, READ_CACHE, 0L, 0, 0))
749           error=1; /* purecov: inspected */
750 
751         assert(table->sort.io_cache == NULL);
752         /*
753           After this assignment, init_read_record() will run, and decide to
754           read from sort.io_cache. This cache will be freed when qep_tab is
755           destroyed.
756          */
757         table->sort.io_cache= tempfile;
758         qep_tab.set_quick(NULL);
759         qep_tab.set_condition(NULL);
760         if (error >= 0)
761           goto exit_without_my_ok;
762       }
763       if (used_index < MAX_KEY && covering_keys_for_cond.is_set(used_index))
764         table->set_keyread(false);
765       table->file->ha_index_or_rnd_end();
766     }
767 
768     if (thd->lex->is_ignore())
769       table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
770 
771     if (qep_tab.quick() && (error= qep_tab.quick()->reset()))
772     {
773       if (table->file->is_fatal_error(error))
774         error_flags|= ME_FATALERROR;
775 
776       table->file->print_error(error, error_flags);
777       goto exit_without_my_ok;
778     }
779 
780     table->file->try_semi_consistent_read(1);
781     if ((error= init_read_record(&info, thd, NULL, &qep_tab, 0, 1, FALSE)))
782       goto exit_without_my_ok;
783 
784     updated= found= 0;
785     /*
786       Generate an error (in TRADITIONAL mode) or warning
787       when trying to set a NOT NULL field to NULL.
788     */
789     thd->count_cuted_fields= CHECK_FIELD_WARN;
790     thd->cuted_fields=0L;
791     THD_STAGE_INFO(thd, stage_updating);
792 
793     transactional_table= table->file->has_transactions();
794 
795     if (table->triggers &&
796         table->triggers->has_triggers(TRG_EVENT_UPDATE,
797                                       TRG_ACTION_AFTER))
798     {
799       /*
800         The table has AFTER UPDATE triggers that might access the subject
801         table and therefore might need the update to be done immediately.
802         So we turn off batching.
803       */
804       (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
805       will_batch= FALSE;
806     }
807     else
808       will_batch= !table->file->start_bulk_update();
809 
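    /*
      Read-before-write removal: possible only when the engine supports it,
      the statement has no IGNORE, no LIMIT and no UPDATE triggers, the rows
      are fetched through a quick range scan on a key, and all assigned
      values are constants (checked by check_constant_expressions()).
    */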
810     if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
811         !thd->lex->is_ignore() && !using_limit &&
812         !(table->triggers && table->triggers->has_update_triggers()) &&
813         qep_tab.quick() && qep_tab.quick()->index != MAX_KEY &&
814         check_constant_expressions(values))
815       read_removal= table->check_read_removal(qep_tab.quick()->index);
816 
817     while (true)
818     {
819       error= info.read_record(&info);
820       if (error || thd->killed)
821         break;
822       thd->inc_examined_row_count(1);
823       bool skip_record;
824       if ((!qep_tab.skip_record(thd, &skip_record) && !skip_record))
825       {
826         if (table->file->was_semi_consistent_read())
827           continue;  /* repeat the read of the same row if it still exists */
828 
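        // Save the current row into record[1] as the "before" image; it is
        // later used by compare_records() and ha_update_row().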
829         store_record(table,record[1]);
830         if (fill_record_n_invoke_before_triggers(thd, &update, fields, values,
831                                                  table, TRG_EVENT_UPDATE, 0))
832           break; /* purecov: inspected */
833 
834         found++;
835 
836         if (!records_are_comparable(table) || compare_records(table))
837         {
838           if ((res= table_list->view_check_option(thd)) != VIEW_CHECK_OK)
839           {
840             found--;
841             if (res == VIEW_CHECK_SKIP)
842               continue;
843             else if (res == VIEW_CHECK_ERROR)
844             {
845               error= 1;
846               break;
847             }
848           }
849 
850           /*
851             In order to keep MySQL legacy behavior, we do this update *after*
852             the CHECK OPTION test. Proper behavior is probably to throw an
853             error, though.
854           */
855           update.set_function_defaults(table);
856 
857           if (will_batch)
858           {
859             /*
860               Typically a batched handler can execute the batched jobs when:
861               1) it is specifically told to do so
862               2) it is not a good idea to batch anymore
863               3) it is necessary to send the batch for other reasons
864               (one such reason is when READs must be performed)
865 
866               1) is covered by exec_bulk_update calls.
867               2) and 3) is handled by the bulk_update_row method.
868 
869               bulk_update_row can execute the updates including the one
870               defined in the bulk_update_row or not including the row
871               in the call. This is up to the handler implementation and can
872               vary from call to call.
873 
874               dup_key_found reports the number of duplicate keys found in
875               those updates actually executed. It is only reported if the
876               extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
877               If it hasn't been issued, an error code is returned and this
878               number can be ignored. Thus any handler that implements batching
879               for UPDATE IGNORE must also handle this extra call properly.
880 
881               If a duplicate key is found on the record included in this
882               call then it should be included in the count of dup_key_found
883               and error should be set to 0 (only if these errors are ignored).
884             */
885             error= table->file->ha_bulk_update_row(table->record[1],
886                                                    table->record[0],
887                                                    &dup_key_found);
888             limit+= dup_key_found;
889             updated-= dup_key_found;
890           }
891           else
892           {
893             /* Non-batched update */
894             error= table->file->ha_update_row(table->record[1],
895                                               table->record[0]);
896           }
897           if (error == 0)
898             updated++;
899           else if (error == HA_ERR_RECORD_IS_THE_SAME)
900             error= 0;
901           else
902           {
903             if (table->file->is_fatal_error(error))
904               error_flags|= ME_FATALERROR;
905 
906             table->file->print_error(error, error_flags);
907 
908             // The error can have been downgraded to warning by IGNORE.
909             if (thd->is_error())
910               break;
911           }
912         }
913         else
914         {
915           /*
916              Some no-op DML statements do not go through the SE.
917              In read_only mode, if a no-op DML statement is not marked as
918              read_write, binlogging can't be restricted for that statement.
919              To keep binlogging consistent in read_only mode, mark such a
920              statement as noop_read_write here when it doesn't go through
921              the SE.
922           */
923           table->file->mark_trx_noop_dml();
924         }
925 
926         if (!error && table->triggers &&
927             table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
928                                               TRG_ACTION_AFTER, TRUE))
929         {
930           error= 1;
931           break;
932         }
933 
934         if (!--limit && using_limit)
935         {
936           /*
937             We have reached end-of-file in the most common situations: when
938             no batching has occurred, when batching was supposed to occur
939             but no updates were made, and when the batch execution was
940             performed without error and without finding any duplicate keys.
941             If the batched updates were performed with errors we need to
942             check them, and if there was no error but duplicate keys were
943             found we need to continue since those are not counted in limit.
944           */
945           if (will_batch &&
946               ((error= table->file->exec_bulk_update(&dup_key_found)) ||
947                dup_key_found))
948           {
949  	    if (error)
950             {
951               /* purecov: begin inspected */
952               /*
953                 The handler should not report error of duplicate keys if they
954                 are ignored. This is a requirement on batching handlers.
955               */
956               if (table->file->is_fatal_error(error))
957                 error_flags|= ME_FATALERROR;
958 
959               table->file->print_error(error, error_flags);
960               error= 1;
961               break;
962               /* purecov: end */
963             }
964             /*
965               Either an error was found and we are ignoring errors or there
966               were duplicate keys found. In both cases we need to correct
967               the counters and continue the loop.
968             */
969             limit= dup_key_found; // limit is 0 when we get here, so this effectively adds dup_key_found
970             updated-= dup_key_found;
971           }
972           else
973           {
974             error= -1;				// Simulate end of file
975             break;
976           }
977         }
978       }
979       /*
980         Don't try unlocking the row if skip_record reported an error since in
981         this case the transaction might have been rolled back already.
982       */
983       else if (!thd->is_error())
984         table->file->unlock_row();
985       else
986       {
987         error= 1;
988         break;
989       }
990       thd->get_stmt_da()->inc_current_row_for_condition();
991       if (thd->is_error())
992       {
993         error= 1;
994         break;
995       }
996     }
997     table->auto_increment_field_not_null= FALSE;
998     dup_key_found= 0;
999     /*
1000       Caching the killed status to pass as the arg to the query event
1001       constructor; the cached value cannot change, whereas the killed status
1002       can change (externally) after this point, and a change of the latter
1003       won't affect binlogging.
1004       It's assumed that if an error was set in combination with an effective
1005       killed status then the error is due to killing.
1006     */
1007     killed_status= thd->killed; // get the status of the volatile
1008     // simulated killing after the loop must be ineffective for binlogging
1009     DBUG_EXECUTE_IF("simulate_kill_bug27571",
1010                     {
1011                       thd->killed= THD::KILL_QUERY;
1012                     };);
1013     error= (killed_status == THD::NOT_KILLED)?  error : 1;
1014 
1015     if (error &&
1016         will_batch &&
1017         (loc_error= table->file->exec_bulk_update(&dup_key_found)))
1018       /*
1019         An error has occurred when a batched update was performed and returned
1020         an error indication. It cannot be an allowed duplicate key error since
1021         we require the batching handler to treat this as a normal behavior.
1022 
1023         Otherwise we simply remove the number of duplicate keys records found
1024         in the batched update.
1025       */
1026     {
1027       /* purecov: begin inspected */
1028       error_flags= MYF(0);
1029       if (table->file->is_fatal_error(loc_error))
1030         error_flags|= ME_FATALERROR;
1031 
1032       table->file->print_error(loc_error, error_flags);
1033       error= 1;
1034       /* purecov: end */
1035     }
1036     else
1037       updated-= dup_key_found;
1038     if (will_batch)
1039       table->file->end_bulk_update();
1040     table->file->try_semi_consistent_read(0);
1041 
1042     if (read_removal)
1043     {
1044       /* Only the handler knows how many records were really written */
1045       updated= table->file->end_read_removal();
1046       if (!records_are_comparable(table))
1047         found= updated;
1048     }
1049 
1050   } // End of scope for Modification_plan
1051 
1052   if (!transactional_table && updated > 0)
1053     thd->get_transaction()->mark_modified_non_trans_table(
1054       Transaction_ctx::STMT);
1055 
1056   end_read_record(&info);
1057   THD_STAGE_INFO(thd, stage_end);
1058   (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
1059 
1060   /*
1061     Invalidate the table in the query cache if something changed.
1062     This must be before binlog writing and ha_autocommit_...
1063   */
1064   if (updated)
1065     query_cache.invalidate_single(thd, update_table_ref, true);
1066 
1067   /*
1068     error < 0 means really no error at all: we processed all rows until the
1069     last one without error. error > 0 means an error (e.g. unique key
1070     violation and no IGNORE or REPLACE). error == 0 is also an error (if
1071     preparing the record or invoking before triggers fails). See
1072     ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
1073     Sometimes we want to binlog even if we updated no rows, in case the user
1074     ran the statement to make sure master and slave are in the same state.
1075   */
1076   if ((error < 0) || thd->get_transaction()->cannot_safely_rollback(
1077       Transaction_ctx::STMT))
1078   {
1079 #ifdef WITH_WSREP
1080     if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
1081 #else
1082     if (mysql_bin_log.is_open())
1083 #endif
1084     {
1085       int errcode= 0;
1086       if (error < 0)
1087         thd->clear_error();
1088       else
1089         errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
1090 
1091       if (thd->binlog_query(THD::ROW_QUERY_TYPE,
1092                             thd->query().str, thd->query().length,
1093                             transactional_table, FALSE, FALSE, errcode))
1094       {
1095         error=1;				// Rollback update
1096       }
1097     }
1098   }
1099   assert(transactional_table || !updated ||
1100          thd->get_transaction()->cannot_safely_rollback(
1101                                                         Transaction_ctx::STMT));
1102   free_underlaid_joins(thd, select_lex);
1103 
1104   /* If LAST_INSERT_ID(X) was used, report X */
1105   id= thd->arg_of_last_insert_id_function ?
1106     thd->first_successful_insert_id_in_prev_stmt : 0;
1107 
1108   if (error < 0)
1109   {
1110     char buff[MYSQL_ERRMSG_SIZE];
1111     my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (long) found,
1112                 (long) updated,
1113                 (long) thd->get_stmt_da()->current_statement_cond_count());
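    // With CLIENT_FOUND_ROWS the client is reported the number of matched
    // rows; otherwise the number of rows actually changed.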
1114     my_ok(thd, thd->get_protocol()->has_client_capability(CLIENT_FOUND_ROWS) ?
1115           found : updated, id, buff);
1116     DBUG_PRINT("info",("%ld records updated", (long) updated));
1117   }
1118   thd->count_cuted_fields= CHECK_FIELD_IGNORE;		/* calc cuted fields */
1119   *found_return= found;
1120   *updated_return= updated;
1121   DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
1122 
1123 exit_without_my_ok:
1124   free_underlaid_joins(thd, select_lex);
1125   table->set_keyread(FALSE);
1126   DBUG_RETURN(error);
1127 }
1128 
1129 /**
1130   Prepare items in UPDATE statement
1131 
1132   @param thd              thread handler
1133   @param update_table_ref Reference to table being updated
1134   @param[out] covering_keys_for_cond Keys which are covering for conditions
1135                                      and ORDER BY clause.
1136 
1137   @return false if success, true if error
1138 */
1139 bool mysql_prepare_update(THD *thd, const TABLE_LIST *update_table_ref,
1140                           key_map *covering_keys_for_cond,
1141                           List<Item> &update_value_list)
1142 {
1143   List<Item> all_fields;
1144   LEX *const lex= thd->lex;
1145   SELECT_LEX *const select= lex->select_lex;
1146   TABLE_LIST *const table_list= select->get_table_list();
1147   DBUG_ENTER("mysql_prepare_update");
1148 
1149   assert(select->item_list.elements == update_value_list.elements);
1150 
1151   lex->allow_sum_func= 0;
1152 
1153   if (select->setup_tables(thd, table_list, false))
1154     DBUG_RETURN(true);
1155   if (select->derived_table_count &&
1156       select->check_view_privileges(thd, UPDATE_ACL, SELECT_ACL))
1157     DBUG_RETURN(true);
1158 
1159   thd->want_privilege= SELECT_ACL;
1160   enum enum_mark_columns mark_used_columns_saved= thd->mark_used_columns;
1161   thd->mark_used_columns= MARK_COLUMNS_READ;
1162 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1163   table_list->set_want_privilege(SELECT_ACL);
1164 #endif
1165   if (select->setup_conds(thd))
1166     DBUG_RETURN(true);
1167   if (select->setup_ref_array(thd))
1168     DBUG_RETURN(true);                          /* purecov: inspected */
1169   if (select->order_list.first &&
1170       setup_order(thd, select->ref_pointer_array,
1171                   table_list, all_fields, all_fields,
1172                   select->order_list.first))
1173     DBUG_RETURN(true);
1174 
1175   // Return covering keys derived from conditions and ORDER BY clause:
1176   *covering_keys_for_cond= update_table_ref->table->covering_keys;
1177 
1178   // Check the fields we are going to modify
1179 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1180   table_list->set_want_privilege(UPDATE_ACL);
1181 #endif
1182   if (setup_fields(thd, Ref_ptr_array(), select->item_list, UPDATE_ACL, NULL,
1183                    false, true))
1184     DBUG_RETURN(true);                     /* purecov: inspected */
1185 
1186   if (check_fields(thd, select->item_list, NULL))
1187     DBUG_RETURN(true);
1188 
1189   // check_key_in_view() may send an SQL note, but we only want it once.
1190   if (select->first_execution &&
1191       check_key_in_view(thd, table_list, update_table_ref))
1192   {
1193     my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
1194     DBUG_RETURN(true);
1195   }
1196 
1197   table_list->set_want_privilege(SELECT_ACL);
1198 
1199   if (setup_fields(thd, Ref_ptr_array(), update_value_list, SELECT_ACL, NULL,
1200                    false, false))
1201     DBUG_RETURN(true);                          /* purecov: inspected */
1202 
1203   thd->mark_used_columns= mark_used_columns_saved;
1204 
1205   // Check that table to be updated is not used in a subquery
1206   TABLE_LIST *const duplicate= unique_table(thd, update_table_ref,
1207                                             table_list->next_global, 0);
1208   if (duplicate)
1209   {
1210     update_non_unique_table_error(table_list, "UPDATE", duplicate);
1211     DBUG_RETURN(true);
1212   }
1213 
1214   if (setup_ftfuncs(select))
1215     DBUG_RETURN(true);                          /* purecov: inspected */
1216 
1217   if (select->inner_refs_list.elements && select->fix_inner_refs(thd))
1218     DBUG_RETURN(true);  /* purecov: inspected */
1219 
1220   if (select->apply_local_transforms(thd, false))
1221     DBUG_RETURN(true);
1222 
1223   DBUG_RETURN(false);
1224 }
1225 
1226 
1227 /***************************************************************************
1228   Update multiple tables from join
1229 ***************************************************************************/
1230 
1231 /*
1232   Get table map for list of Item_field
1233 */
1234 
1235 static table_map get_table_map(List<Item> *items)
1236 {
1237   List_iterator_fast<Item> item_it(*items);
1238   Item_field *item;
1239   table_map map= 0;
1240 
1241   while ((item= (Item_field *) item_it++))
1242     map|= item->used_tables();
1243   DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
1244   return map;
1245 }
1246 
1247 /**
1248   If one row is updated through two different aliases and the first
1249   update physically moves the row, the second update will error
1250   because the row is no longer located where expected. This function
1251   checks if the multiple-table update is about to do that and if so
1252   returns with an error.
1253 
1254   The following update operations physically move rows:
1255     1) Update of a column in a clustered primary key
1256     2) Update of a column used to calculate which partition the row belongs to
1257 
1258   This function returns with an error if both of the following are
1259   true:
1260 
1261     a) A table in the multiple-table update statement is updated
1262        through multiple aliases (including views)
1263     b) At least one of the updates on the table from a) may physically
1264        move the row. Note: Updating a column used to calculate which
1265        partition a row belongs to does not necessarily mean that the
1266        row is moved. The new value may or may not belong to the same
1267        partition.
1268 
1269   @param leaves               First leaf table
1270   @param tables_for_update    Map of tables that are updated
1271 
1272   @return
1273     true   if the update is unsafe, in which case an error message is also set,
1274     false  otherwise.
1275 */
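
/*
  Illustrative example (assuming table t1 has a clustered primary key pk):

    UPDATE t1 AS a, t1 AS b SET a.pk= a.pk+10, b.col= 0 WHERE a.pk = b.pk;

  Updating a.pk may physically move the row, so the position already read
  through alias b may become stale; such statements are therefore rejected.
*/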
1276 static
1277 bool unsafe_key_update(TABLE_LIST *leaves, table_map tables_for_update)
1278 {
1279   TABLE_LIST *tl= leaves;
1280 
1281   for (tl= leaves; tl ; tl= tl->next_leaf)
1282   {
1283     if (tl->map() & tables_for_update)
1284     {
1285       TABLE *table1= tl->table;
1286       bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
1287                                table1->s->primary_key != MAX_KEY);
1288 
1289       bool table_partitioned= (table1->part_info != NULL);
1290 
1291       if (!table_partitioned && !primkey_clustered)
1292         continue;
1293 
1294       for (TABLE_LIST* tl2= tl->next_leaf; tl2 ; tl2= tl2->next_leaf)
1295       {
1296         /*
1297           Look at "next" tables only since all previous tables have
1298           already been checked
1299         */
1300         TABLE *table2= tl2->table;
1301         if (tl2->map() & tables_for_update && table1->s == table2->s)
1302         {
1303           // A table is updated through two aliases
1304           if (table_partitioned &&
1305               (partition_key_modified(table1, table1->write_set) ||
1306                partition_key_modified(table2, table2->write_set)))
1307           {
1308             // Partitioned key is updated
1309             my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1310                      tl->belong_to_view ? tl->belong_to_view->alias
1311                                         : tl->alias,
1312                      tl2->belong_to_view ? tl2->belong_to_view->alias
1313                                          : tl2->alias);
1314             return true;
1315           }
1316 
1317           if (primkey_clustered)
1318           {
1319             // The primary key can cover multiple columns
1320             KEY key_info= table1->key_info[table1->s->primary_key];
1321             KEY_PART_INFO *key_part= key_info.key_part;
1322             KEY_PART_INFO *key_part_end= key_part +
1323               key_info.user_defined_key_parts;
1324 
1325             for (;key_part != key_part_end; ++key_part)
1326             {
1327               if (bitmap_is_set(table1->write_set, key_part->fieldnr-1) ||
1328                   bitmap_is_set(table2->write_set, key_part->fieldnr-1))
1329               {
1330                 // Clustered primary key is updated
1331                 my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1332                          tl->belong_to_view ? tl->belong_to_view->alias
1333                          : tl->alias,
1334                          tl2->belong_to_view ? tl2->belong_to_view->alias
1335                          : tl2->alias);
1336                 return true;
1337               }
1338             }
1339           }
1340         }
1341       }
1342     }
1343   }
1344   return false;
1345 }
1346 
1347 
1348 /**
1349   Check if there is enough privilege on a specific table used by the
1350   main select list of a multi-update, directly or indirectly (through
1351   a view).
1352 
1353   @param[in]  thd               Thread context.
1354   @param[in]  table             Table list element for the table.
1355   @param[in]  tables_for_update Bitmap with tables being updated.
1356   @param[in]  updatable         True if table in question is updatable.
1357                                 Should be initialized to true by the caller
1358                                 before a sequence of calls to this function.
1359   @param[out] updated           Return whether the table is actually updated.
1360                                 Should be initialized to false by caller.
1361 
1362   @note To determine which tables/views are updated we have to go from
1363         leaves to root since tables_for_update contains a map of leaf
1364         tables being updated and doesn't include non-leaf tables
1365         (fields are already resolved to leaf tables).
1366 
1367   @retval false - Success, all necessary privileges on all tables are
1368                   present or might be present on column-level.
1369   @retval true  - Failure, some necessary privilege on some table is
1370                   missing.
1371 */
1372 
1373 static bool multi_update_check_table_access(THD *thd, TABLE_LIST *table,
1374                                             table_map tables_for_update,
1375                                             bool updatable, bool *updated)
1376 {
1377   // Adjust updatability based on table/view's properties:
1378   updatable&= table->is_updatable();
1379 
1380   if (table->is_view_or_derived())
1381   {
1382     // Determine if this view/derived table is updated:
1383     bool view_updated= false;
1384     /*
1385       If it is a mergeable view then we need to check privileges on its
1386       underlying tables being merged (including views). We also need to
1387       check if any of them is updated in order to find if this view is
1388       updated.
1389       If it is a non-mergeable view or a derived table then it can't be updated.
1390     */
1391     assert(table->merge_underlying_list ||
1392            (!table->is_updatable() &&
1393             !(table->map() & tables_for_update)));
1394 
1395     Internal_error_handler_holder<View_error_handler, TABLE_LIST>
1396       view_handler(thd, true, table->merge_underlying_list);
1397     for (TABLE_LIST *tbl= table->merge_underlying_list; tbl;
1398          tbl= tbl->next_local)
1399     {
1400       if (multi_update_check_table_access(thd, tbl, tables_for_update,
1401                                           updatable, &view_updated))
1402         return true;
1403     }
1404     table->set_want_privilege(
1405       view_updated ? UPDATE_ACL | SELECT_ACL: SELECT_ACL);
1406     table->updating= view_updated;
1407     *updated|= view_updated;
1408   }
1409   else
1410   {
1411     // Must be a base table.
1412     const bool base_table_updated= table->map() & tables_for_update;
1413     if (base_table_updated && !updatable)
1414     {
1415       my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table->alias, "UPDATE");
1416       return true;
1417     }
1418     table->set_want_privilege(
1419       base_table_updated ? UPDATE_ACL | SELECT_ACL : SELECT_ACL);
1420 
1421     table->updating= base_table_updated;
1422     *updated|= base_table_updated;
1423   }
1424 
1425   return false;
1426 }
1427 
1428 
1429 /*
1430   make update specific preparation and checks after opening tables
1431 
1432   SYNOPSIS
1433     mysql_multi_update_prepare()
1434     thd         thread handler
1435 
1436   RETURN
1437     FALSE OK
1438     TRUE  Error
1439 */
1440 
1441 int Sql_cmd_update::mysql_multi_update_prepare(THD *thd)
1442 {
1443   LEX *lex= thd->lex;
1444   SELECT_LEX *select= lex->select_lex;
1445   TABLE_LIST *table_list= lex->query_tables;
1446   List<Item> *fields= &select->item_list;
1447   table_map tables_for_update;
1448   const bool using_lock_tables= thd->locked_tables_mode != LTM_NONE;
1449   bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
1450   DBUG_ENTER("mysql_multi_update_prepare");
1451 
1452   assert(select->item_list.elements == update_value_list.elements);
1453 
1454   Prepare_error_tracker tracker(thd);
1455 
1456   /* The following is needed for prepared statements, so the next execution runs as a multi-table UPDATE */
1457   sql_command= thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
1458 
1459   /*
1460     Open tables and create derived ones, but do not lock and fill them yet.
1461 
1462     During prepare phase acquire only S metadata locks instead of SW locks to
1463     keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
1464     and global read lock.
1465   */
1466   if (original_multiupdate &&
1467       open_tables_for_query(thd, table_list,
1468                             thd->stmt_arena->is_stmt_prepare() ?
1469                             MYSQL_OPEN_FORCE_SHARED_MDL : 0))
1470     DBUG_RETURN(true);
1471 
1472   if (run_before_dml_hook(thd))
1473     DBUG_RETURN(true);
1474 
1475   /*
1476     setup_tables() is needed for VIEWs. SELECT_LEX::prepare() will call
1477     setup_tables() a second time, but that call will do nothing (there is a
1478     check for repeated calls in setup_tables()).
1479   */
1480 
1481   if (select->setup_tables(thd, table_list, false))
1482     DBUG_RETURN(true);             /* purecov: inspected */
1483 
1484   /*
1485     A view's CHECK OPTION is incompatible with semi-join.
1486     @note We could let non-updated views do semi-join, and we could let
1487           updated views without CHECK OPTION do semi-join.
1488           But since we resolve derived tables before we know this context,
1489           we cannot use semi-join in any case currently.
1490           The problem is that the CHECK OPTION condition serves as
1491           part of the semi-join condition, and a standalone condition
1492           to be evaluated as part of the UPDATE, and those two uses are
1493           incompatible.
1494   */
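  /*
    Illustrative example of the conflict described above (not part of the
    original source; table and view names are made up):
      CREATE VIEW v AS SELECT * FROM t1 WHERE a < 10 WITH CHECK OPTION;
      UPDATE v SET a = a + 1 WHERE b IN (SELECT b FROM t2);
    The IN predicate might otherwise be transformed into a semi-join, but the
    CHECK OPTION condition (a < 10) must also be re-evaluated for every
    updated row, so derived tables are resolved here with semi-join disabled.
  */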
1495   if (select->derived_table_count && select->resolve_derived(thd, false))
1496     DBUG_RETURN(true);
1497 
1498   if (setup_natural_join_row_types(thd, select->join_list, &select->context))
1499     DBUG_RETURN(true);
1500 
1501   /*
1502     In multi-table UPDATE, tables to be updated are determined based on
1503     which fields are updated. Hence, tables that need to be checked
1504     for update have not been established yet, and thus 0 is supplied
1505     for want_privilege argument below.
1506     Later, the combined used_tables information for these columns is used
1507     to determine updatable tables, those tables are prepared for update,
1508     and finally the columns can be checked for proper update privileges.
1509   */
1510   if (setup_fields(thd, Ref_ptr_array(), *fields, 0, NULL, false, true))
1511     DBUG_RETURN(true);
1512 
1513   List<Item> original_update_fields;
1514   if (check_fields(thd, *fields, &original_update_fields))
1515     DBUG_RETURN(true);
1516 
1517   thd->table_map_for_update= tables_for_update= get_table_map(fields);
1518 
1519   /*
1520     Setup timestamp handling and locking mode
1521   */
1522   for (TABLE_LIST *tl= select->leaf_tables; tl; tl= tl->next_leaf)
1523   {
1524     // If the table will be updated, check that it is updatable and unique:
1525     if (tl->map() & tables_for_update)
1526     {
1527       if (!tl->is_updatable() || check_key_in_view(thd, tl, tl))
1528       {
1529         my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
1530         DBUG_RETURN(true);
1531       }
1532 
1533       DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
1534       /*
1535         If the table will be updated, we should not downgrade its lock;
1536         leave it as is.
1537       */
1538     }
1539     else
1540     {
1541       DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias));
1542       /*
1543         If we are using the binary log, we need TL_READ_NO_INSERT to get
1544         correct order of statements. Otherwise, we use a TL_READ lock to
1545         improve performance.
1546         We don't downgrade metadata lock from SW to SR in this case as
1547         there is no guarantee that the same ticket is not used by
1548         another table instance used by this statement which is going to
1549         be write-locked (for example, trigger to be invoked might try
1550         to update this table).
1551         Last argument routine_modifies_data for read_lock_type_for_table()
1552         is ignored, as prelocking placeholder will never be set here.
1553       */
1554       assert(tl->prelocking_placeholder == false);
1555       tl->lock_type= read_lock_type_for_table(thd, lex, tl, true);
1556       /* Update TABLE::lock_type accordingly. */
1557       if (!tl->is_placeholder() && !using_lock_tables)
1558         tl->table->reginfo.lock_type= tl->lock_type;
1559     }
1560   }
1561 
1562   /*
1563     Check privileges for tables being updated or read.
1564     Note that we need to iterate here not only through all leaf tables but
1565     also through the whole view hierarchy.
1566   */
1567   for (TABLE_LIST *tl= table_list; tl; tl= tl->next_local)
1568   {
1569     bool updated= false;
1570     if (multi_update_check_table_access(thd, tl, tables_for_update, true,
1571                                         &updated))
1572       DBUG_RETURN(true);
1573   }
1574 
1575   // Check privileges for columns that are updated
1576   {  // Opening scope for the RAII object
1577   Item *item;
1578   List_iterator_fast<Item> orig_it(original_update_fields);
1579   Mark_field mf(MARK_COLUMNS_WRITE);
1580   Column_privilege_tracker priv_tracker(thd, UPDATE_ACL);
1581   while ((item= orig_it++))
1582   {
1583     if (item->walk(&Item::check_column_privileges, Item::WALK_PREFIX,
1584                    (uchar *)thd))
1585       DBUG_RETURN(true);
1586 
1587     item->walk(&Item::mark_field_in_map, Item::WALK_POSTFIX, (uchar *)&mf);
1588   }
1589   } // Closing scope for the RAII object
1590 
1591   if (unsafe_key_update(select->leaf_tables, tables_for_update))
1592     DBUG_RETURN(true);
1593 
1594   /*
1595     When using a multi-table UPDATE command as a prepared statement,
1596     1) We must validate values (the right argument 'expr' of 'SET col1=expr')
1597     during PREPARE, so that:
1598     - bad columns are reported by PREPARE
1599     - cached_table is set for fields before query transformations (semi-join,
1600     view merging, ...) are done, since those transformations make resolution more difficult.
1601     2) This validation is done by Query_result_update::prepare() but it is
1602     not called by PREPARE.
1603     3) So we do it below.
1604     @todo Remove this code duplication as part of WL#6570
1605   */
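  /*
    Illustrative example (not part of the original source; names are made up):
      PREPARE s FROM 'UPDATE t1, t2 SET t1.a = t2.no_such_col WHERE t1.id = t2.id';
    The setup_fields() call below resolves the SET values already at PREPARE
    time, so the unknown column is reported to the client immediately instead
    of at the first EXECUTE.
  */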
1606   if (thd->stmt_arena->is_stmt_prepare())
1607   {
1608     if (setup_fields(thd, Ref_ptr_array(), update_value_list, SELECT_ACL,
1609                      NULL, false, false))
1610       DBUG_RETURN(true);
1611 
1612     /*
1613       Check that table being updated is not being used in a subquery, but
1614       skip all tables of the UPDATE query block itself
1615     */
1616     select->exclude_from_table_unique_test= true;
1617     for (TABLE_LIST *tr= select->leaf_tables; tr; tr= tr->next_leaf)
1618     {
1619       if (tr->lock_type != TL_READ &&
1620           tr->lock_type != TL_READ_NO_INSERT)
1621       {
1622         TABLE_LIST *duplicate= unique_table(thd, tr, select->leaf_tables, 0);
1623         if (duplicate != NULL)
1624         {
1625           update_non_unique_table_error(select->leaf_tables, "UPDATE",
1626                                         duplicate);
1627           DBUG_RETURN(true);
1628         }
1629       }
1630     }
1631   }
1632 
1633   /* Check single-table UPDATE of a view composed of several tables */
1634   for (TABLE_LIST *tl= table_list; tl; tl= tl->next_local)
1635   {
1636     if (tl->is_merged())
1637     {
1638       assert(tl->is_view_or_derived());
1639       TABLE_LIST *for_update= NULL;
1640       if (tl->check_single_table(&for_update, tables_for_update))
1641       {
1642 	my_error(ER_VIEW_MULTIUPDATE, MYF(0),
1643 		 tl->view_db.str, tl->view_name.str);
1644 	DBUG_RETURN(true);
1645       }
1646     }
1647   }
1648 
1649   // Downgrade desired privileges for updated tables to SELECT
1650   for (TABLE_LIST *tl= table_list; tl; tl= tl->next_local)
1651   {
1652     if (tl->updating)
1653       tl->set_want_privilege(SELECT_ACL);
1654   }
1655 
1656   /* @todo: downgrade the metadata locks here. */
1657 
1658   /*
1659     Syntax rule for multi-table update prevents these constructs.
1660     But they are possible for single-table UPDATE against multi-table view.
1661   */
1662   if (select->order_list.elements)
1663   {
1664     my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", "ORDER BY");
1665     DBUG_RETURN(true);
1666   }
1667   if (select->select_limit)
1668   {
1669     my_error(ER_WRONG_USAGE, MYF(0), "UPDATE", "LIMIT");
1670     DBUG_RETURN(true);
1671   }
1672   DBUG_RETURN(false);
1673 }
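
/*
  Illustrative example (not part of the original source; v is a made-up view
  defined over a join of two tables):
    UPDATE v SET a = 1 ORDER BY b LIMIT 10;
  This parses as a single-table UPDATE but must be executed as a multi-table
  update, and the checks at the end of mysql_multi_update_prepare() above
  reject the ORDER BY and LIMIT clauses with ER_WRONG_USAGE.
*/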
1674 
1675 
1676 /*
1677   Setup multi-update handling and call SELECT to do the join
1678 */
1679 
1680 bool mysql_multi_update(THD *thd,
1681                         List<Item> *fields,
1682                         List<Item> *values,
1683                         enum enum_duplicates handle_duplicates,
1684                         SELECT_LEX *select_lex,
1685                         Query_result_update **result)
1686 {
1687   bool res;
1688   DBUG_ENTER("mysql_multi_update");
1689 
1690   if (!(*result= new Query_result_update(select_lex->get_table_list(),
1691                                          select_lex->leaf_tables,
1692                                          fields, values,
1693                                          handle_duplicates)))
1694     DBUG_RETURN(true); /* purecov: inspected */
1695 
1696   assert(select_lex->having_cond() == NULL &&
1697          !select_lex->order_list.elements &&
1698          !select_lex->group_list.elements &&
1699          !select_lex->select_limit);
1700 
1701   res= handle_query(thd, thd->lex, *result,
1702                     SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1703                     OPTION_SETUP_TABLES_DONE,
1704                     OPTION_BUFFER_RESULT);
1705 
1706   DBUG_PRINT("info",("res: %d  report_error: %d",res, (int) thd->is_error()));
1707   res|= thd->is_error();
1708   if (unlikely(res))
1709   {
1710     /* If another error was reported earlier, this one will be ignored */
1711     (*result)->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
1712     (*result)->abort_result_set();
1713   }
1714   DBUG_RETURN(res);
1715 }
1716 
1717 
1718 Query_result_update::Query_result_update(TABLE_LIST *table_list,
1719                                          TABLE_LIST *leaves_list,
1720                                          List<Item> *field_list,
1721                                          List<Item> *value_list,
1722                                     enum enum_duplicates handle_duplicates_arg)
1723   :all_tables(table_list), leaves(leaves_list), update_tables(0),
1724    tmp_tables(0), updated(0), found(0), fields(field_list),
1725    values(value_list), table_count(0), copy_field(0),
1726    handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
1727    transactional_tables(0), error_handled(0), update_operations(NULL)
1728 {}
1729 
1730 
1731 /*
1732   Connect fields with tables and create list of tables that are updated
1733 */
1734 
1735 int Query_result_update::prepare(List<Item> &not_used_values,
1736                                  SELECT_LEX_UNIT *lex_unit)
1737 {
1738   SQL_I_List<TABLE_LIST> update;
1739   List_iterator_fast<Item> field_it(*fields);
1740   List_iterator_fast<Item> value_it(*values);
1741   DBUG_ENTER("Query_result_update::prepare");
1742 
1743   SELECT_LEX *const select= lex_unit->first_select();
1744 
1745   thd->count_cuted_fields= CHECK_FIELD_WARN;
1746   thd->cuted_fields=0L;
1747   THD_STAGE_INFO(thd, stage_updating_main_table);
1748 
1749   const table_map tables_to_update= get_table_map(fields);
1750 
1751   if (!tables_to_update)
1752   {
1753     my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
1754     DBUG_RETURN(1);
1755   }
1756 
1757   /*
1758     We gather the set of columns read during evaluation of SET expression in
1759     TABLE::tmp_set by pointing TABLE::read_set to it and then restore it after
1760     setup_fields().
1761   */
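  /*
    Illustrative example (not part of the original source; names are made up):
      UPDATE t1, t2 SET t1.a = t2.b + t1.c WHERE t1.id = t2.id;
    t1 is an updated table, so while setup_fields() below resolves the SET
    values, the read of t1.c is collected in t1's tmp_set and merged back
    into its def_read_set afterwards; t2.b is simply marked in t2's read set.
  */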
1762   for (TABLE_LIST *tr= leaves; tr; tr= tr->next_leaf)
1763   {
1764     if (tables_to_update & tr->map())
1765     {
1766       TABLE *const table= tr->table;
1767       assert(table->read_set == &table->def_read_set);
1768       table->read_set= &table->tmp_set;
1769       bitmap_clear_all(table->read_set);
1770     }
1771     // Resolving may be needed for subsequent executions
1772     if (tr->check_option && !tr->check_option->fixed &&
1773         tr->check_option->fix_fields(thd, NULL))
1774       DBUG_RETURN(1);        /* purecov: inspected */
1775   }
1776 
1777   /*
1778     We have to check values after setup_tables to get covering_keys right in
1779     reference tables
1780   */
1781 
1782   int error= setup_fields(thd, Ref_ptr_array(), *values, SELECT_ACL, NULL,
1783                           false, false);
1784 
1785   for (TABLE_LIST *tr= leaves; tr; tr= tr->next_leaf)
1786   {
1787     if (tables_to_update & tr->map())
1788     {
1789       TABLE *const table= tr->table;
1790       table->read_set= &table->def_read_set;
1791       bitmap_union(table->read_set, &table->tmp_set);
1792       bitmap_clear_all(&table->tmp_set);
1793     }
1794   }
1795 
1796   if (error)
1797     DBUG_RETURN(1);
1798 
1799   /*
1800     Check that table being updated is not being used in a subquery, but
1801     skip all tables of the UPDATE query block itself
1802   */
1803   select->exclude_from_table_unique_test= true;
1804   for (TABLE_LIST *tr= select->leaf_tables; tr; tr= tr->next_leaf)
1805   {
1806     if (tr->lock_type != TL_READ &&
1807         tr->lock_type != TL_READ_NO_INSERT)
1808     {
1809       TABLE_LIST *duplicate= unique_table(thd, tr, all_tables, 0);
1810       if (duplicate != NULL)
1811       {
1812         update_non_unique_table_error(all_tables, "UPDATE", duplicate);
1813         DBUG_RETURN(true);
1814       }
1815     }
1816   }
1817   /*
1818     Set exclude_from_table_unique_test value back to FALSE. It is needed for
1819     a later check of whether to use the record cache.
1820   */
1821   select->exclude_from_table_unique_test= false;
1822   /*
1823     Save tables being updated in update_tables.
1824     update_table->shared is the position of the table.
1825     Don't use key read on tables that are updated.
1826   */
1827 
1828   update.empty();
1829   uint leaf_table_count= 0;
1830   for (TABLE_LIST *tr= leaves; tr; tr= tr->next_leaf)
1831   {
1832     /* TODO: add support for updating views over joins */
1833     leaf_table_count++;
1834     if (tables_to_update & tr->map())
1835     {
1836       TABLE_LIST *dup= (TABLE_LIST*) thd->memdup(tr, sizeof(*dup));
1837       if (dup == NULL)
1838 	DBUG_RETURN(1);
1839 
1840       TABLE *const table= tr->table;
1841 
1842       update.link_in_list(dup, &dup->next_local);
1843       tr->shared= dup->shared= table_count++;
1844       table->no_keyread=1;
1845       table->covering_keys.clear_all();
1846       table->pos_in_table_list= dup;
1847       if (table->triggers &&
1848           table->triggers->has_triggers(TRG_EVENT_UPDATE,
1849                                         TRG_ACTION_AFTER))
1850       {
1851 	/*
1852            The table has AFTER UPDATE triggers that might access the subject
1853            table and therefore might need the update to be done immediately.
1854            So we turn off batching.
1855 	*/
1856 	(void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
1857       }
1858     }
1859   }
1860 
1861 
1862   table_count=  update.elements;
1863   update_tables= update.first;
1864 
1865   tmp_tables = (TABLE**) thd->mem_calloc(sizeof(TABLE *) * table_count);
1866   tmp_table_param= new (thd->mem_root) Temp_table_param[table_count];
1867   fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1868 					      table_count);
1869   values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1870 					      table_count);
1871 
1872   assert(update_operations == NULL);
1873   update_operations= (COPY_INFO**) thd->mem_calloc(sizeof(COPY_INFO*) *
1874                                                table_count);
1875 
1876   if (thd->is_error())
1877     DBUG_RETURN(1);
1878   for (uint i= 0; i < table_count; i++)
1879   {
1880     fields_for_table[i]= new List_item;
1881     values_for_table[i]= new List_item;
1882   }
1883   if (thd->is_error())
1884     DBUG_RETURN(1);
1885 
1886   /* Split fields into fields_for_table[] and values_for_table[] */
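  /*
    Illustrative example (not part of the original source; names are made up):
      UPDATE t1, t2 SET t1.a = 1, t2.b = 2, t1.c = 3;
    Assuming t1 was assigned shared index 0 and t2 index 1, the loop below
    produces fields_for_table[0] = (t1.a, t1.c), values_for_table[0] = (1, 3),
    fields_for_table[1] = (t2.b) and values_for_table[1] = (2), using
    Item_field::table_ref->shared as the index.
  */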
1887 
1888   Item *item;
1889   while ((item= field_it++))
1890   {
1891     Item_field *const field= down_cast<Item_field *>(item);
1892     Item *const value= value_it++;
1893     uint offset= field->table_ref->shared;
1894     fields_for_table[offset]->push_back(field);
1895     values_for_table[offset]->push_back(value);
1896   }
1897   if (thd->is_fatal_error)
1898     DBUG_RETURN(1);
1899 
1900   /* Allocate copy fields */
1901   uint max_fields= 0;
1902   for (uint i= 0; i < table_count; i++)
1903     set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
1904   copy_field= new Copy_field[max_fields];
1905 
1906 
1907   for (TABLE_LIST *ref= leaves; ref != NULL; ref= ref->next_leaf)
1908   {
1909     if (tables_to_update & ref->map())
1910     {
1911       const uint position= ref->shared;
1912       List<Item> *cols= fields_for_table[position];
1913       List<Item> *vals= values_for_table[position];
1914       TABLE *const table= ref->table;
1915 
1916       COPY_INFO *update=
1917         new (thd->mem_root) COPY_INFO(COPY_INFO::UPDATE_OPERATION, cols, vals);
1918       if (update == NULL ||
1919           update->add_function_default_columns(table, table->write_set))
1920         DBUG_RETURN(1);
1921 
1922       update_operations[position]= update;
1923 
1924       if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
1925           update->function_defaults_apply(table))
1926       {
1927         /*
1928           A column is to be set to its ON UPDATE function default only if
1929           other columns of the row are changing. To know this, we must be able
1930           to compare the "before" and "after" value of those columns. Thus, we
1931           must read those columns:
1932         */
1933         bitmap_union(table->read_set, table->write_set);
1934       }
1935       /* All needed columns must be marked before prune_partitions(). */
1936       if (table->triggers && table->triggers->mark_fields(TRG_EVENT_UPDATE))
1937         DBUG_RETURN(true);
1938     }
1939   }
1940 
1941   DBUG_RETURN(thd->is_fatal_error != 0);
1942 }
1943 
1944 
1945 /*
1946   Check if table is safe to update on fly
1947 
1948   SYNOPSIS
1949     safe_update_on_fly()
1950     thd                 Thread handler
1951     join_tab            How table is used in join
1952     all_tables          List of tables
1953 
1954   NOTES
1955     We can update the first table in join on the fly if we know that
1956     a row in this table will never be read twice. This is true under
1957     the following conditions:
1958 
1959     - No column is both written to and read in SET expressions.
1960 
1961     - We are doing a table scan and the data is in a separate file (MyISAM),
1962       or we don't update a clustered key.
1963 
1964     - We are doing a range scan and we don't update the scan key or
1965       the primary key for a clustered table handler.
1966 
1967     - Table is not joined to itself.
1968 
1969     This function gets information about fields to be updated from
1970     the TABLE::write_set bitmap.
1971 
1972   WARNING
1973     This code is a bit dependent on how make_join_readinfo() works.
1974 
1975     The field table->tmp_set is used for keeping track of which fields are
1976     read during evaluation of the SET expression.
1977     See Query_result_update::prepare.
1978 
1979   RETURN
1980     0		Not safe to update
1981     1		Safe to update
1982 */
1983 
1984 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
1985                                TABLE_LIST *table_ref, TABLE_LIST *all_tables)
1986 {
1987   TABLE *table= join_tab->table();
1988   if (unique_table(thd, table_ref, all_tables, 0))
1989     return 0;
1990   switch (join_tab->type()) {
1991   case JT_SYSTEM:
1992   case JT_CONST:
1993   case JT_EQ_REF:
1994     return TRUE;				// At most one matching row
1995   case JT_REF:
1996   case JT_REF_OR_NULL:
1997     return !is_key_used(table, join_tab->ref().key, table->write_set);
1998   case JT_ALL:
1999     if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
2000       return FALSE;
2001     /* If range search on index */
2002     if (join_tab->quick())
2003       return !join_tab->quick()->is_keys_used(table->write_set);
2004     /* If scanning in clustered key */
2005     if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
2006 	table->s->primary_key < MAX_KEY)
2007       return !is_key_used(table, table->s->primary_key, table->write_set);
2008     return TRUE;
2009   default:
2010     break;					// Avoid compiler warning
2011   }
2012   return FALSE;
2013 
2014 }
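
/*
  Illustrative example (not part of the original source; names are made up):
    UPDATE t1, t2 SET t1.key_col = t1.key_col + 10 WHERE t1.key_col = t2.a;
  If rows of t1 are fetched through a range or ref scan on key_col, updating
  that same key could make an already-updated row reappear later in the scan,
  so safe_update_on_fly() returns false and the update of t1 is deferred
  through a temporary table instead of being applied on the fly.
*/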
2015 
2016 
2017 /*
2018   Initialize tables for multi-table update
2019 
2020   IMPLEMENTATION
2021     - Update first table in join on the fly, if possible
2022     - Create temporary tables to store changed values for all other tables
2023       that are updated (and main_table if the above doesn't hold).
2024 */
2025 
2026 bool Query_result_update::initialize_tables(JOIN *join)
2027 {
2028   TABLE_LIST *table_ref;
2029   DBUG_ENTER("initialize_tables");
2030   ASSERT_BEST_REF_IN_JOIN_ORDER(join);
2031 
2032   if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
2033       error_if_full_join(join))
2034     DBUG_RETURN(true);
2035   main_table= join->best_ref[0]->table();
2036   table_to_update= 0;
2037 
2038   /* Any update has at least one pair (field, value) */
2039   assert(fields->elements);
2040   /*
2041    Only one table may be modified by UPDATE of an updatable view.
2042    For an updatable view first_table_for_update indicates this
2043    table.
2044    For a regular multi-update it refers to some updated table.
2045   */
2046   TABLE_LIST *first_table_for_update= ((Item_field *)fields->head())->table_ref;
2047 
2048   /* Create a temporary table for keys to all tables, except main table */
2049   for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
2050   {
2051     TABLE *table=table_ref->table;
2052     uint cnt= table_ref->shared;
2053     List<Item> temp_fields;
2054     ORDER *group = NULL;
2055     Temp_table_param *tmp_param;
2056     if (table->vfield &&
2057         validate_gc_assignment(thd, fields, values, table))
2058       DBUG_RETURN(0);
2059 
2060     if (thd->lex->is_ignore())
2061       table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
2062     if (table == main_table)			// First table in join
2063     {
2064       /*
2065         If there are at least two tables to update, t1 and t2, t1 being
2066         before t2 in the plan, we need to collect all fields of t1 which
2067         influence the selection of rows from t2. If those fields are also
2068         updated, it will not be possible to update t1 on-the-fly.
2069         Due to how the nested loop join algorithm works, when collecting
2070         we can ignore the condition attached to t1 - a row of t1 is read
2071         only one time.
2072       */
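      /*
        Illustrative example (not part of the original source; names are
        made up): in
          UPDATE t1, t2 SET t1.a = 1, t2.b = 2 WHERE t2.c > t1.a;
        rows of t2 are selected based on t1.a, which is itself updated, so
        t1.a is added to main_table's tmp_set by the walks below, and
        safe_update_on_fly() may then refuse to update t1 in place.
      */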
2073       if (update_tables->next_local)
2074       {
2075         for (uint i= 1; i < join->tables; ++i)
2076         {
2077           JOIN_TAB *tab= join->best_ref[i];
2078           if (tab->condition())
2079             tab->condition()->walk(&Item::add_field_to_set_processor,
2080                       Item::enum_walk(Item::WALK_POSTFIX | Item::WALK_SUBQUERY),
2081                       reinterpret_cast<uchar *>(main_table));
2082           /*
2083             On top of checking conditions, we need to check conditions
2084             referenced by index lookup on the following tables. They implement
2085             conditions too, but their corresponding search conditions might
2086             have been optimized away. The second table is an exception: even if
2087             rows are read from it using index lookup which references a column
2088             of main_table, the implementation of ref access will see the
2089             before-update value;
2090             consider this flow of a nested loop join:
2091             read a row from main_table and:
2092             - init ref access (cp_buffer_from_ref() in join_read_always_key()):
2093               copy referenced value from main_table into 2nd table's ref buffer
2094             - look up a first row in 2nd table (join_read_always_key)
2095               - if it joins, update row of main_table on the fly
2096             - look up a second row in 2nd table (join_read_next_same).
2097             Because cp_buffer_from_ref() is not called again, the before-update
2098             value of the row of main_table is still in the 2nd table's ref
2099             buffer. So the lookup is not influenced by the just-done update of
2100             main_table.
2101           */
2102           if (tab > join->join_tab + 1)
2103           {
2104             for (uint i= 0; i < tab->ref().key_parts; i++)
2105             {
2106               Item *ref_item= tab->ref().items[i];
2107               if ((table_ref->map() & ref_item->used_tables()) != 0)
2108                 ref_item->walk(&Item::add_field_to_set_processor,
2109                       Item::enum_walk(Item::WALK_POSTFIX | Item::WALK_SUBQUERY),
2110                       reinterpret_cast<uchar *>(main_table));
2111             }
2112           }
2113         }
2114       }
2115       if (safe_update_on_fly(thd, join->best_ref[0], table_ref, all_tables))
2116       {
2117         table->mark_columns_needed_for_update(true/*mark_binlog_columns=true*/);
2118         table_to_update= table;			// Update table on the fly
2119 	continue;
2120       }
2121     }
2122     table->mark_columns_needed_for_update(true/*mark_binlog_columns=true*/);
2123     /*
2124       Enable the uncacheable flag if we update a view with CHECK OPTION
2125       and the check option has a subselect; otherwise the check option
2126       could be evaluated after the subselect was freed as independent
2127       (see full_local in JOIN::join_free()).
2128     */
2129     if (table_ref->check_option && !join->select_lex->uncacheable)
2130     {
2131       SELECT_LEX_UNIT *tmp_unit;
2132       SELECT_LEX *sl;
2133       for (tmp_unit= join->select_lex->first_inner_unit();
2134            tmp_unit;
2135            tmp_unit= tmp_unit->next_unit())
2136       {
2137         for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
2138         {
2139           if (sl->master_unit()->item)
2140           {
2141             join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
2142             goto loop_end;
2143           }
2144         }
2145       }
2146     }
2147 loop_end:
2148 
2149     if (table_ref->table == first_table_for_update->table &&
2150         table_ref->check_option)
2151     {
2152       table_map unupdated_tables= table_ref->check_option->used_tables() &
2153                                   ~first_table_for_update->map();
2154       for (TABLE_LIST *tbl_ref =leaves;
2155            unupdated_tables && tbl_ref;
2156            tbl_ref= tbl_ref->next_leaf)
2157       {
2158         if (unupdated_tables & tbl_ref->map())
2159           unupdated_tables&= ~tbl_ref->map();
2160         else
2161           continue;
2162         if (unupdated_check_opt_tables.push_back(tbl_ref->table))
2163           DBUG_RETURN(1);
2164       }
2165     }
2166 
2167     tmp_param= tmp_table_param+cnt;
2168 
2169     /*
2170       Create a temporary table to store all fields that are changed for this
2171       table. The first field in the temporary table is a pointer to the
2172       original row so that we can find and update it. For an updatable
2173       VIEW, the following few fields are rowids of tables used in the CHECK
2174       OPTION condition.
2175     */
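    /*
      Illustrative sketch of the resulting temporary table layout (not part
      of the original source; names are made up), for an updated table t and
      CHECK OPTION tables c1 and c2:
        field 0:      rowid of t   (Field_varstring over file->ref)
        field 1:      rowid of c1
        field 2:      rowid of c2
        field 3..n:   the new values of the updated columns of t
      The grouping built on field 0 further below is what prevents the same
      row from being queued for update more than once.
    */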
2176 
2177     List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2178     TABLE *tbl= table;
2179     do
2180     {
2181       /*
2182         Signal each table (including tables referenced by WITH CHECK OPTION
2183         clause) for which we will store row position in the temporary table
2184         that we need a position to be read first.
2185       */
2186       tbl->prepare_for_position();
2187       /*
2188         A tmp table is moved to InnoDB if it doesn't fit in memory,
2189         and InnoDB does not support fixed length string fields bigger
2190         than 1024 bytes, so use a variable length string field.
2191       */
2192       Field_varstring *field = new Field_varstring(
2193           tbl->file->ref_length, false, tbl->alias, tbl->s, &my_charset_bin);
2194 
2195       if (!field)
2196         DBUG_RETURN(1);
2197       field->init(tbl);
2198       /*
2199         The field will be converted to varstring when creating tmp table if
2200         table to be updated was created by mysql 4.1. Deny this.
2201       */
2202       Item_field *ifield= new Item_field((Field *) field);
2203       if (!ifield)
2204          DBUG_RETURN(1);
2205       ifield->maybe_null= 0;
2206       if (temp_fields.push_back(ifield))
2207         DBUG_RETURN(1);
2208     } while ((tbl= tbl_it++));
2209 
2210     temp_fields.concat(fields_for_table[cnt]);
2211 
2212     /* Make a unique key over the first field to avoid duplicated updates */
2213     group = new ORDER;
2214     memset(group, 0, sizeof(*group));
2215     group->direction = ORDER::ORDER_ASC;
2216     group->item = temp_fields.head_ref();
2217 
2218     tmp_param->quick_group=1;
2219     tmp_param->field_count=temp_fields.elements;
2220     tmp_param->group_parts=1;
2221     tmp_param->group_length= table->file->ref_length;
2222     /* small table, ignore SQL_BIG_TABLES */
2223     my_bool save_big_tables= thd->variables.big_tables;
2224     thd->variables.big_tables= FALSE;
2225     tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
2226                                      group, 0, 0,
2227                                      TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, "");
2228     thd->variables.big_tables= save_big_tables;
2229     if (!tmp_tables[cnt])
2230       DBUG_RETURN(1);
2231 
2232     /*
2233       Pass a table triggers pointer (Table_trigger_dispatcher *) from
2234       the original table to the new temporary table. This pointer will be used
2235       inside the method Query_result_update::send_data() to determine temporary
2236       nullability flag for the temporary table's fields. It will be done before
2237       calling fill_record() to assign values to the temporary table's fields.
2238     */
2239     tmp_tables[cnt]->triggers= table->triggers;
2240     tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
2241     tmp_tables[cnt]->file->ha_index_init(0, false /*sorted*/);
2242   }
2243   DBUG_RETURN(0);
2244 }
2245 
2246 
2247 Query_result_update::~Query_result_update()
2248 {
2249   TABLE_LIST *table;
2250   for (table= update_tables ; table; table= table->next_local)
2251   {
2252     table->table->no_cache= 0;
2253     if (thd->lex->is_ignore())
2254       table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
2255   }
2256 
2257   if (tmp_tables)
2258   {
2259     for (uint cnt = 0; cnt < table_count; cnt++)
2260     {
2261       if (tmp_tables[cnt])
2262       {
2263         tmp_tables[cnt]->file->ha_index_or_rnd_end();
2264         delete tmp_tables[cnt]->group;
2265 	free_tmp_table(thd, tmp_tables[cnt]);
2266 	tmp_table_param[cnt].cleanup();
2267       }
2268     }
2269   }
2270   if (copy_field)
2271     delete [] copy_field;
2272   thd->count_cuted_fields= CHECK_FIELD_IGNORE;		// Restore this setting
2273   assert(trans_safe || !updated ||
2274          thd->get_transaction()->cannot_safely_rollback(
2275                                                         Transaction_ctx::STMT));
2276 
2277   if (update_operations != NULL)
2278     for (uint i= 0; i < table_count; i++)
2279       delete update_operations[i];
2280 }
2281 
2282 
2283 bool Query_result_update::send_data(List<Item> &not_used_values)
2284 {
2285   TABLE_LIST *cur_table;
2286   DBUG_ENTER("Query_result_update::send_data");
2287 
2288   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2289   {
2290     TABLE *table= cur_table->table;
2291     uint offset= cur_table->shared;
2292     /*
2293       Check if we are using outer join and we didn't find the row
2294       or if we have already updated this row in the previous call to this
2295       function.
2296 
2297       The same row may be presented here several times in a join of type
2298       UPDATE t1, t2 SET t1.a=t2.a
2299 
2300       In this case we will do the update for the first found row combination.
2301       The join algorithm guarantees that we will not find a row in
2302       t1 several times.
2303     */
2304     if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
2305       continue;
2306 
2307     if (table == table_to_update)
2308     {
2309       table->status|= STATUS_UPDATED;
2310       store_record(table,record[1]);
2311       if (fill_record_n_invoke_before_triggers(thd, update_operations[offset],
2312                                                *fields_for_table[offset],
2313                                                *values_for_table[offset],
2314                                                table,
2315                                                TRG_EVENT_UPDATE, 0))
2316 	DBUG_RETURN(1);
2317 
2318       /*
2319         Reset the table->auto_increment_field_not_null as it is valid for
2320         only one row.
2321       */
2322       table->auto_increment_field_not_null= FALSE;
2323       found++;
2324       int error= 0;
2325       if (!records_are_comparable(table) || compare_records(table))
2326       {
2327         update_operations[offset]->set_function_defaults(table);
2328 
2329         if ((error= cur_table->view_check_option(thd)) !=
2330             VIEW_CHECK_OK)
2331         {
2332           found--;
2333           if (error == VIEW_CHECK_SKIP)
2334             continue;
2335           else if (error == VIEW_CHECK_ERROR)
2336             DBUG_RETURN(1);
2337         }
2338         if (!updated++)
2339         {
2340           /*
2341             Inform the main table that we are going to update the table even
2342             while we may be scanning it.  This will flush the read cache
2343             if it's used.
2344           */
2345           main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
2346         }
2347         if ((error=table->file->ha_update_row(table->record[1],
2348                                               table->record[0])) &&
2349             error != HA_ERR_RECORD_IS_THE_SAME)
2350         {
2351           updated--;
2352           myf error_flags= MYF(0);
2353           if (table->file->is_fatal_error(error))
2354             error_flags|= ME_FATALERROR;
2355 
2356           table->file->print_error(error, error_flags);
2357 
2358           /* Errors could be downgraded to warning by IGNORE */
2359           if (thd->is_error())
2360             DBUG_RETURN(1);
2361         }
2362         else
2363         {
2364           if (error == HA_ERR_RECORD_IS_THE_SAME)
2365           {
2366             error= 0;
2367             updated--;
2368           }
2369           /* A non-transactional or a transactional table got modified.   */
2370           /* In either case the corresponding Query_result_update flag is raised in its branch. */
2371           if (table->file->has_transactions())
2372             transactional_tables= TRUE;
2373           else
2374           {
2375             trans_safe= FALSE;
2376             thd->get_transaction()->mark_modified_non_trans_table(
2377               Transaction_ctx::STMT);
2378           }
2379         }
2380       }
2381       if (!error && table->triggers &&
2382           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2383                                             TRG_ACTION_AFTER, TRUE))
2384         DBUG_RETURN(1);
2385     }
2386     else
2387     {
2388       int error;
2389       TABLE *tmp_table= tmp_tables[offset];
2390       /*
2391        For updatable VIEW store rowid of the updated table and
2392        rowids of tables used in the CHECK OPTION condition.
2393       */
2394       uint field_num= 0;
2395       List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2396       TABLE *tbl= table;
2397       do
2398       {
2399         tbl->file->position(tbl->record[0]);
2400         tmp_table->visible_field_ptr()[field_num]->store(
2401             reinterpret_cast<const char *>(tbl->file->ref),
2402             tbl->file->ref_length, &my_charset_bin);
2403         /*
2404          For outer joins a rowid field may have no NOT_NULL_FLAG,
2405          so we have to reset NULL bit for this field.
2406          (set_notnull() resets NULL bit only if available).
2407         */
2408         tmp_table->visible_field_ptr()[field_num]->set_notnull();
2409         field_num++;
2410       } while ((tbl= tbl_it++));
2411 
2412       /*
2413         If the original table that the temporary table is based on has
2414         triggers, then enable temporary nullability for the temporary table's fields.
2415       */
2416       if (tmp_table->triggers)
2417       {
2418         for (Field** modified_fields= tmp_table->visible_field_ptr() + 1 +
2419                                       unupdated_check_opt_tables.elements;
2420             *modified_fields; ++modified_fields)
2421         {
2422           (*modified_fields)->set_tmp_nullable();
2423         }
2424       }
2425 
2426       /* Store regular updated fields in the row. */
2427       fill_record(thd, tmp_table,
2428                   tmp_table->visible_field_ptr() +
2429                   1 + unupdated_check_opt_tables.elements,
2430                   *values_for_table[offset], NULL, NULL);
2431 
2432       // check if a record exists with the same hash value
2433       if (!check_unique_constraint(tmp_table))
2434         DBUG_RETURN(0); // skip adding duplicate record to the temp table
2435 
2436       /* Write row, ignoring duplicated updates to a row */
2437       error= tmp_table->file->ha_write_row(tmp_table->record[0]);
2438       if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
2439       {
2440         if (error &&
2441             (create_ondisk_from_heap(thd, tmp_table,
2442                                      tmp_table_param[offset].start_recinfo,
2443                                      &tmp_table_param[offset].recinfo,
2444                                      error, TRUE, NULL) ||
2445              tmp_table->file->ha_index_init(0, false /*sorted*/)))
2446          {
2447            do_update= 0;
2448            DBUG_RETURN(1);			// Not a table_is_full error
2449          }
2450         found++;
2451       }
2452     }
2453   }
2454   DBUG_RETURN(0);
2455 }
2456 
2457 
2458 void Query_result_update::send_error(uint errcode,const char *err)
2459 {
2460   /* First send the error, whatever it is ... */
2461   my_error(errcode, MYF(0), err);
2462 }
2463 
2464 
2465 
2466 static void invalidate_update_tables(THD *thd, TABLE_LIST *update_tables)
2467 {
2468   for (TABLE_LIST *tl= update_tables; tl != NULL; tl= tl->next_local)
2469   {
2470     query_cache.invalidate_single(thd, tl->updatable_base_table(), 1);
2471   }
2472 }
2473 
2474 
2475 void Query_result_update::abort_result_set()
2476 {
2477   /* Return if the error was already handled, or nothing was updated and there are no side effects */
2478   if (error_handled ||
2479       (!thd->get_transaction()->cannot_safely_rollback(
2480         Transaction_ctx::STMT) && !updated))
2481     return;
2482 
2483   /* Something already updated so we have to invalidate cache */
2484   if (updated)
2485     invalidate_update_tables(thd, update_tables);
2486 
2487   /*
2488     If all tables that have been updated are transaction-safe then just do a rollback.
2489     If not, attempt to do the remaining updates.
2490   */
2491 
2492   if (! trans_safe)
2493   {
2494     assert(thd->get_transaction()->cannot_safely_rollback(
2495                                                           Transaction_ctx::STMT));
2496     if (do_update && table_count > 1)
2497     {
2498       /* Add warning here */
2499       /*
2500          todo/fixme: do_update() is never called with the arg 1.
2501          should it change the signature to become argless?
2502       */
2503       (void) do_updates();
2504     }
2505   }
2506   if (thd->get_transaction()->cannot_safely_rollback(Transaction_ctx::STMT))
2507   {
2508     /*
2509       The query has to be written to the binary log because a non-transactional
2510       table was modified, either from the query's table list or via a stored routine: bug#13270,23333
2511     */
2512 #ifdef WITH_WSREP
2513     if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
2514 #else
2515     if (mysql_bin_log.is_open())
2516 #endif
2517     {
2518       /*
2519         THD::killed status might not have been set at the time the error
2520         was caught; if it is set later, the killed error is written
2521         into the replication event.
2522       */
2523       int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
2524       /* the error of binary logging is ignored */
2525       (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
2526                               thd->query().str, thd->query().length,
2527                               transactional_tables, false, false, errcode);
2528     }
2529   }
2530   assert(trans_safe || !updated ||
2531          thd->get_transaction()->cannot_safely_rollback(
2532                                                         Transaction_ctx::STMT));
2533 }
2534 
2535 
2536 int Query_result_update::do_updates()
2537 {
2538   TABLE_LIST *cur_table;
2539   int local_error= 0;
2540   ha_rows org_updated;
2541   TABLE *table, *tmp_table;
2542   List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
2543   myf error_flags= MYF(0);                      /**< Flag for fatal errors */
2544 
2545   DBUG_ENTER("Query_result_update::do_updates");
2546 
2547   do_update= 0;					// Don't retry this function
2548 
2549   if (!found)
2550   {
2551     /*
2552       If the binary log is on, we still need to check
2553       if there are transactional tables involved. If
2554       there are, set the transactional_tables flag correctly.
2555 
2556       This flag determines whether the writes go into the
2557       transactional or non-transactional cache. Even if they
2558       do not change any table, they are still written into
2559       the binary log when the format is STMT or MIXED.
2560     */
2561     if(mysql_bin_log.is_open())
2562     {
2563       for (cur_table= update_tables; cur_table;
2564            cur_table= cur_table->next_local)
2565       {
2566         table = cur_table->table;
2567         transactional_tables= transactional_tables ||
2568                               table->file->has_transactions();
2569       }
2570     }
2571     DBUG_RETURN(0);
2572   }
2573   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2574   {
2575     uint offset= cur_table->shared;
2576 
2577     table = cur_table->table;
2578 
2579     /*
2580       Always update the flag - even if not updating the table -
2581       when the binary log is ON. This allows the right binlog
2582       cache - stmt or trx cache - to be selected when logging
2583       ineffective statements to the binary log (in STMT or MIXED
2584       mode logging).
2585      */
2586     if (mysql_bin_log.is_open())
2587       transactional_tables= transactional_tables || table->file->has_transactions();
2588 
2589     if (table == table_to_update)
2590       continue;                                        // Already updated
2591     org_updated= updated;
2592     tmp_table= tmp_tables[cur_table->shared];
2593     tmp_table->file->ha_index_or_rnd_end();
2594     tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
2595     if ((local_error= table->file->ha_rnd_init(0)))
2596     {
2597       if (table->file->is_fatal_error(local_error))
2598         error_flags|= ME_FATALERROR;
2599 
2600       table->file->print_error(local_error, error_flags);
2601       goto err;
2602     }
2603 
2604     table->file->extra(HA_EXTRA_NO_CACHE);
2605 
2606     check_opt_it.rewind();
2607     while(TABLE *tbl= check_opt_it++)
2608     {
2609       if (tbl->file->ha_rnd_init(1))
2610         // No known handler error code present, print_error makes no sense
2611         goto err;
2612       tbl->file->extra(HA_EXTRA_CACHE);
2613     }
2614 
2615     /*
2616       Setup copy functions to copy fields from temporary table
2617     */
2618     List_iterator_fast<Item> field_it(*fields_for_table[offset]);
2619     Field **field= tmp_table->visible_field_ptr() +
2620                    1 + unupdated_check_opt_tables.elements; // Skip row pointers
2621     Copy_field *copy_field_ptr= copy_field, *copy_field_end;
2622     for ( ; *field ; field++)
2623     {
2624       Item_field *item= (Item_field* ) field_it++;
2625       (copy_field_ptr++)->set(item->field, *field, 0);
2626     }
2627     copy_field_end=copy_field_ptr;
2628 
2629     if ((local_error = tmp_table->file->ha_rnd_init(1)))
2630     {
2631       if (table->file->is_fatal_error(local_error))
2632         error_flags|= ME_FATALERROR;
2633 
2634       table->file->print_error(local_error, error_flags);
2635       goto err;
2636     }
2637 
2638     for (;;)
2639     {
2640       if (thd->killed && trans_safe)
2641         // No known handler error code present, print_error makes no sense
2642         goto err;
2643       if ((local_error=tmp_table->file->ha_rnd_next(tmp_table->record[0])))
2644       {
2645         if (local_error == HA_ERR_END_OF_FILE)
2646           break;
2647         if (local_error == HA_ERR_RECORD_DELETED)
2648           continue;                             // May happen on dup key
2649         if (table->file->is_fatal_error(local_error))
2650           error_flags|= ME_FATALERROR;
2651 
2652         table->file->print_error(local_error, error_flags);
2653         goto err;
2654       }
2655 
2656       /* call ha_rnd_pos() using rowids from temporary table */
2657       check_opt_it.rewind();
2658       TABLE *tbl= table;
2659       uint field_num= 0;
2660       do
2661       {
2662         /*
2663           The row-id is after the "length bytes", and the storage
2664           engine knows its length. Pass the pointer to the data after
2665           the "length bytes" to ha_rnd_pos().
2666         */
2667         uchar *data_ptr = NULL;
2668         tmp_table->visible_field_ptr()[field_num]->get_ptr(&data_ptr);
2669         if ((local_error = tbl->file->ha_rnd_pos(
2670                  tbl->record[0],
2671                  const_cast<uchar *>(data_ptr)))) {
2672           if (table->file->is_fatal_error(local_error))
2673             error_flags|= ME_FATALERROR;
2674 
2675           table->file->print_error(local_error, error_flags);
2676           goto err;
2677         }
2678         field_num++;
2679       } while((tbl= check_opt_it++));
2680 
2681       table->status|= STATUS_UPDATED;
2682       store_record(table,record[1]);
2683 
2684       /* Copy data from temporary table to current table */
2685       for (copy_field_ptr=copy_field;
2686 	   copy_field_ptr != copy_field_end;
2687 	   copy_field_ptr++)
2688         copy_field_ptr->invoke_do_copy(copy_field_ptr);
2689 
2690       // The above didn't update generated columns
2691       if (table->vfield &&
2692           update_generated_write_fields(table->write_set, table))
2693         goto err;
2694 
2695       if (table->triggers)
2696       {
2697         bool rc= table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2698                                                    TRG_ACTION_BEFORE, true);
2699 
2700         // Trigger might have changed dependencies of generated columns
2701         if (!rc && table->vfield &&
2702             update_generated_write_fields(table->write_set, table))
2703           goto err;
2704 
2705         table->triggers->disable_fields_temporary_nullability();
2706 
2707         if (rc || check_record(thd, table->field))
2708           goto err;
2709       }
2710 
2711       if (!records_are_comparable(table) || compare_records(table))
2712       {
2713         update_operations[offset]->set_function_defaults(table);
2714         int error;
2715         if ((error= cur_table->view_check_option(thd)) !=
2716             VIEW_CHECK_OK)
2717         {
2718           if (error == VIEW_CHECK_SKIP)
2719             continue;
2720           else if (error == VIEW_CHECK_ERROR)
2721             // No known handler error code present, print_error makes no sense
2722             goto err;
2723         }
2724         local_error= table->file->ha_update_row(table->record[1],
2725                                                 table->record[0]);
2726         if (!local_error)
2727           updated++;
2728         else if (local_error == HA_ERR_RECORD_IS_THE_SAME)
2729           local_error= 0;
2730         else
2731 	{
2732           if (table->file->is_fatal_error(local_error))
2733             error_flags|= ME_FATALERROR;
2734 
2735           table->file->print_error(local_error, error_flags);
2736           /* Errors could be downgraded to warning by IGNORE */
2737           if (thd->is_error())
2738             goto err;
2739         }
2740       }
2741 
2742       if (!local_error && table->triggers &&
2743           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2744                                             TRG_ACTION_AFTER, TRUE))
2745         goto err;
2746     }
2747 
2748     if (updated != org_updated)
2749     {
2750       if (!table->file->has_transactions())
2751       {
2752         trans_safe= FALSE;				// Can't do safe rollback
2753         thd->get_transaction()->mark_modified_non_trans_table(
2754           Transaction_ctx::STMT);
2755       }
2756     }
2757     (void) table->file->ha_rnd_end();
2758     (void) tmp_table->file->ha_rnd_end();
2759     check_opt_it.rewind();
2760     while (TABLE *tbl= check_opt_it++)
2761         tbl->file->ha_rnd_end();
2762 
2763   }
2764   DBUG_RETURN(0);
2765 
2766 err:
2767   if (table->file->inited)
2768     (void) table->file->ha_rnd_end();
2769   if (tmp_table->file->inited)
2770     (void) tmp_table->file->ha_rnd_end();
2771   check_opt_it.rewind();
2772   while (TABLE *tbl= check_opt_it++)
2773   {
2774     if (tbl->file->inited)
2775       (void) tbl->file->ha_rnd_end();
2776   }
2777 
2778   if (updated != org_updated)
2779   {
2780     if (table->file->has_transactions())
2781       transactional_tables= TRUE;
2782     else
2783     {
2784       trans_safe= FALSE;
2785       thd->get_transaction()->mark_modified_non_trans_table(
2786         Transaction_ctx::STMT);
2787     }
2788   }
2789   DBUG_RETURN(1);
2790 }
2791 
2792 
2793 /* out: 1 if error, 0 if success */
2794 
2795 bool Query_result_update::send_eof()
2796 {
2797   char buff[STRING_BUFFER_USUAL_SIZE];
2798   ulonglong id;
2799   THD::killed_state killed_status= THD::NOT_KILLED;
2800   DBUG_ENTER("Query_result_update::send_eof");
2801   THD_STAGE_INFO(thd, stage_updating_reference_tables);
2802 
2803   /*
2804      Do updates for the last n - 1 tables; returns 0 if ok.
2805      The error takes into account the killed status gained in do_updates().
2806   */
2807   int local_error= thd->is_error();
2808   if (!local_error)
2809     local_error = (table_count) ? do_updates() : 0;
2810   /*
2811     If local_error is not set until after do_updates(), then a kill
2812     carried out later should not affect binlogging.
2813   */
2814   killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
2815   THD_STAGE_INFO(thd, stage_end);
2816 
2817   /* We must invalidate the query cache before binlog writing and
2818   ha_autocommit_... */
2819 
2820   if (updated)
2821     invalidate_update_tables(thd, update_tables);
2822 
2823   /*
2824     Write the SQL statement to the binlog if we updated
2825     rows and we succeeded, or if we updated some
2826     non-transactional tables.
2827 
2828     The query has to be written to the binlog because a non-transactional table
2829     was modified, either from the query's table list or via a stored routine: bug#13270,23333
2830   */
2831 
2832   if (local_error == 0 ||
2833       thd->get_transaction()->cannot_safely_rollback(Transaction_ctx::STMT))
2834   {
2835 #ifdef WITH_WSREP
2836     if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
2837 #else
2838     if (mysql_bin_log.is_open())
2839 #endif
2840     {
2841       int errcode= 0;
2842       if (local_error == 0)
2843         thd->clear_error();
2844       else
2845         errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
2846       if (thd->binlog_query(THD::ROW_QUERY_TYPE,
2847                             thd->query().str, thd->query().length,
2848                             transactional_tables, FALSE, FALSE, errcode))
2849       {
2850 	local_error= 1;				// Rollback update
2851       }
2852     }
2853   }
2854   assert(trans_safe || !updated ||
2855          thd->get_transaction()->cannot_safely_rollback(
2856                                                         Transaction_ctx::STMT));
2857 
2858   if (local_error != 0)
2859     error_handled= TRUE; // to force early leave from ::send_error()
2860 
2861   if (local_error > 0) // if the above log write did not fail ...
2862   {
2863     /* Safety: If we haven't got an error before (can happen in do_updates) */
2864     my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
2865 	       MYF(0));
2866     DBUG_RETURN(TRUE);
2867   }
2868 
2869   id= thd->arg_of_last_insert_id_function ?
2870     thd->first_successful_insert_id_in_prev_stmt : 0;
2871   my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO),
2872               (long) found, (long) updated,
2873               (long) thd->get_stmt_da()->current_statement_cond_count());
2874   ::my_ok(thd, thd->get_protocol()->has_client_capability(CLIENT_FOUND_ROWS) ?
2875           found : updated, id, buff);
2876   DBUG_RETURN(FALSE);
2877 }
2878 
2879 
2880 /**
2881   Try to execute UPDATE as single-table UPDATE
2882 
2883   If the UPDATE statement can be executed as a single-table UPDATE,
2884   then do it. Otherwise, defer its execution to the later
2885   execute_multi_table_update() call outside this function and set
2886   switch_to_multitable to "true".
2887 
2888   @param thd                            Current THD.
2889   @param [out] switch_to_multitable     True if the UPDATE statement can't
2890                                         be evaluated as single-table.
2891                                         Note: undefined if the function
2892                                         returns "true".
2893 
2894   @return true on error
2895   @return false otherwise.
2896 */
2897 bool Sql_cmd_update::try_single_table_update(THD *thd,
2898                                              bool *switch_to_multitable)
2899 {
2900   LEX *const lex= thd->lex;
2901   SELECT_LEX *const select_lex= lex->select_lex;
2902   SELECT_LEX_UNIT *const unit= lex->unit;
2903   TABLE_LIST *const first_table= select_lex->get_table_list();
2904   TABLE_LIST *const all_tables= first_table;
2905 
2906   if (update_precheck(thd, all_tables))
2907     return true;
2908 
2909   /*
2910     UPDATE IGNORE can be unsafe to log statement-based. We therefore use
2911     row-based logging if mixed or row-based logging is available.
2912     TODO: Check whether the order of the rows produced by the select part
2913     is deterministic. Waiting for BUG#42415.
2914   */
2915   if (lex->is_ignore())
2916     lex->set_stmt_unsafe(LEX::BINLOG_STMT_UNSAFE_UPDATE_IGNORE);
2917 
2918   assert(select_lex->offset_limit == 0);
2919   unit->set_limit(select_lex);
2920   MYSQL_UPDATE_START(const_cast<char*>(thd->query().str));
2921 
2922   // Need to open to check for multi-update
2923   if (open_tables_for_query(thd, all_tables, 0) ||
2924       mysql_update_prepare_table(thd, select_lex) ||
2925       run_before_dml_hook(thd))
2926     return true;
2927 
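  /*
    Only after the tables are open can we tell whether the single-table
    syntax really resolves to one table or to several (e.g. via a view).
  */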
2928   if (!all_tables->is_multiple_tables())
2929   {
2930     /* Push ignore / strict error handler */
2931     Ignore_error_handler ignore_handler;
2932     Strict_error_handler strict_handler;
2933     if (thd->lex->is_ignore())
2934       thd->push_internal_handler(&ignore_handler);
2935     else if (thd->is_strict_mode())
2936       thd->push_internal_handler(&strict_handler);
2937 
2938     ha_rows found= 0, updated= 0;
2939     const bool res= mysql_update(thd, select_lex->item_list,
2940                                  update_value_list,
2941                                  unit->select_limit_cnt,
2942                                  lex->duplicates,
2943                                  &found, &updated);
2944 
2945     /* Pop ignore / strict error handler */
2946     if (thd->lex->is_ignore() || thd->is_strict_mode())
2947       thd->pop_internal_handler();
2948     MYSQL_UPDATE_DONE(res, found, updated);
2949     if (res)
2950       return true;
2951     else
2952     {
2953       *switch_to_multitable= false;
2954       return false;
2955     }
2956   }
2957   else
2958   {
2959     assert(all_tables->is_view());
2960     DBUG_PRINT("info", ("Switch to multi-update"));
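    /*
      Illustration (hypothetical schema): an UPDATE written against a view
      that merges several base tables, such as
        CREATE VIEW v AS SELECT t1.a, t2.b FROM t1 JOIN t2 ON t1.id = t2.id;
        UPDATE v SET a = 1 WHERE b = 2;
      is parsed as a single-table UPDATE but must be executed as a
      multi-table update.
    */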
2961     if (!thd->in_sub_stmt)
2962       thd->query_plan.set_query_plan(SQLCOM_UPDATE_MULTI, lex,
2963                                      !thd->stmt_arena->is_conventional());
2964     MYSQL_UPDATE_DONE(true, 0, 0);
2965     *switch_to_multitable= true;
2966     return false;
2967   }
2968 }
2969 
2970 
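/**
  Execute a multi-table UPDATE: prepare the involved tables, apply slave
  filtering and read-only checks, and then run the update through a
  Query_result_update object.

  @param thd  Current THD.

  @return true on error
  @return false otherwise.
*/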
2971 bool Sql_cmd_update::execute_multi_table_update(THD *thd)
2972 {
2973   bool res= false;
2974   LEX *const lex= thd->lex;
2975   SELECT_LEX *const select_lex= lex->select_lex;
2976   TABLE_LIST *const first_table= select_lex->get_table_list();
2977   TABLE_LIST *const all_tables= first_table;
2978 
2979 #ifdef HAVE_REPLICATION
2980   /*
2981     Save whether thd->table_map_for_update is set for this multi-update
2982     statement (BUG#37051). It is set by Query_log_event::do_apply_event()
2983     on the slave, and mysql_multi_update_prepare() below will reset it,
2984     so the value must be remembered before that call.
2985   */
2986   const bool have_table_map_for_update= thd->table_map_for_update;
2987 #endif
2988 
2989   res= mysql_multi_update_prepare(thd);
2990 
2991 #ifdef HAVE_REPLICATION
2992   /* Check slave filtering rules */
2993   if (unlikely(thd->slave_thread && !have_table_map_for_update))
2994   {
2995     if (all_tables_not_ok(thd, all_tables))
2996     {
2997       if (res != 0)
2998       {
2999         res= 0;             /* ignore any earlier failure          */
3000         thd->clear_error(); /* replication filters take precedence */
3001       }
3002       /* we warn the slave SQL thread */
3003       my_error(ER_SLAVE_IGNORED_TABLE, MYF(0));
3004       return res;
3005     }
3006     if (res)
3007       return res;
3008   }
3009   else
3010   {
3011 #endif /* HAVE_REPLICATION */
3012     if (res)
3013       return res;
3014     if (check_readonly(thd, false) &&
3015         some_non_temp_table_to_be_updated(thd, all_tables))
3016     {
3017       err_readonly(thd);
3018       return res;
3019     }
3020 #ifdef HAVE_REPLICATION
3021   }  /* unlikely */
3022 #endif
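  /*
    The replication and non-replication paths join here: run the actual
    multi-table update with the appropriate IGNORE/strict error handler
    pushed around it.
  */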
3023   {
3024     /* Push ignore / strict error handler */
3025     Ignore_error_handler ignore_handler;
3026     Strict_error_handler strict_handler;
3027     if (thd->lex->is_ignore())
3028       thd->push_internal_handler(&ignore_handler);
3029     else if (thd->is_strict_mode())
3030       thd->push_internal_handler(&strict_handler);
3031 
3032     Query_result_update *result_obj;
3033     MYSQL_MULTI_UPDATE_START(const_cast<char*>(thd->query().str));
3034     res= mysql_multi_update(thd,
3035                             &select_lex->item_list,
3036                             &update_value_list,
3037                             lex->duplicates,
3038                             select_lex,
3039                             &result_obj);
3040 
3041     /* Pop ignore / strict error handler */
3042     if (thd->lex->is_ignore() || thd->is_strict_mode())
3043       thd->pop_internal_handler();
3044 
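    /*
      result_obj is filled in by mysql_multi_update(); it can still be NULL
      if the statement failed before the Query_result_update object was
      created.
    */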
3045     if (result_obj)
3046     {
3047       MYSQL_MULTI_UPDATE_DONE(res, result_obj->num_found(),
3048                               result_obj->num_updated());
3049       res= FALSE; /* Ignore errors here */
3050       delete result_obj;
3051     }
3052     else
3053     {
3054       MYSQL_MULTI_UPDATE_DONE(1, 0, 0);
3055     }
3056   }
3057   return res;
3058 }
3059 
3060 
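/**
  Entry point for UPDATE execution: statements parsed as explicit
  multi-table UPDATEs go straight to the multi-table path, while
  single-table syntax is first attempted as a single-table update and
  switched to the multi-table path only if necessary.
*/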
3061 bool Sql_cmd_update::execute(THD *thd)
3062 {
3063   if (thd->lex->sql_command == SQLCOM_UPDATE_MULTI)
3064   {
3065     return multi_update_precheck(thd, thd->lex->select_lex->get_table_list()) ||
3066            execute_multi_table_update(thd);
3067   }
3068 
3069   bool switch_to_multitable;
3070   if (try_single_table_update(thd, &switch_to_multitable))
3071     return true;
3072   if (switch_to_multitable)
3073   {
3074     sql_command= SQLCOM_UPDATE_MULTI;
3075     return execute_multi_table_update(thd);
3076   }
3077   return false;
3078 }
3079 
3080 
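/**
  Validate an UPDATE statement during PREPARE without executing it.
  A statement using single-table syntax may still have to be validated as a
  multi-table update when mysql_test_update() requests the switch.
*/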
3081 bool Sql_cmd_update::prepared_statement_test(THD *thd)
3082 {
3083   assert(thd->lex->query_tables == thd->lex->select_lex->get_table_list());
3084   if (thd->lex->sql_command == SQLCOM_UPDATE)
3085   {
3086     int res= mysql_test_update(thd);
3087     /* mysql_test_update returns 2 if we need to switch to multi-update */
3088     if (res == 2)
3089       return select_like_stmt_cmd_test(thd, this, OPTION_SETUP_TABLES_DONE);
3090     else
3091       return MY_TEST(res);
3092   }
3093   else
3094   {
3095     return multi_update_precheck(thd, thd->lex->query_tables) ||
3096            select_like_stmt_cmd_test(thd, this, OPTION_SETUP_TABLES_DONE);
3097   }
3098 }
3099