1 /* Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
2 
3    This program is free software; you can redistribute it and/or modify
4    it under the terms of the GNU General Public License, version 2.0,
5    as published by the Free Software Foundation.
6 
7    This program is also distributed with certain software (including
8    but not limited to OpenSSL) that is licensed under separate terms,
9    as designated in a particular file or component or in included license
10    documentation.  The authors of MySQL hereby grant you an additional
11    permission to link the program and your derivative works with the
12    separately licensed software that they have included with MySQL.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License, version 2.0, for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software Foundation,
21    51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */
22 
23 
24 /*
25   Single-table and multi-table UPDATE statements.
26   Multi-table updates were introduced by Sinisa & Monty
27 */
28 
29 #include "my_global.h"                          /* NO_EMBEDDED_ACCESS_CHECKS */
30 #include "sql_priv.h"
31 #include "unireg.h"                    // REQUIRED: for other includes
32 #include "sql_update.h"
33 #include "sql_cache.h"                          // query_cache_*
34 #include "sql_base.h"                       // close_tables_for_reopen
35 #include "sql_parse.h"                          // cleanup_items
36 #include "sql_partition.h"                   // partition_key_modified
37 #include "sql_select.h"
38 #include "sql_view.h"                           // check_key_in_view
39 #include "sp_head.h"
40 #include "sql_trigger.h"
41 #include "probes_mysql.h"
42 #include "debug_sync.h"
43 #include "key.h"                                // is_key_used
44 #include "sql_acl.h"                            // *_ACL, check_grant
45 #include "records.h"                            // init_read_record,
46                                                 // end_read_record
47 #include "filesort.h"                           // filesort
48 #include "opt_explain.h"
49 #include "sql_derived.h" // mysql_derived_prepare,
50                          // mysql_handle_derived,
51                          // mysql_derived_filling
52 #include "opt_trace.h"   // Opt_trace_object
53 #include "sql_tmp_table.h"                      // tmp tables
54 #include "sql_optimizer.h"                      // remove_eq_conds
55 #include "sql_resolver.h"                       // setup_order, fix_inner_refs
56 
57 /**
58    True if the table's input and output record buffers are comparable using
59    compare_records(TABLE*).
60  */
61 bool records_are_comparable(const TABLE *table) {
62   return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
63     bitmap_is_subset(table->write_set, table->read_set);
64 }
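/*
  Illustration (hypothetical table and statement): for a storage engine that
  sets HA_PARTIAL_COLUMN_READ, a statement such as

    UPDATE t SET b = 1 WHERE a = 10;

  may read only the columns present in the read_set. If some updated column is
  not also in the read_set, its "before" image is undefined, write_set is not
  a subset of read_set, and records_are_comparable() returns false; callers
  then fall back to treating the row as changed (see mysql_update()).
*/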
65 
66 
67 /**
68    Compares the input and output record buffers of the table to see if a row
69    has changed. The algorithm iterates over updated columns and if they are
70    nullable compares NULL bits in the buffer before comparing actual
71    data. Special care must be taken to compare only the relevant NULL bits and
72    mask out all others as they may be undefined. The storage engine will not
73    and should not touch them.
74 
75    @param table The table to evaluate.
76 
77    @return true if row has changed.
78    @return false otherwise.
79 */
80 bool compare_records(const TABLE *table)
81 {
82   DBUG_ASSERT(records_are_comparable(table));
83 
84   if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
85   {
86     /*
87       Storage engine may not have read all columns of the record.  Fields
88       (including NULL bits) not in the write_set may not have been read and
89       can therefore not be compared.
90     */
91     for (Field **ptr= table->field ; *ptr != NULL; ptr++)
92     {
93       Field *field= *ptr;
94       if (bitmap_is_set(table->write_set, field->field_index))
95       {
96         if (field->real_maybe_null())
97         {
98           uchar null_byte_index= field->null_offset();
99 
100           if (((table->record[0][null_byte_index]) & field->null_bit) !=
101               ((table->record[1][null_byte_index]) & field->null_bit))
102             return TRUE;
103         }
104         if (field->cmp_binary_offset(table->s->rec_buff_length))
105           return TRUE;
106       }
107     }
108     return FALSE;
109   }
110 
111   /*
112      The storage engine has read all columns, so it's safe to compare all bits
113      including those not in the write_set. This is cheaper than the field-by-field
114      comparison done above.
115   */
116   if (table->s->blob_fields + table->s->varchar_fields == 0)
117     // Fixed-size record: do bitwise comparison of the records
118     return cmp_record(table,record[1]);
119   /* Compare null bits */
120   if (memcmp(table->null_flags,
121 	     table->null_flags+table->s->rec_buff_length,
122 	     table->s->null_bytes))
123     return TRUE;				// Diff in NULL value
124   /* Compare updated fields */
125   for (Field **ptr= table->field ; *ptr ; ptr++)
126   {
127     if (bitmap_is_set(table->write_set, (*ptr)->field_index) &&
128 	(*ptr)->cmp_binary_offset(table->s->rec_buff_length))
129       return TRUE;
130   }
131   return FALSE;
132 }
133 
134 
135 /*
136   Check that all items resolve to real, updatable table fields
137 
138   SYNOPSIS
139     check_fields()
140     thd             thread handler
141     items           Items for check
142 
143   RETURN
144     TRUE  Items can't be used in UPDATE
145     FALSE Items are OK
146 */
147 
148 static bool check_fields(THD *thd, List<Item> &items)
149 {
150   List_iterator<Item> it(items);
151   Item *item;
152   Item_field *field;
153 
154   while ((item= it++))
155   {
156     if (!(field= item->field_for_view_update()))
157     {
158       /* The item has a name because it comes from the VIEW SELECT list */
159       my_error(ER_NONUPDATEABLE_COLUMN, MYF(0), item->item_name.ptr());
160       return TRUE;
161     }
162     /*
163       Make a temporary copy of the Item_field, to avoid the effect of changing
164       result_field on Item_ref objects which refer to this field
165     */
166     thd->change_item_tree(it.ref(), new Item_field(thd, field));
167   }
168   return FALSE;
169 }
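/*
  Illustration (hypothetical view): given

    CREATE VIEW v1 AS SELECT a + 1 AS b FROM t1;
    UPDATE v1 SET b = 5;

  the item 'b' resolves to the expression a + 1 rather than to a base table
  field, so field_for_view_update() returns NULL and ER_NONUPDATEABLE_COLUMN
  is reported for 'b'.
*/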
170 
171 
172 /**
173   Check if all expressions in list are constant expressions
174 
175   @param[in] values List of expressions
176 
177   @retval true Only constant expressions
178   @retval false At least one non-constant expression
179 */
180 
181 static bool check_constant_expressions(List<Item> &values)
182 {
183   Item *value;
184   List_iterator_fast<Item> v(values);
185   DBUG_ENTER("check_constant_expressions");
186 
187   while ((value= v++))
188   {
189     if (!value->const_item())
190     {
191       DBUG_PRINT("exit", ("expression is not constant"));
192       DBUG_RETURN(false);
193     }
194   }
195   DBUG_PRINT("exit", ("expression is constant"));
196   DBUG_RETURN(true);
197 }
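/*
  Illustration (hypothetical statements): this check is one of the conditions
  for read-before-write removal further below in mysql_update().

    UPDATE t SET a = 10, b = 'x' WHERE pk = 5;   -- all SET values constant
    UPDATE t SET a = a + 1       WHERE pk = 5;   -- 'a + 1' is not constant

  Only the first form passes check_constant_expressions().
*/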
198 
199 
200 /*
201   Process usual UPDATE
202 
203   SYNOPSIS
204     mysql_update()
205     thd			thread handler
206     fields		fields for update
207     values		values of fields for update
208     conds		WHERE clause expression
209     order_num		number of elements in the ORDER BY clause
210     order		ORDER BY clause list
211     limit		limit clause
212     handle_duplicates	how to handle duplicates
213 
214   RETURN
215     0  - OK
216     2  - privilege check and table opening succeeded, but we need to convert
217          to a multi-table update because of view substitution
218     1  - error
219 */
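/*
  Illustration (hypothetical view over a join): return value 2 is produced for
  statements such as

    CREATE VIEW v2 AS SELECT t1.a, t2.b FROM t1 JOIN t2 ON t1.id = t2.id;
    UPDATE v2 SET a = 1 WHERE b = 2;

  where the single-table UPDATE of v2 has to be re-executed as a multi-table
  update (see the multitable_view check at the start of the function).
*/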
220 
221 int mysql_update(THD *thd,
222                  TABLE_LIST *table_list,
223                  List<Item> &fields,
224 		 List<Item> &values,
225                  Item *conds,
226                  uint order_num, ORDER *order,
227 		 ha_rows limit,
228 		 enum enum_duplicates handle_duplicates, bool ignore,
229                  ha_rows *found_return, ha_rows *updated_return)
230 {
231   bool		using_limit= limit != HA_POS_ERROR;
232   bool		safe_update= MY_TEST(thd->variables.option_bits & OPTION_SAFE_UPDATES);
233   bool          used_key_is_modified= FALSE, transactional_table, will_batch;
234   int           res;
235   int           error= 1;
236   int           loc_error;
237   uint          used_index, dup_key_found;
238   bool          need_sort= TRUE;
239   bool          reverse= FALSE;
240   bool          using_filesort;
241   bool          read_removal= false;
242 #ifndef NO_EMBEDDED_ACCESS_CHECKS
243   uint		want_privilege;
244 #endif
245   ha_rows	updated, found;
246   key_map	old_covering_keys;
247   TABLE		*table;
248   SQL_SELECT	*select= NULL;
249   READ_RECORD	info;
250   SELECT_LEX    *select_lex= &thd->lex->select_lex;
251   ulonglong     id;
252   List<Item> all_fields;
253   THD::killed_state killed_status= THD::NOT_KILLED;
254   COPY_INFO update(COPY_INFO::UPDATE_OPERATION, &fields, &values);
255 
256   DBUG_ENTER("mysql_update");
257 
258   if (open_normal_and_derived_tables(thd, table_list, 0))
259     DBUG_RETURN(1);
260 
261   if (table_list->multitable_view)
262   {
263     DBUG_ASSERT(table_list->view != 0);
264     DBUG_PRINT("info", ("Switch to multi-update"));
265     /* convert to multiupdate */
266     DBUG_RETURN(2);
267   }
268 
269   THD_STAGE_INFO(thd, stage_init);
270   table= table_list->table;
271 
272   if (!table_list->updatable)
273   {
274     my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
275     DBUG_RETURN(1);
276   }
277 
278   /* Calculate "table->covering_keys" based on the WHERE */
279   table->covering_keys= table->s->keys_in_use;
280   table->quick_keys.clear_all();
281   table->possible_quick_keys.clear_all();
282 
283 #ifndef NO_EMBEDDED_ACCESS_CHECKS
284   /* Force privilege re-checking for views after they have been opened. */
285   want_privilege= (table_list->view ? UPDATE_ACL :
286                    table_list->grant.want_privilege);
287 #endif
288   if (mysql_prepare_update(thd, table_list, &conds, order_num, order))
289     DBUG_RETURN(1);
290 
291   old_covering_keys= table->covering_keys;		// Keys used in WHERE
292   /* Check the fields we are going to modify */
293 #ifndef NO_EMBEDDED_ACCESS_CHECKS
294   table_list->grant.want_privilege= table->grant.want_privilege= want_privilege;
295   table_list->register_want_access(want_privilege);
296 #endif
297   if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
298                                 fields, MARK_COLUMNS_WRITE, 0, 0))
299     DBUG_RETURN(1);                     /* purecov: inspected */
300   if (table_list->view && check_fields(thd, fields))
301   {
302     DBUG_RETURN(1);
303   }
304   if (!table_list->updatable || check_key_in_view(thd, table_list))
305   {
306     my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias, "UPDATE");
307     DBUG_RETURN(1);
308   }
309 
310   if (update.add_function_default_columns(table, table->write_set))
311     DBUG_RETURN(1);
312 
313 #ifndef NO_EMBEDDED_ACCESS_CHECKS
314   /* Check values */
315   table_list->grant.want_privilege= table->grant.want_privilege=
316     (SELECT_ACL & ~table->grant.privilege);
317 #endif
318   if (setup_fields(thd, Ref_ptr_array(), values, MARK_COLUMNS_READ, 0, 0))
319   {
320     free_underlaid_joins(thd, select_lex);
321     DBUG_RETURN(1);				/* purecov: inspected */
322   }
323 
324   if (select_lex->inner_refs_list.elements &&
325     fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
326     DBUG_RETURN(1);
327 
328   if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
329       update.function_defaults_apply(table))
330     /*
331       A column is to be set to its ON UPDATE function default only if other
332       columns of the row are changing. To know this, we must be able to
333       compare the "before" and "after" value of those columns
334       (i.e. records_are_comparable() must be true below). Thus, we must read
335       those columns:
336     */
337     bitmap_union(table->read_set, table->write_set);
338 
339   // Don't count on usage of 'only index' when calculating which key to use
340   table->covering_keys.clear_all();
341 
342   /*
343     This must be done before partition pruning, since prune_partitions()
344     uses table->write_set and may also prune partition locks based on it.
345   */
346   if (table->triggers)
347     table->triggers->mark_fields_used(TRG_EVENT_UPDATE);
348 
349 #ifdef WITH_PARTITION_STORAGE_ENGINE
350   if (table->part_info)
351   {
352     if (prune_partitions(thd, table, conds))
353       DBUG_RETURN(1);
354     if (table->all_partitions_pruned_away)
355     {
356       /* No matching records */
357       if (thd->lex->describe)
358       {
359         error= explain_no_table(thd,
360                                 "No matching rows after partition pruning");
361         goto exit_without_my_ok;
362       }
363       my_ok(thd);                            // No matching records
364       DBUG_RETURN(0);
365     }
366   }
367 #endif
368   if (lock_tables(thd, table_list, thd->lex->table_count, 0))
369     DBUG_RETURN(1);
370 
371   // Must be done after lock_tables()
372   if (conds)
373   {
374     COND_EQUAL *cond_equal= NULL;
375     Item::cond_result result;
376     if (table_list->check_option)
377     {
378       /*
379         If this UPDATE is on a view with CHECK OPTION, Item_fields
380         must not be replaced by constants. The reason is that when
381         'conds' is optimized, 'check_option' is also optimized (it is
382         part of 'conds'). Const replacement is fine for 'conds'
383         because it is evaluated on a read row, but 'check_option' is
384         evaluated on a row with updated fields and needs those updated
385         values to be correct.
386 
387         Example:
388         CREATE VIEW v1 ... WHERE fld < 2 WITH CHECK OPTION
389         UPDATE v1 SET fld=4 WHERE fld=1
390 
391         check_option is  "(fld < 2)"
392         conds is         "(fld < 2) and (fld = 1)"
393 
394         optimize_cond() would propagate fld=1 to the first argument of
395         the AND to create "(1 < 2) AND (fld = 1)". After this,
396         check_option would be "(1 < 2)". But for check_option to work
397         it must be evaluated with the *updated* value of fld: 4.
398         Otherwise it will evaluate to true even when it should be
399         false, which is the case for the UPDATE statement above.
400 
401         Thus, if there is a check_option, we do only the "safe" parts
402         of optimize_cond(): Item_row -> Item_func_eq conversion (to
403         enable range access) and removal of always true/always false
404         predicates.
405 
406         An alternative to restricting this optimization of 'conds' in
407         the presence of check_option: the Item-tree of 'check_option'
408         could be cloned before optimizing 'conds' and thereby avoid
409         const replacement. However, at the moment there is no such
410         thing as Item::clone().
411       */
412       conds= build_equal_items(thd, conds, NULL, false,
413                                select_lex->join_list, &cond_equal);
414       conds= remove_eq_conds(thd, conds, &result);
415     }
416     else
417       conds= optimize_cond(thd, conds, &cond_equal, select_lex->join_list,
418                            true, &result);
419 
420     if (result == Item::COND_FALSE)
421     {
422       limit= 0;                                   // Impossible WHERE
423       if (thd->lex->describe)
424       {
425         error= explain_no_table(thd, "Impossible WHERE");
426         goto exit_without_my_ok;
427       }
428     }
429     if (conds)
430     {
431       conds= substitute_for_best_equal_field(conds, cond_equal, 0);
432       conds->update_used_tables();
433     }
434   }
435 
436 #ifdef WITH_PARTITION_STORAGE_ENGINE
437   /*
438     Also try a second time after locking, to prune when subqueries and
439     stored programs can be evaluated.
440   */
441   if (table->part_info)
442   {
443     if (prune_partitions(thd, table, conds))
444       DBUG_RETURN(1);
445     if (table->all_partitions_pruned_away)
446     {
447       /* No matching records */
448       if (thd->lex->describe)
449       {
450         error= explain_no_table(thd,
451                                 "No matching rows after partition pruning");
452         goto exit_without_my_ok;
453       }
454       my_ok(thd);                            // No matching records
455       DBUG_RETURN(0);
456     }
457   }
458 #endif
459   /* Update the table->file->stats.records number */
460   table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
461 
462   table->mark_columns_needed_for_update(false/*mark_binlog_columns=false*/);
463   select= make_select(table, 0, 0, conds, 0, &error);
464 
465   { // Enter scope for optimizer trace wrapper
466     Opt_trace_object wrapper(&thd->opt_trace);
467     wrapper.add_utf8_table(table);
468 
469     if (error || !limit ||
470         (select && select->check_quick(thd, safe_update, limit)))
471     {
472       if (thd->lex->describe && !error && !thd->is_error())
473       {
474         error= explain_no_table(thd, "Impossible WHERE");
475         goto exit_without_my_ok;
476       }
477       delete select;
478       free_underlaid_joins(thd, select_lex);
479       /*
480         There was an error or the error was already sent by
481         the quick select evaluation.
482         TODO: Add error code output parameter to Item::val_xxx() methods.
483         Currently they rely on the user checking DA for
484         errors when unwinding the stack after calling Item::val_xxx().
485       */
486       if (error || thd->is_error())
487       {
488         DBUG_RETURN(1);				// Error in where
489       }
490 
491       char buff[MYSQL_ERRMSG_SIZE];
492       my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), 0, 0,
493                   (ulong) thd->get_stmt_da()->current_statement_warn_count());
494       my_ok(thd, 0, 0, buff);
495 
496       DBUG_PRINT("info",("0 records updated"));
497       DBUG_RETURN(0);
498     }
499   } // Ends scope for optimizer trace wrapper
500 
501   /* If running in safe sql mode, don't allow updates without keys */
502   if (table->quick_keys.is_clear_all())
503   {
504     thd->server_status|=SERVER_QUERY_NO_INDEX_USED;
505     if (safe_update && !using_limit)
506     {
507       my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
508 		 ER(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
509       goto exit_without_my_ok;
510     }
511   }
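  /*
    Illustration (hypothetical session): with SQL_SAFE_UPDATES enabled,

      SET SQL_SAFE_UPDATES = 1;
      UPDATE t SET a = 1;             -- no usable key and no LIMIT: rejected
      UPDATE t SET a = 1 LIMIT 10;    -- accepted, a LIMIT is given

    only the statement without a usable key and without LIMIT hits the
    ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE error above.
  */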
512 
513   if (select_lex->has_ft_funcs() && init_ftfuncs(thd, select_lex, 1))
514     goto exit_without_my_ok;
515 
516   table->update_const_key_parts(conds);
517   order= simple_remove_const(order, conds);
518 
519   used_index= get_index_for_order(order, table, select, limit,
520                                   &need_sort, &reverse);
521   if (need_sort)
522   { // Assign table scan index to check below for modified key fields:
523     used_index= table->file->key_used_on_scan;
524   }
525   if (used_index != MAX_KEY)
526   { // Check if we are modifying a key that we use to search with:
527     used_key_is_modified= is_key_used(table, used_index, table->write_set);
528   }
529   else if (select && select->quick)
530   {
531     /*
532       select->quick != NULL and used_index == MAX_KEY happens for index
533       merge and should be handled in a different way.
534     */
535     used_key_is_modified= (!select->quick->unique_key_range() &&
536                            select->quick->is_keys_used(table->write_set));
537   }
538 
539 #ifdef WITH_PARTITION_STORAGE_ENGINE
540   used_key_is_modified|= partition_key_modified(table, table->write_set);
541 #endif
542   table->mark_columns_per_binlog_row_image();
543   using_filesort= order && (need_sort||used_key_is_modified);
544   if (thd->lex->describe)
545   {
546     const bool using_tmp_table= !using_filesort &&
547                                 (used_key_is_modified || order);
548     error= explain_single_table_modification(thd, table, select, used_index,
549                                              limit, using_tmp_table,
550                                              using_filesort,
551                                              true,
552                                              used_key_is_modified);
553     goto exit_without_my_ok;
554   }
555 
556   if (used_key_is_modified || order)
557   {
558     /*
559       We can't update the table directly; we must first find all
560       matching rows before updating the table!
561     */
562 
563     if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
564       table->set_keyread(true);
565 
566     /* Note: filesort is skipped if the used index already returns rows in order */
567     if (using_filesort)
568     {
569       /*
570 	Doing an ORDER BY;  Let filesort find and sort the rows we are going
571 	to update
572         NOTE: filesort will call table->prepare_for_position()
573       */
574       ha_rows examined_rows;
575       ha_rows found_rows;
576       Filesort fsort(order, limit, select);
577 
578       table->sort.io_cache = (IO_CACHE *) my_malloc(sizeof(IO_CACHE),
579 						    MYF(MY_FAE | MY_ZEROFILL));
580       if ((table->sort.found_records= filesort(thd, table, &fsort, true,
581                                                &examined_rows, &found_rows))
582           == HA_POS_ERROR)
583       {
584         goto exit_without_my_ok;
585       }
586       thd->inc_examined_row_count(examined_rows);
587       /*
588 	Filesort has already found and selected the rows we want to update,
589 	so we don't need the where clause
590       */
591       delete select;
592       select= 0;
593     }
594     else
595     {
596       /*
597 	We are doing a search on a key that is updated. In this case
598 	we go through the matching rows, save pointers to them, and
599 	update those rows in a separate loop based on the saved pointers.
600       */
601       table->prepare_for_position();
602 
603       IO_CACHE tempfile;
604       if (open_cached_file(&tempfile, mysql_tmpdir,TEMP_PREFIX,
605 			   DISK_BUFFER_SIZE, MYF(MY_WME)))
606         goto exit_without_my_ok;
607 
608       /* If quick select is used, initialize it before retrieving rows. */
609       if (select && select->quick && (error= select->quick->reset()))
610       {
611         close_cached_file(&tempfile);
612         table->file->print_error(error, MYF(0));
613         goto exit_without_my_ok;
614       }
615       table->file->try_semi_consistent_read(1);
616 
617       /*
618         When we get here, we have one of the following options:
619         A. used_index == MAX_KEY
620            This means we should use full table scan, and start it with
621            init_read_record call
622         B. used_index != MAX_KEY
623            B.1 quick select is used, start the scan with init_read_record
624            B.2 quick select is not used, this is full index scan (with LIMIT)
625                Full index scan must be started with init_read_record_idx
626       */
627 
628       if (used_index == MAX_KEY || (select && select->quick))
629         error= init_read_record(&info, thd, table, select, 0, 1, FALSE);
630       else
631         error= init_read_record_idx(&info, thd, table, 1, used_index, reverse);
632 
633       if (error)
634       {
635         close_cached_file(&tempfile);
636         goto exit_without_my_ok;
637       }
638 
639       THD_STAGE_INFO(thd, stage_searching_rows_for_update);
640       ha_rows tmp_limit= limit;
641 
642       while (!(error=info.read_record(&info)) && !thd->killed)
643       {
644         thd->inc_examined_row_count(1);
645         bool skip_record= FALSE;
646         if (select && select->skip_record(thd, &skip_record))
647         {
648           error= 1;
649           /*
650             Don't try unlocking the row if skip_record reported an error since
651             in this case the transaction might have been rolled back already.
652           */
653           break;
654         }
655         if (!skip_record)
656 	{
657           if (table->file->was_semi_consistent_read())
658 	    continue;  /* repeat the read of the same row if it still exists */
659 
660 	  table->file->position(table->record[0]);
661 	  if (my_b_write(&tempfile,table->file->ref,
662 			 table->file->ref_length))
663 	  {
664 	    error=1; /* purecov: inspected */
665 	    break; /* purecov: inspected */
666 	  }
667 	  if (!--limit && using_limit)
668 	  {
669 	    error= -1;
670 	    break;
671 	  }
672 	}
673 	else
674 	  table->file->unlock_row();
675       }
676       if (thd->killed && !error)
677 	error= 1;				// Aborted
678       limit= tmp_limit;
679       table->file->try_semi_consistent_read(0);
680       end_read_record(&info);
681 
682       /* Change select to use tempfile */
683       if (select)
684       {
685         select->set_quick(NULL);
686         if (select->free_cond)
687           delete select->cond;
688         select->cond= NULL;
689       }
690       else
691       {
692 	select= new SQL_SELECT;
693 	select->head=table;
694       }
695       if (reinit_io_cache(&tempfile,READ_CACHE,0L,0,0))
696 	error=1; /* purecov: inspected */
697       select->file=tempfile;			// Read row ptrs from this file
698       setup_io_cache(&select->file);
699       if (error >= 0)
700         goto exit_without_my_ok;
701     }
702     if (used_index < MAX_KEY && old_covering_keys.is_set(used_index))
703       table->set_keyread(false);
704   }
705 
706   if (ignore)
707     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
708 
709   if (select && select->quick && (error= select->quick->reset()))
710   {
711     table->file->print_error(error, MYF(0));
712     goto exit_without_my_ok;
713   }
714   table->file->try_semi_consistent_read(1);
715   if ((error= init_read_record(&info, thd, table, select, 0, 1, FALSE)))
716     goto exit_without_my_ok;
717 
718   updated= found= 0;
719   /*
720     Generate an error (in TRADITIONAL mode) or warning
721     when trying to set a NOT NULL field to NULL.
722   */
723   thd->count_cuted_fields= CHECK_FIELD_WARN;
724   thd->cuted_fields=0L;
725   THD_STAGE_INFO(thd, stage_updating);
726 
727   transactional_table= table->file->has_transactions();
728   thd->abort_on_warning= (!ignore && thd->is_strict_mode());
729 
730   if (table->triggers &&
731       table->triggers->has_triggers(TRG_EVENT_UPDATE,
732                                     TRG_ACTION_AFTER))
733   {
734     /*
735       The table has AFTER UPDATE triggers that might access the subject
736       table and therefore might need the update to be done immediately.
737       So we turn off batching.
738     */
739     (void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
740     will_batch= FALSE;
741   }
742   else
743     will_batch= !table->file->start_bulk_update();
744 
745   if ((table->file->ha_table_flags() & HA_READ_BEFORE_WRITE_REMOVAL) &&
746       !ignore && !using_limit &&
747       !(table->triggers && table->triggers->has_update_triggers()) &&
748       select && select->quick && select->quick->index != MAX_KEY &&
749       check_constant_expressions(values))
750     read_removal= table->check_read_removal(select->quick->index);
751 
752   error= table->file->ha_fast_update(thd, fields, values, conds);
753   if (error == 0)
754     error= -1; // error < 0 means really no error at all (see below)
755   else if (error != ENOTSUP) {
756     error= 1;
757   }
758   else
759   while (!(error=info.read_record(&info)) && !thd->killed)
760   {
761     thd->inc_examined_row_count(1);
762     bool skip_record;
763     if (!select || (!select->skip_record(thd, &skip_record) && !skip_record))
764     {
765       if (table->file->was_semi_consistent_read())
766         continue;  /* repeat the read of the same row if it still exists */
767 
768       store_record(table,record[1]);
769       if (fill_record_n_invoke_before_triggers(thd, fields, values, 0,
770                                                table->triggers,
771                                                TRG_EVENT_UPDATE))
772         break; /* purecov: inspected */
773 
774       found++;
775 
776       if (!records_are_comparable(table) || compare_records(table))
777       {
778         if ((res= table_list->view_check_option(thd, ignore)) !=
779             VIEW_CHECK_OK)
780         {
781           found--;
782           if (res == VIEW_CHECK_SKIP)
783             continue;
784           else if (res == VIEW_CHECK_ERROR)
785           {
786             error= 1;
787             break;
788           }
789         }
790 
791         /*
792           In order to keep MySQL legacy behavior, we do this update *after*
793           the CHECK OPTION test. Proper behavior is probably to throw an
794           error, though.
795         */
796         update.set_function_defaults(table);
797 
798         if (will_batch)
799         {
800           /*
801             Typically a batched handler can execute the batched jobs when:
802             1) Specifically told to do so
803             2) It is not a good idea to batch anymore
804             3) It is necessary to send the batch for other reasons
805                (one such reason is when READs must be performed)
806 
807             1) is covered by exec_bulk_update calls.
808             2) and 3) are handled by the bulk_update_row method.
809 
810             bulk_update_row can execute the updates including the one
811             defined in the bulk_update_row or not including the row
812             in the call. This is up to the handler implementation and can
813             vary from call to call.
814 
815             The dup_key_found reports the number of duplicate keys found
816             in those updates actually executed. It is only maintained if
817             the extra call with HA_EXTRA_IGNORE_DUP_KEY has been issued.
818             If that call has not been issued, an error code is returned and
819             this number can be ignored. Thus any handler that implements
820             batching for UPDATE IGNORE must also handle this extra call properly.
821 
822             If a duplicate key is found on the record included in this
823             call then it should be included in the count of dup_key_found
824             and error should be set to 0 (only if these errors are ignored).
825           */
826           error= table->file->ha_bulk_update_row(table->record[1],
827                                                  table->record[0],
828                                                  &dup_key_found);
829           limit+= dup_key_found;
830           updated-= dup_key_found;
831         }
832         else
833         {
834           /* Non-batched update */
835 	  error= table->file->ha_update_row(table->record[1],
836                                             table->record[0]);
837         }
838         if (!error || error == HA_ERR_RECORD_IS_THE_SAME)
839 	{
840           if (error != HA_ERR_RECORD_IS_THE_SAME)
841             updated++;
842           else
843             error= 0;
844 	}
845  	else if (!ignore ||
846                  table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
847                                                     HA_CHECK_FK_ERROR))
848 	{
849           /*
850             If (ignore && error is ignorable) we don't have to
851             do anything; otherwise...
852           */
853           myf flags= 0;
854 
855           if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
856                                                  HA_CHECK_FK_ERROR))
857             flags|= ME_FATALERROR; /* Other handler errors are fatal */
858 
859 	  table->file->print_error(error,MYF(flags));
860 	  error= 1;
861 	  break;
862 	}
863         else if (ignore && !table->file->is_fatal_error(error,
864                                                         HA_CHECK_FK_ERROR))
865           warn_fk_constraint_violation(thd, table, error);
866       }
867 
868       if (table->triggers &&
869           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
870                                             TRG_ACTION_AFTER, TRUE))
871       {
872         error= 1;
873         break;
874       }
875 
876       if (!--limit && using_limit)
877       {
878         /*
879           In the most common situations we have reached end-of-file here:
880           when no batching has occurred, when batching was supposed to occur
881           but no updates were made, and when the batch was executed without
882           error and without finding any duplicate keys.
883           If the batched updates were performed with errors we need to check
884           them, and if there was no error but duplicate keys were found we
885           must continue, since those rows are not counted against the limit.
886         */
887         if (will_batch &&
888             ((error= table->file->exec_bulk_update(&dup_key_found)) ||
889              dup_key_found))
890         {
891  	  if (error)
892           {
893             /* purecov: begin inspected */
894             /*
895               The handler should not report error of duplicate keys if they
896               are ignored. This is a requirement on batching handlers.
897             */
898             table->file->print_error(error,MYF(0));
899             error= 1;
900             break;
901             /* purecov: end */
902           }
903           /*
904             Either an error was found and we are ignoring errors or there
905             were duplicate keys found. In both cases we need to correct
906             the counters and continue the loop.
907           */
908           limit= dup_key_found; // limit is 0 here, so add back the duplicate keys
909           updated-= dup_key_found;
910         }
911         else
912         {
913 	  error= -1;				// Simulate end of file
914 	  break;
915         }
916       }
917     }
918     /*
919       Don't try unlocking the row if skip_record reported an error since in
920       this case the transaction might have been rolled back already.
921     */
922     else if (!thd->is_error())
923       table->file->unlock_row();
924     else
925     {
926       error= 1;
927       break;
928     }
929     thd->get_stmt_da()->inc_current_row_for_warning();
930     if (thd->is_error())
931     {
932       error= 1;
933       break;
934     }
935   }
936   table->auto_increment_field_not_null= FALSE;
937   dup_key_found= 0;
938   /*
939     Caching the killed status to pass as the arg to the query event constructor;
940     the cached value cannot change, whereas the killed status can change
941     (externally) after this point, and such a later change won't affect
942     binlogging.
943     It's assumed that if an error was set in combination with an effective
944     killed status then the error is due to killing.
945   */
946   killed_status= thd->killed; // get the status of the volatile
947   // simulated killing after the loop must be ineffective for binlogging
948   DBUG_EXECUTE_IF("simulate_kill_bug27571",
949                   {
950                     thd->killed= THD::KILL_QUERY;
951                   };);
952   error= (killed_status == THD::NOT_KILLED)?  error : 1;
953 
954   if (error &&
955       will_batch &&
956       (loc_error= table->file->exec_bulk_update(&dup_key_found)))
957     /*
958       An error has occurred when a batched update was performed and returned
959       an error indication. It cannot be an allowed duplicate key error since
960       we require the batching handler to treat this as a normal behavior.
961 
962       Otherwise we simply subtract the number of duplicate key records found
963       in the batched update.
964     */
965   {
966     /* purecov: begin inspected */
967     table->file->print_error(loc_error,MYF(ME_FATALERROR));
968     error= 1;
969     /* purecov: end */
970   }
971   else
972     updated-= dup_key_found;
973   if (will_batch)
974     table->file->end_bulk_update();
975   table->file->try_semi_consistent_read(0);
976 
977   if (read_removal)
978   {
979     /* Only the handler knows how many records were really written */
980     updated= table->file->end_read_removal();
981     if (!records_are_comparable(table))
982       found= updated;
983   }
984 
985   if (!transactional_table && updated > 0)
986     thd->transaction.stmt.mark_modified_non_trans_table();
987 
988   end_read_record(&info);
989   delete select;
990   THD_STAGE_INFO(thd, stage_end);
991   (void) table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
992 
993   /*
994     Invalidate the table in the query cache if something changed.
995     This must be before binlog writing and ha_autocommit_...
996   */
997   if (updated)
998   {
999     query_cache_invalidate3(thd, table_list, 1);
1000   }
1001 
1002   /*
1003     error < 0 means really no error at all: we processed all rows until the
1004     last one without error. error > 0 means an error (e.g. unique key
1005     violation and no IGNORE or REPLACE). error == 0 is also an error (if
1006     preparing the record or invoking before triggers fails). See
1007     ha_autocommit_or_rollback(error>=0) and DBUG_RETURN(error>=0) below.
1008     Sometimes we want to binlog even if we updated no rows, in case user used
1009     it to be sure master and slave are in same state.
1010   */
1011   if ((error < 0) || thd->transaction.stmt.cannot_safely_rollback())
1012   {
1013     if (mysql_bin_log.is_open())
1014     {
1015       int errcode= 0;
1016       if (error < 0)
1017         thd->clear_error();
1018       else
1019         errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
1020 
1021       if (thd->binlog_query(THD::ROW_QUERY_TYPE,
1022                             thd->query(), thd->query_length(),
1023                             transactional_table, FALSE, FALSE, errcode))
1024       {
1025         error=1;				// Rollback update
1026       }
1027     }
1028   }
1029   DBUG_ASSERT(transactional_table || !updated ||
1030               thd->transaction.stmt.cannot_safely_rollback());
1031   free_underlaid_joins(thd, select_lex);
1032 
1033   /* If LAST_INSERT_ID(X) was used, report X */
1034   id= thd->arg_of_last_insert_id_function ?
1035     thd->first_successful_insert_id_in_prev_stmt : 0;
1036 
1037   if (error < 0)
1038   {
1039     char buff[MYSQL_ERRMSG_SIZE];
1040     my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO), (ulong) found,
1041                 (ulong) updated,
1042                 (ulong) thd->get_stmt_da()->current_statement_warn_count());
1043     ha_rows row_count=
1044         (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
1045     my_ok(thd, row_count, id, buff);
1046     thd->updated_row_count += row_count;
1047     DBUG_PRINT("info",("%ld records updated", (long) updated));
1048   }
1049   thd->count_cuted_fields= CHECK_FIELD_IGNORE;		/* calc cuted fields */
1050   thd->abort_on_warning= 0;
1051   *found_return= found;
1052   *updated_return= updated;
1053   DBUG_RETURN((error >= 0 || thd->is_error()) ? 1 : 0);
1054 
1055 exit_without_my_ok:
1056   delete select;
1057   free_underlaid_joins(thd, select_lex);
1058   table->set_keyread(FALSE);
1059   thd->abort_on_warning= 0;
1060   DBUG_RETURN(error);
1061 }
1062 
1063 /*
1064   Prepare items in UPDATE statement
1065 
1066   SYNOPSIS
1067     mysql_prepare_update()
1068     thd			- thread handler
1069     table_list		- global/local table list
1070     conds		- conditions
1071     order_num		- number of ORDER BY list entries
1072     order		- ORDER BY clause list
1073 
1074   RETURN VALUE
1075     FALSE OK
1076     TRUE  error
1077 */
1078 bool mysql_prepare_update(THD *thd, TABLE_LIST *table_list,
1079 			 Item **conds, uint order_num, ORDER *order)
1080 {
1081   Item *fake_conds= 0;
1082 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1083   TABLE *table= table_list->table;
1084 #endif
1085   List<Item> all_fields;
1086   SELECT_LEX *select_lex= &thd->lex->select_lex;
1087   DBUG_ENTER("mysql_prepare_update");
1088 
1089 #ifndef NO_EMBEDDED_ACCESS_CHECKS
1090   table_list->grant.want_privilege= table->grant.want_privilege=
1091     (SELECT_ACL & ~table->grant.privilege);
1092   table_list->register_want_access(SELECT_ACL);
1093 #endif
1094 
1095   thd->lex->allow_sum_func= 0;
1096 
1097   if (setup_tables_and_check_access(thd, &select_lex->context,
1098                                     &select_lex->top_join_list,
1099                                     table_list,
1100                                     &select_lex->leaf_tables,
1101                                     FALSE, UPDATE_ACL, SELECT_ACL) ||
1102       setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
1103       select_lex->setup_ref_array(thd, order_num) ||
1104       setup_order(thd, select_lex->ref_pointer_array,
1105 		  table_list, all_fields, all_fields, order) ||
1106       setup_ftfuncs(select_lex))
1107     DBUG_RETURN(TRUE);
1108 
1109   /* Check that we are not using the table we are updating in a subquery */
1110   {
1111     TABLE_LIST *duplicate;
1112     if ((duplicate= unique_table(thd, table_list, table_list->next_global, 0)))
1113     {
1114       update_non_unique_table_error(table_list, "UPDATE", duplicate);
1115       DBUG_RETURN(TRUE);
1116     }
1117   }
1118   select_lex->fix_prepare_information(thd, conds, &fake_conds);
1119   DBUG_RETURN(FALSE);
1120 }
1121 
1122 
1123 /***************************************************************************
1124   Update multiple tables from join
1125 ***************************************************************************/
1126 
1127 /*
1128   Get table map for list of Item_field
1129 */
1130 
1131 static table_map get_table_map(List<Item> *items)
1132 {
1133   List_iterator_fast<Item> item_it(*items);
1134   Item_field *item;
1135   table_map map= 0;
1136 
1137   while ((item= (Item_field *) item_it++))
1138     map|= item->used_tables();
1139   DBUG_PRINT("info", ("table_map: 0x%08lx", (long) map));
1140   return map;
1141 }
1142 
1143 /**
1144   If one row is updated through two different aliases and the first
1145   update physically moves the row, the second update will error
1146   because the row is no longer located where expected. This function
1147   checks if the multiple-table update is about to do that and if so
1148   returns with an error.
1149 
1150   The following update operations physically move rows:
1151     1) Update of a column in a clustered primary key
1152     2) Update of a column used to calculate which partition the row belongs to
1153 
1154   This function returns with an error if both of the following are
1155   true:
1156 
1157     a) A table in the multiple-table update statement is updated
1158        through multiple aliases (including views)
1159     b) At least one of the updates on the table from a) may physically
1160        move the row. Note: Updating a column used to calculate which
1161        partition a row belongs to does not necessarily mean that the
1162        row is moved. The new value may or may not belong to the same
1163        partition.
1164 
1165   @param leaves               First leaf table
1166   @param tables_for_update    Map of tables that are updated
1167 
1168   @return
1169     true   if the update is unsafe, in which case an error message is also set,
1170     false  otherwise.
1171 */
1172 static
1173 bool unsafe_key_update(TABLE_LIST *leaves, table_map tables_for_update)
1174 {
1175   TABLE_LIST *tl= leaves;
1176 
1177   for (tl= leaves; tl ; tl= tl->next_leaf)
1178   {
1179     if (tl->table->map & tables_for_update)
1180     {
1181       TABLE *table1= tl->table;
1182       bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
1183                                table1->s->primary_key != MAX_KEY);
1184 
1185       bool table_partitioned= false;
1186 #ifdef WITH_PARTITION_STORAGE_ENGINE
1187       table_partitioned= (table1->part_info != NULL);
1188 #endif
1189 
1190       if (!table_partitioned && !primkey_clustered)
1191         continue;
1192 
1193       for (TABLE_LIST* tl2= tl->next_leaf; tl2 ; tl2= tl2->next_leaf)
1194       {
1195         /*
1196           Look at "next" tables only since all previous tables have
1197           already been checked
1198         */
1199         TABLE *table2= tl2->table;
1200         if (table2->map & tables_for_update && table1->s == table2->s)
1201         {
1202 #ifdef WITH_PARTITION_STORAGE_ENGINE
1203           // A table is updated through two aliases
1204           if (table_partitioned &&
1205               (partition_key_modified(table1, table1->write_set) ||
1206                partition_key_modified(table2, table2->write_set)))
1207           {
1208             // Partitioned key is updated
1209             my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1210                      tl->belong_to_view ? tl->belong_to_view->alias
1211                                         : tl->alias,
1212                      tl2->belong_to_view ? tl2->belong_to_view->alias
1213                                          : tl2->alias);
1214             return true;
1215           }
1216 #endif
1217 
1218           if (primkey_clustered)
1219           {
1220             // The primary key can cover multiple columns
1221             KEY key_info= table1->key_info[table1->s->primary_key];
1222             KEY_PART_INFO *key_part= key_info.key_part;
1223             KEY_PART_INFO *key_part_end= key_part +
1224               key_info.user_defined_key_parts;
1225 
1226             for (;key_part != key_part_end; ++key_part)
1227             {
1228               if (bitmap_is_set(table1->write_set, key_part->fieldnr-1) ||
1229                   bitmap_is_set(table2->write_set, key_part->fieldnr-1))
1230               {
1231                 // Clustered primary key is updated
1232                 my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
1233                          tl->belong_to_view ? tl->belong_to_view->alias
1234                          : tl->alias,
1235                          tl2->belong_to_view ? tl2->belong_to_view->alias
1236                          : tl2->alias);
1237                 return true;
1238               }
1239             }
1240           }
1241         }
1242       }
1243     }
1244   }
1245   return false;
1246 }
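/*
  Illustration (hypothetical InnoDB table t1 with a clustered PRIMARY KEY pk):

    UPDATE t1 AS a, t1 AS b
      SET a.pk = a.pk + 10, b.other_col = 0
      WHERE a.pk = b.pk;

  The same table is updated through two aliases and a clustered primary key
  column is modified through one of them, so unsafe_key_update() reports
  ER_MULTI_UPDATE_KEY_CONFLICT rather than risk that the second alias no
  longer finds the physically moved row.
*/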
1247 
1248 
1249 /**
1250   Check if there is enough privilege on specific table used by the
1251   main select list of multi-update directly or indirectly (through
1252   a view).
1253 
1254   @param[in]      thd                Thread context.
1255   @param[in]      table              Table list element for the table.
1256   @param[in]      tables_for_update  Bitmap with tables being updated.
1257   @param[in/out]  updated_arg        Set to true if table in question is
1258                                      updated, also set to true if it is
1259                                      a view and one of its underlying
1260                                      tables is updated. Should be
1261                                      initialized to false by the caller
1262                                      before a sequence of calls to this
1263                                      function.
1264 
1265   @note To determine which tables/views are updated we have to go from
1266         leaves to root since tables_for_update contains map of leaf
1267         tables being updated and doesn't include non-leaf tables
1268         (fields are already resolved to leaf tables).
1269 
1270   @retval false - Success, all necessary privileges on all tables are
1271                   present or might be present on column-level.
1272   @retval true  - Failure, some necessary privilege on some table is
1273                   missing.
1274 */
1275 
1276 static bool multi_update_check_table_access(THD *thd, TABLE_LIST *table,
1277                                             table_map tables_for_update,
1278                                             bool *updated_arg)
1279 {
1280   if (table->view)
1281   {
1282     bool updated= false;
1283     /*
1284       If it is a mergeable view then we need to check privileges on its
1285       underlying tables being merged (including views). We also need to
1286       check if any of them is updated in order to find if this view is
1287       updated.
1288       If it is a non-mergeable view then it can't be updated.
1289     */
1290     DBUG_ASSERT(table->merge_underlying_list ||
1291                 (!table->updatable &&
1292                  !(table->table->map & tables_for_update)));
1293 
1294     for (TABLE_LIST *tbl= table->merge_underlying_list; tbl;
1295          tbl= tbl->next_local)
1296     {
1297       if (multi_update_check_table_access(thd, tbl, tables_for_update, &updated))
1298         return true;
1299     }
1300     if (check_table_access(thd, updated ? UPDATE_ACL: SELECT_ACL, table,
1301                            FALSE, 1, FALSE))
1302       return true;
1303     *updated_arg|= updated;
1304     /* We only need SELECT privilege for columns in the values list. */
1305     table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
1306   }
1307   else
1308   {
1309     /* Must be a base or derived table. */
1310     const bool updated= table->table->map & tables_for_update;
1311     if (check_table_access(thd, updated ? UPDATE_ACL : SELECT_ACL, table,
1312                            FALSE, 1, FALSE))
1313       return true;
1314     *updated_arg|= updated;
1315     /* We only need SELECT privilege for columns in the values list. */
1316     if (!table->derived)
1317     {
1318       table->grant.want_privilege= SELECT_ACL & ~table->grant.privilege;
1319       table->table->grant.want_privilege= SELECT_ACL & ~table->table->grant.privilege;
1320     }
1321   }
1322   return false;
1323 }
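/*
  Illustration (hypothetical mergeable view): for

    CREATE VIEW v3 AS SELECT t1.a, t2.b FROM t1 JOIN t2 ON t1.id = t2.id;
    UPDATE v3, t3 SET v3.a = t3.a WHERE v3.b = t3.b;

  the function recurses into the merged view: UPDATE privilege is required on
  t1 (it is updated) and SELECT privilege on t2 (only read); since an
  underlying table is updated, UPDATE privilege is also checked on v3 itself.
*/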
1324 
1325 
1326 /*
1327   Make UPDATE-specific preparations and checks after opening tables
1328 
1329   SYNOPSIS
1330     mysql_multi_update_prepare()
1331     thd         thread handler
1332 
1333   RETURN
1334     FALSE OK
1335     TRUE  Error
1336 */
1337 
1338 int mysql_multi_update_prepare(THD *thd)
1339 {
1340   LEX *lex= thd->lex;
1341   TABLE_LIST *table_list= lex->query_tables;
1342   TABLE_LIST *tl, *leaves;
1343   List<Item> *fields= &lex->select_lex.item_list;
1344   table_map tables_for_update;
1345   bool update_view= 0;
1346   const bool using_lock_tables= thd->locked_tables_mode != LTM_NONE;
1347   bool original_multiupdate= (thd->lex->sql_command == SQLCOM_UPDATE_MULTI);
1348   DBUG_ENTER("mysql_multi_update_prepare");
1349 
1350   /* The following is needed for prepared statements, to run as a multi-update next time */
1351   thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
1352 
1353   /*
1354     Open tables and create derived ones, but do not lock and fill them yet.
1355 
1356     During prepare phase acquire only S metadata locks instead of SW locks to
1357     keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
1358     and global read lock.
1359   */
1360   if (original_multiupdate &&
1361       open_normal_and_derived_tables(thd, table_list,
1362                                      (thd->stmt_arena->is_stmt_prepare() ?
1363                                       MYSQL_OPEN_FORCE_SHARED_MDL : 0)))
1364     DBUG_RETURN(TRUE);
1365   /*
1366     setup_tables() is needed for VIEWs. JOIN::prepare() will call setup_tables()
1367     a second time, but that call will do nothing (there is a check for repeated
1368     calls in setup_tables()).
1369   */
1370 
1371   if (setup_tables(thd, &lex->select_lex.context,
1372                    &lex->select_lex.top_join_list,
1373                    table_list, &lex->select_lex.leaf_tables,
1374                    FALSE))
1375     DBUG_RETURN(TRUE);
1376 
1377   if (setup_fields_with_no_wrap(thd, Ref_ptr_array(),
1378                                 *fields, MARK_COLUMNS_WRITE, 0, 0))
1379     DBUG_RETURN(TRUE);
1380 
1381   /*
1382    Reset tl->updating to false for views; it is set correctly
1383    for the underlying tables below
1384   */
1385   for (tl= table_list; tl ; tl= tl->next_local)
1386   {
1387     if (tl->view)
1388     {
1389       update_view= 1;
1390       tl->updating= false;
1391     }
1392   }
1393 
1394   if (update_view && check_fields(thd, *fields))
1395   {
1396     DBUG_RETURN(TRUE);
1397   }
1398 
1399   thd->table_map_for_update= tables_for_update= get_table_map(fields);
1400 
1401   leaves= lex->select_lex.leaf_tables;
1402 
1403   if (unsafe_key_update(leaves, tables_for_update))
1404     DBUG_RETURN(true);
1405 
1406   /*
1407     Setup timestamp handling and locking mode
1408   */
1409   for (tl= leaves; tl; tl= tl->next_leaf)
1410   {
1411     TABLE *table= tl->table;
1412 
1413     /* if table will be updated then check that it is unique */
1414     if (table->map & tables_for_update)
1415     {
1416       if (!tl->updatable || check_key_in_view(thd, tl))
1417       {
1418         my_error(ER_NON_UPDATABLE_TABLE, MYF(0), tl->alias, "UPDATE");
1419         DBUG_RETURN(TRUE);
1420       }
1421 
1422       DBUG_PRINT("info",("setting table `%s` for update", tl->alias));
1423       /*
1424         If the table will be updated, we should not downgrade its lock;
1425         leave it as is.
1426       */
1427     }
1428     else
1429     {
1430       DBUG_PRINT("info",("setting table `%s` for read-only", tl->alias));
1431       /*
1432         If we are using the binary log, we need TL_READ_NO_INSERT to get
1433         correct order of statements. Otherwise, we use a TL_READ lock to
1434         improve performance.
1435         We don't downgrade metadata lock from SW to SR in this case as
1436         there is no guarantee that the same ticket is not used by
1437         another table instance used by this statement which is going to
1438         be write-locked (for example, trigger to be invoked might try
1439         to update this table).
1440         Last argument routine_modifies_data for read_lock_type_for_table()
1441         is ignored, as prelocking placeholder will never be set here.
1442       */
1443       DBUG_ASSERT(tl->prelocking_placeholder == false);
1444       tl->lock_type= read_lock_type_for_table(thd, lex, tl, true);
1445       tl->updating= 0;
1446       /* Update TABLE::lock_type accordingly. */
1447       if (!tl->placeholder() && !using_lock_tables)
1448         tl->table->reginfo.lock_type= tl->lock_type;
1449     }
1450   }
1451 
1452   /*
1453     Check access privileges for tables being updated or read.
1454     Note that unlike in the above loop we need to iterate here not only
1455     through all leaf tables but also through all view hierarchy.
1456   */
1457   for (tl= table_list; tl; tl= tl->next_local)
1458   {
1459     bool not_used= false;
1460     if (multi_update_check_table_access(thd, tl, tables_for_update, &not_used))
1461       DBUG_RETURN(TRUE);
1462   }
1463 
1464   /* Check single-table update through a view composed of several tables */
1465   for (tl= table_list; tl; tl= tl->next_local)
1466   {
1467     if (tl->effective_algorithm == VIEW_ALGORITHM_MERGE)
1468     {
1469       TABLE_LIST *for_update= 0;
1470       if (tl->check_single_table(&for_update, tables_for_update, tl))
1471       {
1472 	my_error(ER_VIEW_MULTIUPDATE, MYF(0),
1473 		 tl->view_db.str, tl->view_name.str);
1474 	DBUG_RETURN(-1);
1475       }
1476     }
1477   }
1478 
1479   /* @todo: downgrade the metadata locks here. */
1480 
1481   /*
1482     Check that we are not using a table that we are updating, but we should
1483     skip all tables of the UPDATE's SELECT itself
1484   */
1485   lex->select_lex.exclude_from_table_unique_test= TRUE;
1486   for (tl= leaves; tl; tl= tl->next_leaf)
1487   {
1488     if (tl->lock_type != TL_READ &&
1489         tl->lock_type != TL_READ_NO_INSERT)
1490     {
1491       TABLE_LIST *duplicate;
1492       if ((duplicate= unique_table(thd, tl, table_list, 0)))
1493       {
1494         update_non_unique_table_error(table_list, "UPDATE", duplicate);
1495         DBUG_RETURN(TRUE);
1496       }
1497     }
1498   }
1499   /*
1500     Set exclude_from_table_unique_test value back to FALSE. It is needed for
1501     further check in multi_update::prepare whether to use record cache.
1502   */
1503   lex->select_lex.exclude_from_table_unique_test= FALSE;
1504   DBUG_RETURN (FALSE);
1505 }
1506 
1507 
1508 /*
1509   Setup multi-update handling and call SELECT to do the join
1510 */
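/*
  Illustrative example (hypothetical tables):

    UPDATE t1, t2 SET t1.a= t2.a WHERE t1.id= t2.id;

  is executed as a regular join over (t1, t2); the multi_update object
  passed back in *result receives each joined row via send_data() and
  performs or buffers the corresponding updates.
*/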
1511 
1512 bool mysql_multi_update(THD *thd,
1513                         TABLE_LIST *table_list,
1514                         List<Item> *fields,
1515                         List<Item> *values,
1516                         Item *conds,
1517                         ulonglong options,
1518                         enum enum_duplicates handle_duplicates,
1519                         bool ignore,
1520                         SELECT_LEX_UNIT *unit,
1521                         SELECT_LEX *select_lex,
1522                         multi_update **result)
1523 {
1524   bool res;
1525   DBUG_ENTER("mysql_multi_update");
1526 
1527   if (!(*result= new multi_update(table_list,
1528 				 thd->lex->select_lex.leaf_tables,
1529 				 fields, values,
1530 				 handle_duplicates, ignore)))
1531   {
1532     DBUG_RETURN(TRUE);
1533   }
1534 
1535   thd->abort_on_warning= (!ignore && thd->is_strict_mode());
1536 
1537   if (thd->lex->describe)
1538     res= explain_multi_table_modification(thd, *result);
1539   else
1540   {
1541     List<Item> total_list;
1542 
1543     res= mysql_select(thd,
1544                       table_list, select_lex->with_wild,
1545                       total_list,
1546                       conds, (SQL_I_List<ORDER> *) NULL,
1547                       (SQL_I_List<ORDER> *)NULL, (Item *) NULL,
1548                       options | SELECT_NO_JOIN_CACHE | SELECT_NO_UNLOCK |
1549                       OPTION_SETUP_TABLES_DONE,
1550                       *result, unit, select_lex);
1551 
1552     DBUG_PRINT("info",("res: %d  report_error: %d",res, (int) thd->is_error()));
1553     res|= thd->is_error();
1554     if (unlikely(res))
1555     {
1556       /* If another error was reported earlier then this one will be ignored */
1557       (*result)->send_error(ER_UNKNOWN_ERROR, ER(ER_UNKNOWN_ERROR));
1558       (*result)->abort_result_set();
1559     }
1560   }
1561   thd->abort_on_warning= 0;
1562   DBUG_RETURN(res);
1563 }
1564 
1565 
1566 multi_update::multi_update(TABLE_LIST *table_list,
1567 			   TABLE_LIST *leaves_list,
1568 			   List<Item> *field_list, List<Item> *value_list,
1569 			   enum enum_duplicates handle_duplicates_arg,
1570                            bool ignore_arg)
1571   :all_tables(table_list), leaves(leaves_list), update_tables(0),
1572    tmp_tables(0), updated(0), found(0), fields(field_list),
1573    values(value_list), table_count(0), copy_field(0),
1574    handle_duplicates(handle_duplicates_arg), do_update(1), trans_safe(1),
1575    transactional_tables(0), ignore(ignore_arg), error_handled(0),
1576    update_operations(NULL)
1577 {}
1578 
1579 
1580 /*
1581   Connect fields with tables and create list of tables that are updated
1582 */
1583 
1584 int multi_update::prepare(List<Item> &not_used_values,
1585 			  SELECT_LEX_UNIT *lex_unit)
1586 {
1587   TABLE_LIST *table_ref;
1588   SQL_I_List<TABLE_LIST> update;
1589   table_map tables_to_update;
1590   Item_field *item;
1591   List_iterator_fast<Item> field_it(*fields);
1592   List_iterator_fast<Item> value_it(*values);
1593   uint i, max_fields;
1594   uint leaf_table_count= 0;
1595   DBUG_ENTER("multi_update::prepare");
1596 
1597   thd->count_cuted_fields= CHECK_FIELD_WARN;
1598   thd->cuted_fields=0L;
1599   THD_STAGE_INFO(thd, stage_updating_main_table);
1600 
1601   tables_to_update= get_table_map(fields);
1602 
1603   if (!tables_to_update)
1604   {
1605     my_message(ER_NO_TABLES_USED, ER(ER_NO_TABLES_USED), MYF(0));
1606     DBUG_RETURN(1);
1607   }
1608 
1609   /*
1610     We gather the set of columns read during evaluation of the SET
1611     expressions in TABLE::tmp_set by pointing TABLE::read_set to it, and
1612     restore TABLE::read_set after setup_fields().
1613   */
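  /*
    Illustrative example: for SET t1.a= t1.b + 1, column t1.b is read while
    evaluating the value expression, so it is collected into t1's tmp_set
    (and later merged into the read set) even though only t1.a is written.
  */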
1614   for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1615   {
1616     TABLE *table= table_ref->table;
1617     if (tables_to_update & table->map)
1618     {
1619       DBUG_ASSERT(table->read_set == &table->def_read_set);
1620       table->read_set= &table->tmp_set;
1621       bitmap_clear_all(table->read_set);
1622     }
1623   }
1624 
1625   /*
1626     We have to check values after setup_tables to get covering_keys right in
1627     reference tables
1628   */
1629 
1630   int error= setup_fields(thd, Ref_ptr_array(),
1631                           *values, MARK_COLUMNS_READ, 0, 0);
1632 
1633   for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1634   {
1635     TABLE *table= table_ref->table;
1636     if (tables_to_update & table->map)
1637     {
1638       table->read_set= &table->def_read_set;
1639       bitmap_union(table->read_set, &table->tmp_set);
1640     }
1641   }
1642 
1643   if (error)
1644     DBUG_RETURN(1);
1645 
1646   /*
1647     Save the tables being updated in update_tables.
1648     TABLE_LIST::shared holds the position of the table in that list.
1649     Don't use key read on tables that are updated.
1650   */
1651 
1652   update.empty();
1653   for (table_ref= leaves; table_ref; table_ref= table_ref->next_leaf)
1654   {
1655     /* TODO: add support for views over joins */
1656     TABLE *table=table_ref->table;
1657     leaf_table_count++;
1658     if (tables_to_update & table->map)
1659     {
1660       TABLE_LIST *tl= (TABLE_LIST*) thd->memdup(table_ref,
1661 						sizeof(*tl));
1662       if (!tl)
1663 	DBUG_RETURN(1);
1664       update.link_in_list(tl, &tl->next_local);
1665       tl->shared= table_count++;
1666       table->no_keyread=1;
1667       table->covering_keys.clear_all();
1668       table->pos_in_table_list= tl;
1669       if (table->triggers &&
1670           table->triggers->has_triggers(TRG_EVENT_UPDATE,
1671                                         TRG_ACTION_AFTER))
1672       {
1673 	/*
1674            The table has AFTER UPDATE triggers that might access to subject
1675            table and therefore might need update to be done immediately.
1676            So we turn-off the batching.
1677 	*/
1678 	(void) table->file->extra(HA_EXTRA_UPDATE_CANNOT_BATCH);
1679       }
1680     }
1681   }
1682 
1683 
1684   table_count=  update.elements;
1685   update_tables= update.first;
1686 
1687   tmp_tables = (TABLE**) thd->calloc(sizeof(TABLE *) * table_count);
1688   tmp_table_param = (TMP_TABLE_PARAM*) thd->calloc(sizeof(TMP_TABLE_PARAM) *
1689 						   table_count);
1690   fields_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1691 					      table_count);
1692   values_for_table= (List_item **) thd->alloc(sizeof(List_item *) *
1693 					      table_count);
1694 
1695   DBUG_ASSERT(update_operations == NULL);
1696   update_operations= (COPY_INFO**) thd->calloc(sizeof(COPY_INFO*) *
1697                                                table_count);
1698 
1699   if (thd->is_fatal_error)
1700     DBUG_RETURN(1);
1701   for (i=0 ; i < table_count ; i++)
1702   {
1703     fields_for_table[i]= new List_item;
1704     values_for_table[i]= new List_item;
1705   }
1706   if (thd->is_fatal_error)
1707     DBUG_RETURN(1);
1708 
1709   /* Split fields into fields_for_table[] and values_for_table[] */
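  /*
    Illustrative example (hypothetical tables): for

      UPDATE t1, t2 SET t1.a= t2.x, t2.b= 1

    fields_for_table[0] gets {t1.a} and values_for_table[0] gets {t2.x},
    while fields_for_table[1] gets {t2.b} and values_for_table[1] gets {1},
    assuming t1 and t2 were assigned shared positions 0 and 1.
  */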
1710 
1711   while ((item= (Item_field *) field_it++))
1712   {
1713     Item *value= value_it++;
1714     uint offset= item->field->table->pos_in_table_list->shared;
1715     fields_for_table[offset]->push_back(item);
1716     values_for_table[offset]->push_back(value);
1717   }
1718   if (thd->is_fatal_error)
1719     DBUG_RETURN(1);
1720 
1721   /* Allocate copy fields */
1722   max_fields=0;
1723   for (i=0 ; i < table_count ; i++)
1724     set_if_bigger(max_fields, fields_for_table[i]->elements + leaf_table_count);
1725   copy_field= new Copy_field[max_fields];
1726 
1727 
1728   for (TABLE_LIST *ref= leaves; ref != NULL; ref= ref->next_leaf)
1729   {
1730     TABLE *table= ref->table;
1731     if (tables_to_update & table->map)
1732     {
1733       const uint position= table->pos_in_table_list->shared;
1734       List<Item> *cols= fields_for_table[position];
1735       List<Item> *vals= values_for_table[position];
1736       COPY_INFO *update=
1737         new (thd->mem_root) COPY_INFO(COPY_INFO::UPDATE_OPERATION, cols, vals);
1738       if (update == NULL ||
1739           update->add_function_default_columns(table, table->write_set))
1740         DBUG_RETURN(1);
1741 
1742       update_operations[position]= update;
1743 
1744       if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0 &&
1745           update->function_defaults_apply(table))
1746       {
1747         /*
1748           A column is to be set to its ON UPDATE function default only if
1749           other columns of the row are changing. To know this, we must be able
1750           to compare the "before" and "after" value of those columns. Thus, we
1751           must read those columns:
1752         */
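        /*
          Illustrative example: a column declared as
            ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
          is such a function default; it must be bumped only when some other
          column of the row actually changes.
        */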
1753         bitmap_union(table->read_set, table->write_set);
1754       }
1755       /* All needed columns must be marked before prune_partitions(). */
1756       if (table->triggers)
1757         table->triggers->mark_fields_used(TRG_EVENT_UPDATE);
1758     }
1759   }
1760 
1761   DBUG_RETURN(thd->is_fatal_error != 0);
1762 }
1763 
1764 
1765 /*
1766   Check if table is safe to update on fly
1767 
1768   SYNOPSIS
1769     safe_update_on_fly()
1770     thd                 Thread handler
1771     join_tab            How table is used in join
1772     table_ref           Reference to the table to check
         all_tables          List of tables
1773 
1774   NOTES
1775     We can update the first table in join on the fly if we know that
1776     a row in this table will never be read twice. This is true under
1777     the following conditions:
1778 
1779     - No column is both written to and read in SET expressions.
1780 
1781     - We are doing a table scan and the data is in a separate file (MyISAM) or
1782       if we don't update a clustered key.
1783 
1784     - We are doing a range scan and we don't update the scan key or
1785       the primary key for a clustered table handler.
1786 
1787     - Table is not joined to itself.
1788 
1789     This function gets information about fields to be updated from
1790     the TABLE::write_set bitmap.
1791 
1792   WARNING
1793     This code is somewhat dependent on how make_join_readinfo() works.
1794 
1795     The field table->tmp_set is used for keeping track of which fields are
1796     read during evaluation of the SET expression. See multi_update::prepare.
1797 
1798   RETURN
1799     0		Not safe to update
1800     1		Safe to update
1801 */
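/*
  Illustrative example (hypothetical tables and keys): in

    UPDATE t1, t2 SET t1.a= t2.a WHERE t1.pk= t2.pk;

  t1, when it is the first table of the join and is scanned without the
  updated column t1.a being part of the key used for the scan, can usually
  be updated on the fly; otherwise its new values are buffered in a
  temporary table first and applied in do_updates().
*/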
1802 
1803 static bool safe_update_on_fly(THD *thd, JOIN_TAB *join_tab,
1804                                TABLE_LIST *table_ref, TABLE_LIST *all_tables)
1805 {
1806   TABLE *table= join_tab->table;
1807   if (unique_table(thd, table_ref, all_tables, 0))
1808     return 0;
1809   switch (join_tab->type) {
1810   case JT_SYSTEM:
1811   case JT_CONST:
1812   case JT_EQ_REF:
1813     return TRUE;				// At most one matching row
1814   case JT_REF:
1815   case JT_REF_OR_NULL:
1816     return !is_key_used(table, join_tab->ref.key, table->write_set);
1817   case JT_ALL:
1818     if (bitmap_is_overlapping(&table->tmp_set, table->write_set))
1819       return FALSE;
1820     /* If range search on index */
1821     if (join_tab->quick)
1822       return !join_tab->quick->is_keys_used(table->write_set);
1823     /* If scanning in clustered key */
1824     if ((table->file->ha_table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX) &&
1825 	table->s->primary_key < MAX_KEY)
1826       return !is_key_used(table, table->s->primary_key, table->write_set);
1827     return TRUE;
1828   default:
1829     break;					// Avoid compiler warning
1830   }
1831   return FALSE;
1832 
1833 }
1834 
1835 
1836 /*
1837   Initialize tables for multi-table update
1838 
1839   IMPLEMENTATION
1840     - Update the first table in the join on the fly, if possible
1841     - Create temporary tables to store changed values for all other tables
1842       that are updated (and for main_table if the above doesn't hold).
1843 */
1844 
1845 bool
1846 multi_update::initialize_tables(JOIN *join)
1847 {
1848   TABLE_LIST *table_ref;
1849   DBUG_ENTER("initialize_tables");
1850 
1851   if ((thd->variables.option_bits & OPTION_SAFE_UPDATES) && error_if_full_join(join))
1852     DBUG_RETURN(1);
1853   main_table=join->join_tab->table;
1854   table_to_update= 0;
1855 
1856   /* Any update has at least one pair (field, value) */
1857   DBUG_ASSERT(fields->elements);
1858   /*
1859    Only one table may be modified by UPDATE of an updatable view.
1860    For an updatable view first_table_for_update indicates this
1861    table.
1862    For a regular multi-update it refers to some updated table.
1863   */
1864   TABLE *first_table_for_update= ((Item_field *) fields->head())->field->table;
1865 
1866   /* Create a temporary table for keys to all tables, except main table */
1867   for (table_ref= update_tables; table_ref; table_ref= table_ref->next_local)
1868   {
1869     TABLE *table=table_ref->table;
1870     uint cnt= table_ref->shared;
1871     List<Item> temp_fields;
1872     ORDER     group;
1873     TMP_TABLE_PARAM *tmp_param;
1874 
1875     if (ignore)
1876       table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
1877     if (table == main_table)			// First table in join
1878     {
1879       if (safe_update_on_fly(thd, join->join_tab, table_ref, all_tables))
1880       {
1881         table->mark_columns_needed_for_update(true/*mark_binlog_columns=true*/);
1882 	table_to_update= table;			// Update table on the fly
1883 	continue;
1884       }
1885     }
1886     table->mark_columns_needed_for_update(true/*mark_binlog_columns=true*/);
1887 
1888     /*
1889       Enable the uncacheable flag if we update a view with a check option
1890       and the check option has a subselect; otherwise the check option
1891       could be evaluated after the subselect was freed as independent
1892       (see full_local in JOIN::join_free()).
1893     */
1894     if (table_ref->check_option && !join->select_lex->uncacheable)
1895     {
1896       SELECT_LEX_UNIT *tmp_unit;
1897       SELECT_LEX *sl;
1898       for (tmp_unit= join->select_lex->first_inner_unit();
1899            tmp_unit;
1900            tmp_unit= tmp_unit->next_unit())
1901       {
1902         for (sl= tmp_unit->first_select(); sl; sl= sl->next_select())
1903         {
1904           if (sl->master_unit()->item)
1905           {
1906             join->select_lex->uncacheable|= UNCACHEABLE_CHECKOPTION;
1907             goto loop_end;
1908           }
1909         }
1910       }
1911     }
1912 loop_end:
1913 
1914     if (table == first_table_for_update && table_ref->check_option)
1915     {
1916       table_map unupdated_tables= table_ref->check_option->used_tables() &
1917                                   ~first_table_for_update->map;
1918       for (TABLE_LIST *tbl_ref =leaves;
1919            unupdated_tables && tbl_ref;
1920            tbl_ref= tbl_ref->next_leaf)
1921       {
1922         if (unupdated_tables & tbl_ref->table->map)
1923           unupdated_tables&= ~tbl_ref->table->map;
1924         else
1925           continue;
1926         if (unupdated_check_opt_tables.push_back(tbl_ref->table))
1927           DBUG_RETURN(1);
1928       }
1929     }
1930 
1931     tmp_param= tmp_table_param+cnt;
1932 
1933     /*
1934       Create a temporary table to store all fields that are changed for this
1935       table. The first field in the temporary table is a pointer to the
1936       original row so that we can find and update it. For the updatable
1937       VIEW a few following fields are rowids of tables used in the CHECK
1938       OPTION condition.
1939     */
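    /*
      Sketch of a buffered row (the layout follows from the code below):

        [rowid of the updated table]
        [rowids of tables from the CHECK OPTION condition, if any]
        [new values of the changed columns]

      do_updates() later reads these rows back, positions the target table
      with ha_rnd_pos() on the stored rowid and applies the new values.
    */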
1940 
1941     List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
1942     TABLE *tbl= table;
1943     do
1944     {
1945       /*
1946         Signal each table (including tables referenced by the WITH CHECK OPTION
1947         clause) for which we will store a row position in the temporary table
1948         that we need the position to be read first.
1949       */
1950       tbl->prepare_for_position();
1951 
1952       Field_string *field= new Field_string(tbl->file->ref_length, 0,
1953                                             tbl->alias, &my_charset_bin);
1954       if (!field)
1955         DBUG_RETURN(1);
1956       field->init(tbl);
1957       /*
1958         The field will be converted to a varstring when creating the tmp table
1959         if the table to be updated was created by MySQL 4.1. Prevent this.
1960       */
1961       field->can_alter_field_type= 0;
1962       Item_field *ifield= new Item_field((Field *) field);
1963       if (!ifield)
1964          DBUG_RETURN(1);
1965       ifield->maybe_null= 0;
1966       if (temp_fields.push_back(ifield))
1967         DBUG_RETURN(1);
1968     } while ((tbl= tbl_it++));
1969 
1970     temp_fields.concat(fields_for_table[cnt]);
1971 
1972     /* Make a unique key over the first field to avoid duplicated updates */
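    /*
      The GROUP clause below makes create_tmp_table() build a unique key on
      the rowid field, so if the same original row appears in several join
      row combinations only one set of new values is stored for it
      (duplicate writes are ignored in send_data()).
    */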
1973     memset(&group, 0, sizeof(group));
1974     group.direction= ORDER::ORDER_ASC;
1975     group.item= (Item**) temp_fields.head_ref();
1976 
1977     tmp_param->quick_group=1;
1978     tmp_param->field_count=temp_fields.elements;
1979     tmp_param->group_parts=1;
1980     tmp_param->group_length= table->file->ref_length;
1981     /* small table, ignore SQL_BIG_TABLES */
1982     my_bool save_big_tables= thd->variables.big_tables;
1983     thd->variables.big_tables= FALSE;
1984     tmp_tables[cnt]=create_tmp_table(thd, tmp_param, temp_fields,
1985                                      (ORDER*) &group, 0, 0,
1986                                      TMP_TABLE_ALL_COLUMNS, HA_POS_ERROR, "");
1987     thd->variables.big_tables= save_big_tables;
1988     if (!tmp_tables[cnt])
1989       DBUG_RETURN(1);
1990     tmp_tables[cnt]->file->extra(HA_EXTRA_WRITE_CACHE);
1991   }
1992   DBUG_RETURN(0);
1993 }
1994 
1995 
1996 multi_update::~multi_update()
1997 {
1998   TABLE_LIST *table;
1999   for (table= update_tables ; table; table= table->next_local)
2000   {
2001     table->table->no_keyread= table->table->no_cache= 0;
2002     if (ignore)
2003       table->table->file->extra(HA_EXTRA_NO_IGNORE_DUP_KEY);
2004   }
2005 
2006   if (tmp_tables)
2007   {
2008     for (uint cnt = 0; cnt < table_count; cnt++)
2009     {
2010       if (tmp_tables[cnt])
2011       {
2012 	free_tmp_table(thd, tmp_tables[cnt]);
2013 	tmp_table_param[cnt].cleanup();
2014       }
2015     }
2016   }
2017   if (copy_field)
2018     delete [] copy_field;
2019   thd->count_cuted_fields= CHECK_FIELD_IGNORE;		// Restore this setting
2020   DBUG_ASSERT(trans_safe || !updated ||
2021               thd->transaction.stmt.cannot_safely_rollback());
2022 
2023   if (update_operations != NULL)
2024     for (uint i= 0; i < table_count; i++)
2025       delete update_operations[i];
2026 }
2027 
2028 
2029 bool multi_update::send_data(List<Item> &not_used_values)
2030 {
2031   TABLE_LIST *cur_table;
2032   DBUG_ENTER("multi_update::send_data");
2033 
2034   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2035   {
2036     TABLE *table= cur_table->table;
2037     uint offset= cur_table->shared;
2038     /*
2039       Check if we are using an outer join and did not find the row,
2040       or if we have already updated this row in a previous call to this
2041       function.
2042 
2043       The same row may be presented here several times in a join of type
2044       UPDATE t1, t2 SET t1.a=t2.a
2045 
2046       In this case we will do the update for the first found row combination.
2047       The join algorithm guarantees that we will not find a row in
2048       t1 several times.
2049     */
2050     if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
2051       continue;
2052 
2053     if (table == table_to_update)
2054     {
2055       table->status|= STATUS_UPDATED;
2056       store_record(table,record[1]);
2057       if (fill_record_n_invoke_before_triggers(thd,
2058                                                *fields_for_table[offset],
2059                                                *values_for_table[offset],
2060                                                false, // ignore_errors
2061                                                table->triggers,
2062                                                TRG_EVENT_UPDATE))
2063 	DBUG_RETURN(1);
2064 
2065       /*
2066         Reset the table->auto_increment_field_not_null as it is valid for
2067         only one row.
2068       */
2069       table->auto_increment_field_not_null= FALSE;
2070       found++;
2071       if (!records_are_comparable(table) || compare_records(table))
2072       {
2073         update_operations[offset]->set_function_defaults(table);
2074 
2075 	int error;
2076         if ((error= cur_table->view_check_option(thd, ignore)) !=
2077             VIEW_CHECK_OK)
2078         {
2079           found--;
2080           if (error == VIEW_CHECK_SKIP)
2081             continue;
2082           else if (error == VIEW_CHECK_ERROR)
2083             DBUG_RETURN(1);
2084         }
2085         if (!updated++)
2086         {
2087           /*
2088             Inform the main table that we are going to update the table even
2089             while we may be scanning it.  This will flush the read cache
2090             if it's used.
2091           */
2092           main_table->file->extra(HA_EXTRA_PREPARE_FOR_UPDATE);
2093         }
2094         if ((error=table->file->ha_update_row(table->record[1],
2095                                               table->record[0])) &&
2096             error != HA_ERR_RECORD_IS_THE_SAME)
2097         {
2098           updated--;
2099           if (!ignore ||
2100               table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
2101                                                  HA_CHECK_FK_ERROR))
2102           {
2103             /*
2104               If (ignore && the error is ignorable) we don't have to
2105               do anything; otherwise...
2106             */
2107             myf flags= 0;
2108 
2109             if (table->file->is_fatal_error(error, HA_CHECK_DUP_KEY |
2110                                                    HA_CHECK_FK_ERROR))
2111               flags|= ME_FATALERROR; /* Other handler errors are fatal */
2112 
2113             table->file->print_error(error,MYF(flags));
2114             DBUG_RETURN(1);
2115           }
2116           else if (ignore && !table->file->is_fatal_error(error,
2117                                                           HA_CHECK_FK_ERROR))
2118             warn_fk_constraint_violation(thd, table, error);
2119         }
2120         else
2121         {
2122           if (error == HA_ERR_RECORD_IS_THE_SAME)
2123           {
2124             error= 0;
2125             updated--;
2126           }
2127           /* A non-transactional or transactional table got modified;   */
2128           /* in either case the corresponding multi_update flag is set in its branch */
2129           if (table->file->has_transactions())
2130             transactional_tables= TRUE;
2131           else
2132           {
2133             trans_safe= FALSE;
2134             thd->transaction.stmt.mark_modified_non_trans_table();
2135           }
2136         }
2137       }
2138       if (table->triggers &&
2139           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2140                                             TRG_ACTION_AFTER, TRUE))
2141         DBUG_RETURN(1);
2142     }
2143     else
2144     {
2145       int error;
2146       TABLE *tmp_table= tmp_tables[offset];
2147       /*
2148        For updatable VIEW store rowid of the updated table and
2149        rowids of tables used in the CHECK OPTION condition.
2150       */
2151       uint field_num= 0;
2152       List_iterator_fast<TABLE> tbl_it(unupdated_check_opt_tables);
2153       TABLE *tbl= table;
2154       do
2155       {
2156         tbl->file->position(tbl->record[0]);
2157         memcpy((char*) tmp_table->field[field_num]->ptr,
2158                (char*) tbl->file->ref, tbl->file->ref_length);
2159         /*
2160          For outer joins a rowid field may have no NOT_NULL_FLAG,
2161          so we have to reset NULL bit for this field.
2162          (set_notnull() resets NULL bit only if available).
2163         */
2164         tmp_table->field[field_num]->set_notnull();
2165         field_num++;
2166       } while ((tbl= tbl_it++));
2167 
2168       /* Store regular updated fields in the row. */
2169       fill_record(thd,
2170                   tmp_table->field + 1 + unupdated_check_opt_tables.elements,
2171                   *values_for_table[offset], 1, NULL);
2172 
2173       /* Write row, ignoring duplicated updates to a row */
2174       error= tmp_table->file->ha_write_row(tmp_table->record[0]);
2175       if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE)
2176       {
2177         if (error &&
2178             create_myisam_from_heap(thd, tmp_table,
2179                                          tmp_table_param[offset].start_recinfo,
2180                                          &tmp_table_param[offset].recinfo,
2181                                          error, TRUE, NULL))
2182         {
2183           do_update= 0;
2184 	  DBUG_RETURN(1);			// Not a table_is_full error
2185 	}
2186         found++;
2187       }
2188     }
2189   }
2190   DBUG_RETURN(0);
2191 }
2192 
2193 
2194 void multi_update::send_error(uint errcode,const char *err)
2195 {
2196   /* First send the error, whatever it is ... */
2197   my_error(errcode, MYF(0), err);
2198 }
2199 
2200 
2201 void multi_update::abort_result_set()
2202 {
2203   /* If the error was handled, or nothing was updated and there are no side effects, return */
2204   if (error_handled ||
2205       (!thd->transaction.stmt.cannot_safely_rollback() && !updated))
2206     return;
2207 
2208   /* Something already updated so we have to invalidate cache */
2209   if (updated)
2210     query_cache_invalidate3(thd, update_tables, 1);
2211   /*
2212     If all tables that have been updated are transaction-safe then just do a rollback.
2213     If not, attempt to do the remaining updates.
2214   */
2215 
2216   if (! trans_safe)
2217   {
2218     DBUG_ASSERT(thd->transaction.stmt.cannot_safely_rollback());
2219     if (do_update && table_count > 1)
2220     {
2221       /* Add warning here */
2222       /*
2223          todo/fixme: do_update() is never called with the arg 1.
2224          should it change the signature to become argless?
2225       */
2226       (void) do_updates();
2227     }
2228   }
2229   if (thd->transaction.stmt.cannot_safely_rollback())
2230   {
2231     /*
2232       The query has to be binlogged because there's a modified non-transactional
2233       table, either from the query's list or via a stored routine: bug#13270,23333
2234     */
2235     if (mysql_bin_log.is_open())
2236     {
2237       /*
2238         THD::killed status might not have been set to ON at the time the
2239         error was caught, and if it is set later the killed error is written
2240         into the replication event.
2241       */
2242       int errcode= query_error_code(thd, thd->killed == THD::NOT_KILLED);
2243       /* the error of binary logging is ignored */
2244       (void)thd->binlog_query(THD::ROW_QUERY_TYPE,
2245                         thd->query(), thd->query_length(),
2246                         transactional_tables, FALSE, FALSE, errcode);
2247     }
2248   }
2249   DBUG_ASSERT(trans_safe || !updated || thd->transaction.stmt.cannot_safely_rollback());
2250 }
2251 
2252 
2253 int multi_update::do_updates()
2254 {
2255   TABLE_LIST *cur_table;
2256   int local_error= 0;
2257   ha_rows org_updated;
2258   TABLE *table, *tmp_table;
2259   List_iterator_fast<TABLE> check_opt_it(unupdated_check_opt_tables);
2260   DBUG_ENTER("multi_update::do_updates");
2261 
2262   do_update= 0;					// Don't retry this function
2263 
2264   if (!found)
2265   {
2266     /*
2267       If the binary log is on, we still need to check
2268       whether there are transactional tables involved. If
2269       there are, set the transactional_tables flag correctly.
2270 
2271       This flag determines whether the writes go into the
2272       transactional or non-transactional cache. Even if they
2273       do not change any table, they are still written into
2274       the binary log when the format is STMT or MIXED.
2275     */
2276     if (mysql_bin_log.is_open())
2277     {
2278       for (cur_table= update_tables; cur_table;
2279            cur_table= cur_table->next_local)
2280       {
2281         table = cur_table->table;
2282         transactional_tables= transactional_tables ||
2283                               table->file->has_transactions();
2284       }
2285     }
2286     DBUG_RETURN(0);
2287   }
2288   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
2289   {
2290     uint offset= cur_table->shared;
2291 
2292     table = cur_table->table;
2293 
2294     /*
2295       Always update the flag, even if not updating the table, when
2296       the binary log is ON. This will allow the right binlog
2297       cache - stmt or trx cache - to be selected when logging
2298       ineffective statements to the binary log (in STMT or MIXED
2299       mode logging).
2300      */
2301     if (mysql_bin_log.is_open())
2302       transactional_tables= transactional_tables || table->file->has_transactions();
2303 
2304     if (table == table_to_update)
2305       continue;                                        // Already updated
2306     org_updated= updated;
2307     tmp_table= tmp_tables[cur_table->shared];
2308     tmp_table->file->extra(HA_EXTRA_CACHE);	// Change to read cache
2309     if ((local_error= table->file->ha_rnd_init(0)))
2310       goto err;
2311     table->file->extra(HA_EXTRA_NO_CACHE);
2312 
2313     check_opt_it.rewind();
2314     while(TABLE *tbl= check_opt_it++)
2315     {
2316       if (tbl->file->ha_rnd_init(1))
2317         goto err;
2318       tbl->file->extra(HA_EXTRA_CACHE);
2319     }
2320 
2321     /*
2322       Setup copy functions to copy fields from temporary table
2323     */
2324     List_iterator_fast<Item> field_it(*fields_for_table[offset]);
2325     Field **field= tmp_table->field +
2326                    1 + unupdated_check_opt_tables.elements; // Skip row pointers
2327     Copy_field *copy_field_ptr= copy_field, *copy_field_end;
2328     for ( ; *field ; field++)
2329     {
2330       Item_field *item= (Item_field* ) field_it++;
2331       (copy_field_ptr++)->set(item->field, *field, 0);
2332     }
2333     copy_field_end=copy_field_ptr;
2334 
2335     if ((local_error = tmp_table->file->ha_rnd_init(1)))
2336       goto err;
2337 
2338     for (;;)
2339     {
2340       if (thd->killed && trans_safe)
2341 	goto err;
2342       if ((local_error=tmp_table->file->ha_rnd_next(tmp_table->record[0])))
2343       {
2344 	if (local_error == HA_ERR_END_OF_FILE)
2345 	  break;
2346 	if (local_error == HA_ERR_RECORD_DELETED)
2347 	  continue;				// May happen on dup key
2348 	goto err;
2349       }
2350 
2351       /* call ha_rnd_pos() using rowids from temporary table */
2352       check_opt_it.rewind();
2353       TABLE *tbl= table;
2354       uint field_num= 0;
2355       do
2356       {
2357         if((local_error=
2358               tbl->file->ha_rnd_pos(tbl->record[0],
2359                                     (uchar *) tmp_table->field[field_num]->ptr)))
2360           goto err;
2361         field_num++;
2362       } while((tbl= check_opt_it++));
2363 
2364       table->status|= STATUS_UPDATED;
2365       store_record(table,record[1]);
2366 
2367       /* Copy data from temporary table to current table */
2368       for (copy_field_ptr=copy_field;
2369 	   copy_field_ptr != copy_field_end;
2370 	   copy_field_ptr++)
2371 	(*copy_field_ptr->do_copy)(copy_field_ptr);
2372 
2373       if (table->triggers &&
2374           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2375                                             TRG_ACTION_BEFORE, TRUE))
2376         goto err2;
2377 
2378       if (!records_are_comparable(table) || compare_records(table))
2379       {
2380         update_operations[offset]->set_function_defaults(table);
2381         int error;
2382         if ((error= cur_table->view_check_option(thd, ignore)) !=
2383             VIEW_CHECK_OK)
2384         {
2385           if (error == VIEW_CHECK_SKIP)
2386             continue;
2387           else if (error == VIEW_CHECK_ERROR)
2388             goto err;
2389         }
2390         local_error= table->file->ha_update_row(table->record[1],
2391                                                 table->record[0]);
2392         if (!local_error)
2393           updated++;
2394         else if (local_error == HA_ERR_RECORD_IS_THE_SAME)
2395           local_error= 0;
2396         else if (!ignore ||
2397                  table->file->is_fatal_error(local_error, HA_CHECK_DUP_KEY |
2398                                                           HA_CHECK_FK_ERROR))
2399           goto err;
2400         else if (ignore && !table->file->is_fatal_error(local_error,
2401                                                           HA_CHECK_FK_ERROR))
2402           warn_fk_constraint_violation(thd, table, local_error);
2403         else
2404           local_error= 0;
2405       }
2406 
2407       if (table->triggers &&
2408           table->triggers->process_triggers(thd, TRG_EVENT_UPDATE,
2409                                             TRG_ACTION_AFTER, TRUE))
2410         goto err2;
2411     }
2412 
2413     if (updated != org_updated)
2414     {
2415       if (!table->file->has_transactions())
2416       {
2417         trans_safe= FALSE;				// Can't do safe rollback
2418         thd->transaction.stmt.mark_modified_non_trans_table();
2419       }
2420     }
2421     (void) table->file->ha_rnd_end();
2422     (void) tmp_table->file->ha_rnd_end();
2423     check_opt_it.rewind();
2424     while (TABLE *tbl= check_opt_it++)
2425         tbl->file->ha_rnd_end();
2426 
2427   }
2428   DBUG_RETURN(0);
2429 
2430 err:
2431   {
2432     table->file->print_error(local_error,MYF(ME_FATALERROR));
2433   }
2434 
2435 err2:
2436   if (table->file->inited)
2437     (void) table->file->ha_rnd_end();
2438   if (tmp_table->file->inited)
2439     (void) tmp_table->file->ha_rnd_end();
2440   check_opt_it.rewind();
2441   while (TABLE *tbl= check_opt_it++)
2442   {
2443     if (tbl->file->inited)
2444       (void) tbl->file->ha_rnd_end();
2445   }
2446 
2447   if (updated != org_updated)
2448   {
2449     if (table->file->has_transactions())
2450       transactional_tables= TRUE;
2451     else
2452     {
2453       trans_safe= FALSE;
2454       thd->transaction.stmt.mark_modified_non_trans_table();
2455     }
2456   }
2457   DBUG_RETURN(1);
2458 }
2459 
2460 
2461 /* out: 1 if error, 0 if success */
2462 
2463 bool multi_update::send_eof()
2464 {
2465   char buff[STRING_BUFFER_USUAL_SIZE];
2466   ulonglong id;
2467   THD::killed_state killed_status= THD::NOT_KILLED;
2468   DBUG_ENTER("multi_update::send_eof");
2469   THD_STAGE_INFO(thd, stage_updating_reference_tables);
2470 
2471   /*
2472      Does the updates for the last n - 1 tables and returns 0 if ok;
2473      the error takes into account the killed status gained in do_updates().
2474   */
2475   int local_error= thd->is_error();
2476   if (!local_error)
2477     local_error = (table_count) ? do_updates() : 0;
2478   /*
2479     If local_error is not set until after do_updates(), then a kill
2480     carried out later should not affect binlogging.
2481   */
2482   killed_status= (local_error == 0)? THD::NOT_KILLED : thd->killed;
2483   THD_STAGE_INFO(thd, stage_end);
2484 
2485   /* We must invalidate the query cache before binlog writing and
2486   ha_autocommit_... */
2487 
2488   if (updated)
2489   {
2490     query_cache_invalidate3(thd, update_tables, 1);
2491   }
2492   /*
2493     Write the SQL statement to the binlog if we updated
2494     rows and we succeeded, or if we updated some
2495     non-transactional tables.
2496 
2497     The query has to be binlogged because there's a modified non-transactional
2498     table, either from the query's list or via a stored routine: bug#13270,23333
2499   */
2500 
2501   if (local_error == 0 || thd->transaction.stmt.cannot_safely_rollback())
2502   {
2503     if (mysql_bin_log.is_open())
2504     {
2505       int errcode= 0;
2506       if (local_error == 0)
2507         thd->clear_error();
2508       else
2509         errcode= query_error_code(thd, killed_status == THD::NOT_KILLED);
2510       if (thd->binlog_query(THD::ROW_QUERY_TYPE,
2511                             thd->query(), thd->query_length(),
2512                             transactional_tables, FALSE, FALSE, errcode))
2513       {
2514 	local_error= 1;				// Rollback update
2515       }
2516     }
2517   }
2518   DBUG_ASSERT(trans_safe || !updated ||
2519               thd->transaction.stmt.cannot_safely_rollback());
2520 
2521   if (local_error != 0)
2522     error_handled= TRUE; // to force early leave from ::send_error()
2523 
2524   if (local_error > 0) // if the above log write did not fail ...
2525   {
2526     /* Safety: If we haven't got an error before (can happen in do_updates) */
2527     my_message(ER_UNKNOWN_ERROR, "An error occurred in multi-table update",
2528 	       MYF(0));
2529     DBUG_RETURN(TRUE);
2530   }
2531 
2532   id= thd->arg_of_last_insert_id_function ?
2533     thd->first_successful_insert_id_in_prev_stmt : 0;
2534   my_snprintf(buff, sizeof(buff), ER(ER_UPDATE_INFO),
2535               (ulong) found, (ulong) updated, (ulong) thd->cuted_fields);
2536   ha_rows row_count=
2537     (thd->client_capabilities & CLIENT_FOUND_ROWS) ? found : updated;
2538   ::my_ok(thd, row_count, id, buff);
2539   thd->updated_row_count+= row_count;
2540   DBUG_RETURN(FALSE);
2541 }
2542