1 /*
2 Copyright (c) 2000, 2019, Oracle and/or its affiliates.
3 Copyright (c) 2010, 2021, MariaDB
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; version 2 of the License.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
17
/*
  Deletion of records from tables.

  Multi-table deletes were introduced by Monty and Sinisa
*/
23
24 #include "mariadb.h"
25 #include "sql_priv.h"
26 #include "unireg.h"
27 #include "sql_delete.h"
28 #include "sql_cache.h" // query_cache_*
#include "sql_base.h"                           // open_temporary_table
30 #include "lock.h" // unlock_table_name
31 #include "sql_view.h" // check_key_in_view, mysql_frm_type
32 #include "sql_parse.h" // mysql_init_select
33 #include "filesort.h" // filesort
34 #include "sql_handler.h" // mysql_ha_rm_tables
35 #include "sql_select.h"
36 #include "sp_head.h"
37 #include "sql_trigger.h"
38 #include "sql_statistics.h"
39 #include "transaction.h"
40 #include "records.h" // init_read_record,
41 #include "filesort.h"
42 #include "uniques.h"
43 #include "sql_derived.h" // mysql_handle_derived
44 // end_read_record
45 #include "sql_partition.h" // make_used_partitions_str
46
47 #define MEM_STRIP_BUF_SIZE ((size_t) thd->variables.sortbuff_size)
48
49 /*
50 @brief
51 Print query plan of a single-table DELETE command
52
53 @detail
54 This function is used by EXPLAIN DELETE and by SHOW EXPLAIN when it is
55 invoked on a running DELETE statement.
56 */
57
save_explain_delete_data(MEM_ROOT * mem_root,THD * thd)58 Explain_delete* Delete_plan::save_explain_delete_data(MEM_ROOT *mem_root, THD *thd)
59 {
60 Explain_query *query= thd->lex->explain;
61 Explain_delete *explain=
62 new (mem_root) Explain_delete(mem_root, thd->lex->analyze_stmt);
63 if (!explain)
64 return 0;
65
66 if (deleting_all_rows)
67 {
68 explain->deleting_all_rows= true;
69 explain->select_type= "SIMPLE";
70 explain->rows= scanned_rows;
71 }
72 else
73 {
74 explain->deleting_all_rows= false;
75 if (Update_plan::save_explain_data_intern(mem_root, explain,
76 thd->lex->analyze_stmt))
77 return 0;
78 }
79
80 query->add_upd_del_plan(explain);
81 return explain;
82 }
83
84
85 Explain_update*
save_explain_update_data(MEM_ROOT * mem_root,THD * thd)86 Update_plan::save_explain_update_data(MEM_ROOT *mem_root, THD *thd)
87 {
88 Explain_query *query= thd->lex->explain;
89 Explain_update* explain=
90 new (mem_root) Explain_update(mem_root, thd->lex->analyze_stmt);
91 if (!explain)
92 return 0;
93 if (save_explain_data_intern(mem_root, explain, thd->lex->analyze_stmt))
94 return 0;
95 query->add_upd_del_plan(explain);
96 return explain;
97 }
98
99
/*
  @brief
    Fill an Explain_update structure with the single-table UPDATE/DELETE plan.

  @return
    false  Ok
    true   Out of memory (allocation of the filesort tracker failed)

  @note
    Degenerate plans (impossible WHERE, no partitions used) return early with
    only the corresponding flag set.
*/
bool Update_plan::save_explain_data_intern(MEM_ROOT *mem_root,
                                           Explain_update *explain,
                                           bool is_analyze)
{
  explain->select_type= "SIMPLE";
  explain->table_name.append(&table->pos_in_table_list->alias);

  explain->impossible_where= false;
  explain->no_partitions= false;

  if (impossible_where)
  {
    explain->impossible_where= true;
    return 0;
  }

  if (no_partitions)
  {
    explain->no_partitions= true;
    return 0;
  }

  /* ANALYZE needs per-table timing; attach the tracker to the handler. */
  if (is_analyze)
    table->file->set_time_tracker(&explain->table_tracker);

  select_lex->set_explain_type(TRUE);
  explain->select_type= select_lex->type;
  /* Partitions */
  {
#ifdef WITH_PARTITION_STORAGE_ENGINE
    partition_info *part_info;
    if ((part_info= table->part_info))
    {
      make_used_partitions_str(mem_root, part_info, &explain->used_partitions,
                               explain->used_partitions_list);
      explain->used_partitions_set= true;
    }
    else
      explain->used_partitions_set= false;
#else
    /* just produce empty column if partitioning is not compiled in */
    explain->used_partitions_set= false;
#endif
  }


  /* Set jtype */
  if (select && select->quick)
  {
    int quick_type= select->quick->get_type();
    if ((quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_MERGE) ||
        (quick_type == QUICK_SELECT_I::QS_TYPE_INDEX_INTERSECT) ||
        (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_INTERSECT) ||
        (quick_type == QUICK_SELECT_I::QS_TYPE_ROR_UNION))
      explain->jtype= JT_INDEX_MERGE;
    else
      explain->jtype= JT_RANGE;
  }
  else
  {
    /* No quick select: either a full scan or a full index scan. */
    if (index == MAX_KEY)
      explain->jtype= JT_ALL;
    else
      explain->jtype= JT_NEXT;
  }

  explain->using_where= MY_TEST(select && select->cond);
  explain->where_cond= select? select->cond: NULL;

  if (using_filesort)
    if (!(explain->filesort_tracker= new (mem_root) Filesort_tracker(is_analyze)))
      return 1;
  explain->using_io_buffer= using_io_buffer;

  append_possible_keys(mem_root, explain->possible_keys, table,
                       possible_keys);

  explain->quick_info= NULL;

  /* Calculate key_len */
  if (select && select->quick)
  {
    explain->quick_info= select->quick->get_explain(mem_root);
  }
  else
  {
    if (index != MAX_KEY)
    {
      explain->key.set(mem_root, &table->key_info[index],
                       table->key_info[index].key_length);
    }
  }
  explain->rows= scanned_rows;

  /* For a plain range scan, also report the MRR strategy used. */
  if (select && select->quick &&
      select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
  {
    explain_append_mrr_info((QUICK_RANGE_SELECT*)select->quick,
                            &explain->mrr_type);
  }

  /* Save subquery children */
  for (SELECT_LEX_UNIT *unit= select_lex->first_inner_unit();
       unit;
       unit= unit->next_unit())
  {
    if (unit->explainable())
      explain->add_child(unit->first_select()->select_number);
  }
  return 0;
}
211
212
record_should_be_deleted(THD * thd,TABLE * table,SQL_SELECT * sel,Explain_delete * explain,bool truncate_history)213 static bool record_should_be_deleted(THD *thd, TABLE *table, SQL_SELECT *sel,
214 Explain_delete *explain, bool truncate_history)
215 {
216 explain->tracker.on_record_read();
217 thd->inc_examined_row_count(1);
218 if (table->vfield)
219 (void) table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_DELETE);
220 if (!sel || sel->skip_record(thd) > 0)
221 {
222 explain->tracker.on_record_after_where();
223 return true;
224 }
225 return false;
226 }
227
228 static
update_portion_of_time(THD * thd,TABLE * table,const vers_select_conds_t & period_conds,bool * inside_period)229 int update_portion_of_time(THD *thd, TABLE *table,
230 const vers_select_conds_t &period_conds,
231 bool *inside_period)
232 {
233 bool lcond= period_conds.field_start->val_datetime_packed(thd)
234 < period_conds.start.item->val_datetime_packed(thd);
235 bool rcond= period_conds.field_end->val_datetime_packed(thd)
236 > period_conds.end.item->val_datetime_packed(thd);
237
238 *inside_period= !lcond && !rcond;
239 if (*inside_period)
240 return 0;
241
242 DBUG_ASSERT(!table->triggers
243 || !table->triggers->has_triggers(TRG_EVENT_INSERT,
244 TRG_ACTION_BEFORE));
245
246 int res= 0;
247 Item *src= lcond ? period_conds.start.item : period_conds.end.item;
248 uint dst_fieldno= lcond ? table->s->period.end_fieldno
249 : table->s->period.start_fieldno;
250
251 ulonglong prev_insert_id= table->file->next_insert_id;
252 store_record(table, record[1]);
253 if (likely(!res))
254 res= src->save_in_field(table->field[dst_fieldno], true);
255
256 if (likely(!res))
257 res= table->update_generated_fields();
258
259 if(likely(!res))
260 res= table->file->ha_update_row(table->record[1], table->record[0]);
261
262 if (likely(!res) && table->triggers)
263 res= table->triggers->process_triggers(thd, TRG_EVENT_INSERT,
264 TRG_ACTION_AFTER, true);
265 restore_record(table, record[1]);
266 if (res)
267 table->file->restore_auto_increment(prev_insert_id);
268
269 if (likely(!res) && lcond && rcond)
270 res= table->period_make_insert(period_conds.end.item,
271 table->field[table->s->period.start_fieldno]);
272
273 return res;
274 }
275
276 inline
delete_row()277 int TABLE::delete_row()
278 {
279 if (!versioned(VERS_TIMESTAMP) || !vers_end_field()->is_max())
280 return file->ha_delete_row(record[0]);
281
282 store_record(this, record[1]);
283 vers_update_end();
284 int err= file->ha_update_row(record[1], record[0]);
285 /*
286 MDEV-23644: we get HA_ERR_FOREIGN_DUPLICATE_KEY iff we already got history
287 row with same trx_id which is the result of foreign key action, so we
288 don't need one more history row.
289 */
290 if (err == HA_ERR_FOREIGN_DUPLICATE_KEY)
291 return file->ha_delete_row(record[0]);
292 return err;
293 }
294
295
296 /**
297 Implement DELETE SQL word.
298
299 @note Like implementations of other DDL/DML in MySQL, this function
300 relies on the caller to close the thread tables. This is done in the
301 end of dispatch_command().
302 */
303
bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
                  SQL_I_List<ORDER> *order_list, ha_rows limit,
                  ulonglong options, select_result *result)
{
  bool          will_batch= FALSE;
  int           error, loc_error;
  TABLE        *table;
  SQL_SELECT   *select=0;
  SORT_INFO    *file_sort= 0;
  READ_RECORD   info;
  bool          using_limit=limit != HA_POS_ERROR;
  bool          transactional_table, safe_update, const_cond;
  bool          const_cond_result;
  bool          return_error= 0;
  ha_rows       deleted= 0;
  bool          reverse= FALSE;
  bool          has_triggers= false;
  ORDER *order= (ORDER *) ((order_list && order_list->elements) ?
                           order_list->first : NULL);
  SELECT_LEX   *select_lex= thd->lex->first_select_lex();
  SELECT_LEX   *returning= thd->lex->has_returning() ? thd->lex->returning() : 0;
  killed_state killed_status= NOT_KILLED;
  THD::enum_binlog_query_type query_type= THD::ROW_QUERY_TYPE;
  bool binlog_is_row;
  Explain_delete *explain;
  Delete_plan query_plan(thd->mem_root);
  Unique * deltempfile= NULL;
  bool delete_record= false;
  bool delete_while_scanning;
  bool portion_of_time_through_update;
  DBUG_ENTER("mysql_delete");

  query_plan.index= MAX_KEY;
  query_plan.using_filesort= FALSE;

  /* --- Open and resolve tables ----------------------------------------- */
  create_explain_query(thd->lex, thd->mem_root);
  if (open_and_lock_tables(thd, table_list, TRUE, 0))
    DBUG_RETURN(TRUE);

  THD_STAGE_INFO(thd, stage_init_update);

  /* DELETE HISTORY of a system-versioned table; mutually exclusive with
     DELETE ... FOR PORTION OF. */
  const bool delete_history= table_list->vers_conditions.delete_history;
  DBUG_ASSERT(!(delete_history && table_list->period_conditions.is_set()));

  if (thd->lex->handle_list_of_derived(table_list, DT_MERGE_FOR_INSERT))
    DBUG_RETURN(TRUE);
  if (thd->lex->handle_list_of_derived(table_list, DT_PREPARE))
    DBUG_RETURN(TRUE);

  if (!table_list->single_table_updatable())
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "DELETE");
    DBUG_RETURN(TRUE);
  }
  if (!(table= table_list->table) || !table->is_created())
  {
    my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
             table_list->view_db.str, table_list->view_name.str);
    DBUG_RETURN(TRUE);
  }
  table->map=1;
  query_plan.select_lex= thd->lex->first_select_lex();
  query_plan.table= table;

  promote_select_describe_flag_if_needed(thd->lex);

  if (mysql_prepare_delete(thd, table_list, &conds, &delete_while_scanning))
    DBUG_RETURN(TRUE);

  /* DELETE HISTORY must not generate new history rows itself. */
  if (delete_history)
    table->vers_write= false;

  if (returning)
    (void) result->prepare(returning->item_list, NULL);

  if (thd->lex->current_select->first_cond_optimization)
  {
    thd->lex->current_select->save_leaf_tables(thd);
    thd->lex->current_select->first_cond_optimization= 0;
  }
  /* check ORDER BY even if it can be ignored */
  if (order)
  {
    TABLE_LIST   tables;
    List<Item>   fields;
    List<Item>   all_fields;

    bzero((char*) &tables,sizeof(tables));
    tables.table = table;
    tables.alias = table_list->alias;

    if (select_lex->setup_ref_array(thd, order_list->elements) ||
        setup_order(thd, select_lex->ref_pointer_array, &tables,
                    fields, all_fields, order))
    {
      free_underlaid_joins(thd, thd->lex->first_select_lex());
      DBUG_RETURN(TRUE);
    }
  }

  /* Apply the IN=>EXISTS transformation to all subqueries and optimize them. */
  if (select_lex->optimize_unflattened_subqueries(false))
    DBUG_RETURN(TRUE);

  const_cond= (!conds || conds->const_item());
  safe_update= MY_TEST(thd->variables.option_bits & OPTION_SAFE_UPDATES);
  if (safe_update && const_cond)
  {
    my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
               ER_THD(thd, ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
    DBUG_RETURN(TRUE);
  }

  const_cond_result= const_cond && (!conds || conds->val_int());
  if (unlikely(thd->is_error()))
  {
    /* Error evaluating val_int(). */
    DBUG_RETURN(TRUE);
  }

  /*
    Test if the user wants to delete all rows and deletion doesn't have
    any side-effects (because of triggers), so we can use optimized
    handler::delete_all_rows() method.

    We can use delete_all_rows() if and only if:
    - We allow new functions (not using option --skip-new), and are
      not in safe mode (not using option --safe-mode)
    - There is no limit clause
    - The condition is constant
    - If there is a condition, then it it produces a non-zero value
    - If the current command is DELETE FROM with no where clause, then:
      - We should not be binlogging this statement in row-based, and
      - there should be no delete triggers associated with the table.
  */

  has_triggers= table->triggers && table->triggers->has_delete_triggers();

  if (!returning && !using_limit && const_cond_result &&
      (!thd->is_current_stmt_binlog_format_row() && !has_triggers)
      && !table->versioned(VERS_TIMESTAMP) && !table_list->has_period())
  {
    /* Update the table->file->stats.records number */
    table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
    ha_rows const maybe_deleted= table->file->stats.records;
    DBUG_PRINT("debug", ("Trying to use delete_all_rows()"));

    query_plan.set_delete_all_rows(maybe_deleted);
    if (thd->lex->describe)
      goto produce_explain_and_leave;

    if (likely(!(error=table->file->ha_delete_all_rows())))
    {
      /*
        If delete_all_rows() is used, it is not possible to log the
        query in row format, so we have to log it in statement format.
      */
      query_type= THD::STMT_QUERY_TYPE;
      error= -1;
      deleted= maybe_deleted;
      if (!query_plan.save_explain_delete_data(thd->mem_root, thd))
        error= 1;
      goto cleanup;
    }
    if (error != HA_ERR_WRONG_COMMAND)
    {
      table->file->print_error(error,MYF(0));
      error=0;
      goto cleanup;
    }
    /* Handler didn't support fast delete; Delete rows one by one */
    query_plan.cancel_delete_all_rows();
  }
  if (conds)
  {
    Item::cond_result result;
    conds= conds->remove_eq_conds(thd, &result, true);
    if (result == Item::COND_FALSE)             // Impossible where
    {
      limit= 0;
      query_plan.set_impossible_where();
      if (thd->lex->describe || thd->lex->analyze_stmt)
        goto produce_explain_and_leave;
    }
  }

#ifdef WITH_PARTITION_STORAGE_ENGINE
  if (prune_partitions(thd, table, conds))
  {
    /* No matching partitions: nothing to delete. */
    free_underlaid_joins(thd, select_lex);

    query_plan.set_no_partitions();
    if (thd->lex->describe || thd->lex->analyze_stmt)
      goto produce_explain_and_leave;

    my_ok(thd, 0);
    DBUG_RETURN(0);
  }
#endif
  /* Update the table->file->stats.records number */
  table->file->info(HA_STATUS_VARIABLE | HA_STATUS_NO_LOCK);
  set_statistics_for_table(thd, table);

  table->covering_keys.clear_all();
  table->opt_range_keys.clear_all();

  select=make_select(table, 0, 0, conds, (SORT_INFO*) 0, 0, &error);
  if (unlikely(error))
    DBUG_RETURN(TRUE);
  if ((select && select->check_quick(thd, safe_update, limit)) || !limit)
  {
    query_plan.set_impossible_where();
    if (thd->lex->describe || thd->lex->analyze_stmt)
      goto produce_explain_and_leave;

    delete select;
    free_underlaid_joins(thd, select_lex);
    /*
      Error was already created by quick select evaluation (check_quick()).
      TODO: Add error code output parameter to Item::val_xxx() methods.
      Currently they rely on the user checking DA for
      errors when unwinding the stack after calling Item::val_xxx().
    */
    if (unlikely(thd->is_error()))
      DBUG_RETURN(TRUE);
    my_ok(thd, 0);
    DBUG_RETURN(0);                             // Nothing to delete
  }

  /* If running in safe sql mode, don't allow updates without keys */
  if (table->opt_range_keys.is_clear_all())
  {
    thd->set_status_no_index_used();
    if (safe_update && !using_limit)
    {
      delete select;
      free_underlaid_joins(thd, select_lex);
      my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE,
                 ER_THD(thd, ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE), MYF(0));
      DBUG_RETURN(TRUE);
    }
  }
  if (options & OPTION_QUICK)
    (void) table->file->extra(HA_EXTRA_QUICK);

  query_plan.scanned_rows= select? select->records: table->file->stats.records;
  if (order)
  {
    table->update_const_key_parts(conds);
    order= simple_remove_const(order, conds);

    if (select && select->quick && select->quick->unique_key_range())
    { // Single row select (always "ordered")
      query_plan.using_filesort= FALSE;
      query_plan.index= MAX_KEY;
    }
    else
    {
      /* Try to satisfy ORDER BY with an index scan; fall back to filesort. */
      ha_rows scanned_limit= query_plan.scanned_rows;
      table->no_keyread= 1;
      query_plan.index= get_index_for_order(order, table, select, limit,
                                            &scanned_limit,
                                            &query_plan.using_filesort,
                                            &reverse);
      table->no_keyread= 0;
      if (!query_plan.using_filesort)
        query_plan.scanned_rows= scanned_limit;
    }
  }

  query_plan.select= select;
  query_plan.possible_keys= select? select->possible_keys: key_map(0);

  /*
    Ok, we have generated a query plan for the DELETE.
     - if we're running EXPLAIN DELETE, goto produce explain output
     - otherwise, execute the query plan
  */
  if (thd->lex->describe)
    goto produce_explain_and_leave;

  if (!(explain= query_plan.save_explain_delete_data(thd->mem_root, thd)))
    goto got_error;
  ANALYZE_START_TRACKING(thd, &explain->command_tracker);

  DBUG_EXECUTE_IF("show_explain_probe_delete_exec_start",
                  dbug_serve_apcs(thd, 1););

  if (!(select && select->quick))
    status_var_increment(thd->status_var.delete_scan_count);

  binlog_is_row= thd->is_current_stmt_binlog_format_row();
  DBUG_PRINT("info", ("binlog_is_row: %s", binlog_is_row ? "TRUE" : "FALSE"));

  /*
    We can use direct delete (delete that is done silently in the handler)
    if none of the following conditions are true:
    - There are triggers
    - There is binary logging
    - There is a virtual not stored column in the WHERE clause
    - ORDER BY or LIMIT
      - As this requires the rows to be deleted in a specific order
      - Note that Spider can handle ORDER BY and LIMIT in a cluster with
        one data node.  These conditions are therefore checked in
        direct_delete_rows_init().

    Direct delete does not require a WHERE clause

    Later we also ensure that we are only using one table (no sub queries)
  */

  if ((table->file->ha_table_flags() & HA_CAN_DIRECT_UPDATE_AND_DELETE) &&
      !has_triggers && !binlog_is_row && !returning &&
      !table_list->has_period())
  {
    table->mark_columns_needed_for_delete();
    if (!table->check_virtual_columns_marked_for_read())
    {
      DBUG_PRINT("info", ("Trying direct delete"));
      bool use_direct_delete= !select || !select->cond;
      if (!use_direct_delete &&
          (select->cond->used_tables() & ~RAND_TABLE_BIT) == table->map)
      {
        DBUG_ASSERT(!table->file->pushed_cond);
        if (!table->file->cond_push(select->cond))
        {
          use_direct_delete= TRUE;
          table->file->pushed_cond= select->cond;
        }
      }
      if (use_direct_delete && !table->file->direct_delete_rows_init())
      {
        /* Direct deleting is supported */
        DBUG_PRINT("info", ("Using direct delete"));
        THD_STAGE_INFO(thd, stage_updating);
        if (!(error= table->file->ha_direct_delete_rows(&deleted)))
          error= -1;
        goto terminate_delete;
      }
    }
  }

  if (query_plan.using_filesort)
  {
    {
      Filesort fsort(order, HA_POS_ERROR, true, select);
      DBUG_ASSERT(query_plan.index == MAX_KEY);

      Filesort_tracker *fs_tracker=
        thd->lex->explain->get_upd_del_plan()->filesort_tracker;

      if (!(file_sort= filesort(thd, table, &fsort, fs_tracker)))
        goto got_error;

      thd->inc_examined_row_count(file_sort->examined_rows);
      /*
        Filesort has already found and selected the rows we want to delete,
        so we don't need the where clause
      */
      delete select;

      /*
        If we are not in DELETE ... RETURNING, we can free subqueries. (in
        DELETE ... RETURNING we can't, because the RETURNING part may have
        a subquery in it)
      */
      if (!returning)
        free_underlaid_joins(thd, select_lex);
      select= 0;
    }
  }

  /* If quick select is used, initialize it before retrieving rows. */
  if (select && select->quick && select->quick->reset())
    goto got_error;

  if (query_plan.index == MAX_KEY || (select && select->quick))
    error= init_read_record(&info, thd, table, select, file_sort, 1, 1, FALSE);
  else
    error= init_read_record_idx(&info, thd, table, 1, query_plan.index,
                                reverse);
  if (unlikely(error))
    goto got_error;

  if (unlikely(init_ftfuncs(thd, select_lex, 1)))
    goto got_error;

  if (table_list->has_period())
  {
    /* FOR PORTION OF may rewrite period columns: need all columns around. */
    table->use_all_columns();
    table->rpl_write_set= table->write_set;
  }
  else
  {
    table->mark_columns_needed_for_delete();
  }

  if ((table->file->ha_table_flags() & HA_CAN_FORCE_BULK_DELETE) &&
      !table->prepare_triggers_for_delete_stmt_or_event())
    will_batch= !table->file->start_bulk_delete();

  /*
    thd->get_stmt_da()->is_set() means first iteration of prepared statement
    with array binding operation execution (non optimized so it is not
    INSERT)
  */
  if (returning && !thd->get_stmt_da()->is_set())
  {
    if (result->send_result_set_metadata(returning->item_list,
                                         Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))
      goto cleanup;
  }

  explain= (Explain_delete*)thd->lex->explain->get_upd_del_plan();
  explain->tracker.on_scan_init();

  if (!delete_while_scanning)
  {
    /*
      The table we are going to delete appears in subqueries in the where
      clause.  Instead of deleting the rows, first mark them deleted.
    */
    ha_rows tmplimit=limit;
    deltempfile= new (thd->mem_root) Unique (refpos_order_cmp, table->file,
                                             table->file->ref_length,
                                             MEM_STRIP_BUF_SIZE);

    THD_STAGE_INFO(thd, stage_searching_rows_for_update);
    while (!(error=info.read_record()) && !thd->killed &&
           ! thd->is_error())
    {
      if (record_should_be_deleted(thd, table, select, explain, delete_history))
      {
        table->file->position(table->record[0]);
        if (unlikely((error=
                      deltempfile->unique_add((char*) table->file->ref))))
        {
          error= 1;
          goto terminate_delete;
        }
        if (!--tmplimit && using_limit)
          break;
      }
    }
    end_read_record(&info);
    /* Switch to a second pass that reads back the collected row positions. */
    if (unlikely(deltempfile->get(table)) ||
        unlikely(table->file->ha_index_or_rnd_end()) ||
        unlikely(init_read_record(&info, thd, table, 0, &deltempfile->sort, 0,
                                  1, false)))
    {
      error= 1;
      goto terminate_delete;
    }
    delete_record= true;
  }

  /*
    From SQL2016, Part 2, 15.7 <Effect of deleting rows from base table>,
    General Rules, 8), we can conclude that DELETE FOR PORTION OF time performs
    0-2 INSERTS + DELETE. We can substitute INSERT+DELETE with one UPDATE, with
    a condition of no side effects. The side effect is possible if there is a
    BEFORE INSERT trigger, since it is the only one splitting DELETE and INSERT
    operations.
    Another possible side effect is related to tables of non-transactional
    engines, since UPDATE is anyway atomic, and DELETE+INSERT is not.

    This optimization is not possible for system-versioned table.
  */
  portion_of_time_through_update=
    !(table->triggers && table->triggers->has_triggers(TRG_EVENT_INSERT,
                                                       TRG_ACTION_BEFORE))
    && !table->versioned()
    && table->file->has_transactions();

  if (table->versioned(VERS_TIMESTAMP) || (table_list->has_period()))
    table->file->prepare_for_insert(1);
  DBUG_ASSERT(table->file->inited != handler::NONE);

  THD_STAGE_INFO(thd, stage_updating);
  while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
         likely(!thd->is_error()))
  {
    if (delete_while_scanning)
      delete_record= record_should_be_deleted(thd, table, select, explain,
                                              delete_history);
    if (delete_record)
    {
      if (!delete_history && table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                            TRG_ACTION_BEFORE, FALSE))
      {
        error= 1;
        break;
      }

      // no LIMIT / OFFSET
      if (returning && result->send_data(returning->item_list) < 0)
      {
        error=1;
        break;
      }

      if (table_list->has_period() && portion_of_time_through_update)
      {
        bool need_delete= true;
        error= update_portion_of_time(thd, table, table_list->period_conditions,
                                      &need_delete);
        /* need_delete is set when the row lies fully inside the portion. */
        if (likely(!error) && need_delete)
          error= table->delete_row();
      }
      else
      {
        error= table->delete_row();

        ha_rows rows_inserted;
        if (likely(!error) && table_list->has_period()
            && !portion_of_time_through_update)
          error= table->insert_portion_of_time(thd, table_list->period_conditions,
                                               &rows_inserted);
      }

      if (likely(!error))
      {
        deleted++;
        if (!delete_history && table->triggers &&
            table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                              TRG_ACTION_AFTER, FALSE))
        {
          error= 1;
          break;
        }
        if (!--limit && using_limit)
        {
          error= -1;
          break;
        }
      }
      else
      {
        table->file->print_error(error,
                                 MYF(thd->lex->ignore ? ME_WARNING : 0));
        if (thd->is_error())
        {
          error= 1;
          break;
        }
      }
    }
    /*
      Don't try unlocking the row if skip_record reported an error since in
      this case the transaction might have been rolled back already.
    */
    else if (likely(!thd->is_error()))
      table->file->unlock_row();  // Row failed selection, release lock on it
    else
      break;
  }

terminate_delete:
  killed_status= thd->killed;
  if (unlikely(killed_status != NOT_KILLED || thd->is_error()))
    error= 1;                                   // Aborted
  if (will_batch && unlikely((loc_error= table->file->end_bulk_delete())))
  {
    if (error != 1)
      table->file->print_error(loc_error,MYF(0));
    error=1;
  }
  THD_STAGE_INFO(thd, stage_end);
  end_read_record(&info);
  if (table_list->has_period())
    table->file->ha_release_auto_increment();
  if (options & OPTION_QUICK)
    (void) table->file->extra(HA_EXTRA_NORMAL);
  ANALYZE_STOP_TRACKING(thd, &explain->command_tracker);

cleanup:
  /*
    Invalidate the table in the query cache if something changed. This must
    be before binlog writing and ha_autocommit_...
  */
  if (deleted)
  {
    query_cache_invalidate3(thd, table_list, 1);
  }

  if (thd->lex->current_select->first_cond_optimization)
  {
    thd->lex->current_select->save_leaf_tables(thd);
    thd->lex->current_select->first_cond_optimization= 0;
  }

  delete deltempfile;
  deltempfile=NULL;
  delete select;
  select= NULL;
  transactional_table= table->file->has_transactions_and_rollback();

  if (!transactional_table && deleted > 0)
    thd->transaction->stmt.modified_non_trans_table=
      thd->transaction->all.modified_non_trans_table= TRUE;

  /* See similar binlogging code in sql_update.cc, for comments */
  if (likely((error < 0) || thd->transaction->stmt.modified_non_trans_table))
  {
    if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (error < 0)
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == NOT_KILLED);

      ScopedStatementReplication scoped_stmt_rpl(
        table->versioned(VERS_TRX_ID) ? thd : NULL);
      /*
        [binlog]: If 'handler::delete_all_rows()' was called and the
        storage engine does not inject the rows itself, we replicate
        statement-based; otherwise, 'ha_delete_row()' was used to
        delete specific rows which we might log row-based.
      */
      int log_result= thd->binlog_query(query_type,
                                        thd->query(), thd->query_length(),
                                        transactional_table, FALSE, FALSE,
                                        errcode);

      if (log_result > 0)
      {
        error=1;
      }
    }
  }
  DBUG_ASSERT(transactional_table || !deleted || thd->transaction->stmt.modified_non_trans_table);

  if (likely(error < 0) ||
      (thd->lex->ignore && !thd->is_error() && !thd->is_fatal_error))
  {
    if (thd->lex->analyze_stmt)
      goto send_nothing_and_leave;

    if (returning)
      result->send_eof();
    else
      my_ok(thd, deleted);
    DBUG_PRINT("info",("%ld records deleted",(long) deleted));
  }
  delete file_sort;
  free_underlaid_joins(thd, select_lex);
  if (table->file->pushed_cond)
    table->file->cond_pop();
  DBUG_RETURN(error >= 0 || thd->is_error());

  /* Special exits */
produce_explain_and_leave:
  /*
    We come here for various "degenerate" query plans: impossible WHERE,
    no-partitions-used, impossible-range, etc.
  */
  if (!(query_plan.save_explain_delete_data(thd->mem_root, thd)))
    goto got_error;

send_nothing_and_leave:
  /*
    ANALYZE DELETE jumps here. We can't send explain right here, because
    we might be using ANALYZE DELETE ... RETURNING, in which case we have
    Protocol_discard active.
  */

  delete select;
  delete file_sort;
  free_underlaid_joins(thd, select_lex);
  if (table->file->pushed_cond)
    table->file->cond_pop();

  DBUG_ASSERT(!return_error || thd->is_error() || thd->killed);
  DBUG_RETURN((return_error || thd->is_error() || thd->killed) ? 1 : 0);

got_error:
  return_error= 1;
  goto send_nothing_and_leave;
}
985
986
987 /*
988 Prepare items in DELETE statement
989
990 SYNOPSIS
991 mysql_prepare_delete()
992 thd - thread handler
993 table_list - global/local table list
994 conds - conditions
995
996 RETURN VALUE
997 FALSE OK
998 TRUE error
999 */
int mysql_prepare_delete(THD *thd, TABLE_LIST *table_list, Item **conds,
                         bool *delete_while_scanning)
{
  Item *fake_conds= 0;
  SELECT_LEX *select_lex= thd->lex->first_select_lex();
  DBUG_ENTER("mysql_prepare_delete");
  List<Item> all_fields;

  /* Assume rows can be deleted as the scan goes; revised below. */
  *delete_while_scanning= true;
  thd->lex->allow_sum_func.clear_all();
  if (setup_tables_and_check_access(thd, &select_lex->context,
                                    &select_lex->top_join_list, table_list,
                                    select_lex->leaf_tables, FALSE,
                                    DELETE_ACL, SELECT_ACL, TRUE))
    DBUG_RETURN(TRUE);

  /* System-versioning (FOR SYSTEM_TIME) is only valid on base tables. */
  if (table_list->vers_conditions.is_set() && table_list->is_view_or_derived())
  {
    my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
    DBUG_RETURN(true);
  }

  /* Same restriction for application-time periods (FOR PORTION OF). */
  if (table_list->has_period())
  {
    if (table_list->is_view_or_derived())
    {
      my_error(ER_IT_IS_A_VIEW, MYF(0), table_list->table_name.str);
      DBUG_RETURN(true);
    }

    if (select_lex->period_setup_conds(thd, table_list))
      DBUG_RETURN(true);
  }

  DBUG_ASSERT(table_list->table);
  // conds could be cached from previous SP call
  DBUG_ASSERT(!table_list->vers_conditions.need_setup() ||
              !*conds || thd->stmt_arena->is_stmt_execute());
  if (select_lex->vers_setup_conds(thd, table_list))
    DBUG_RETURN(TRUE);

  *conds= select_lex->where;

  if (setup_returning_fields(thd, table_list) ||
      setup_conds(thd, table_list, select_lex->leaf_tables, conds) ||
      setup_ftfuncs(select_lex))
    DBUG_RETURN(TRUE);
  if (!table_list->single_table_updatable() ||
      check_key_in_view(thd, table_list))
  {
    my_error(ER_NON_UPDATABLE_TABLE, MYF(0), table_list->alias.str, "DELETE");
    DBUG_RETURN(TRUE);
  }

  /*
    Application-time periods: if FOR PORTION OF ... syntax used, DELETE
    statement could issue delete_row's mixed with write_row's. This causes
    problems for myisam and corrupts table, if deleting while scanning.

    Likewise, if the target table also appears in a subquery of the WHERE
    clause (unique_table() finds a duplicate), deletion must be deferred to
    a second pass.
  */
  if (table_list->has_period()
      || unique_table(thd, table_list, table_list->next_global, 0))
    *delete_while_scanning= false;

  if (select_lex->inner_refs_list.elements &&
      fix_inner_refs(thd, all_fields, select_lex, select_lex->ref_pointer_array))
    DBUG_RETURN(TRUE);

  select_lex->fix_prepare_information(thd, conds, &fake_conds);
  DBUG_RETURN(FALSE);
}
1070
1071
1072 /***************************************************************************
1073 Delete multiple tables from join
1074 ***************************************************************************/
1075
1076
refpos_order_cmp(void * arg,const void * a,const void * b)1077 extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b)
1078 {
1079 handler *file= (handler*)arg;
1080 return file->cmp_ref((const uchar*)a, (const uchar*)b);
1081 }
1082
1083 /*
1084 make delete specific preparation and checks after opening tables
1085
1086 SYNOPSIS
1087 mysql_multi_delete_prepare()
1088 thd thread handler
1089
1090 RETURN
1091 FALSE OK
1092 TRUE Error
1093 */
1094
int mysql_multi_delete_prepare(THD *thd)
{
  LEX *lex= thd->lex;
  TABLE_LIST *aux_tables= lex->auxiliary_table_list.first;
  TABLE_LIST *target_tbl;
  DBUG_ENTER("mysql_multi_delete_prepare");

  /* Resolve views/derived tables before the delete targets can be checked */
  if (mysql_handle_derived(lex, DT_INIT))
    DBUG_RETURN(TRUE);
  if (mysql_handle_derived(lex, DT_MERGE_FOR_INSERT))
    DBUG_RETURN(TRUE);
  if (mysql_handle_derived(lex, DT_PREPARE))
    DBUG_RETURN(TRUE);
  /*
    setup_tables() is needed for VIEWs. JOIN::prepare() will not do it a
    second time.

    lex->query_tables also points to the local list of the DELETE SELECT_LEX
  */
  if (setup_tables_and_check_access(thd,
                                    &thd->lex->first_select_lex()->context,
                                    &thd->lex->first_select_lex()->
                                      top_join_list,
                                    lex->query_tables,
                                    lex->first_select_lex()->leaf_tables,
                                    FALSE, DELETE_ACL, SELECT_ACL, FALSE))
    DBUG_RETURN(TRUE);

  /*
    Multi-delete can't be constructed over-union => we always have
    single SELECT on top and have to check underlying SELECTs of it
  */
  lex->first_select_lex()->set_unique_exclude();
  /* Fix tables-to-be-deleted-from list to point at opened tables */
  for (target_tbl= (TABLE_LIST*) aux_tables;
       target_tbl;
       target_tbl= target_tbl->next_local)
  {

    target_tbl->table= target_tbl->correspondent_table->table;
    /* Deleting through a multi-table (merge) view is not allowed */
    if (target_tbl->correspondent_table->is_multitable())
    {
      my_error(ER_VIEW_DELETE_MERGE_VIEW, MYF(0),
               target_tbl->correspondent_table->view_db.str,
               target_tbl->correspondent_table->view_name.str);
      DBUG_RETURN(TRUE);
    }

    if (!target_tbl->correspondent_table->single_table_updatable() ||
        check_key_in_view(thd, target_tbl->correspondent_table))
    {
      my_error(ER_NON_UPDATABLE_TABLE, MYF(0),
               target_tbl->table_name.str, "DELETE");
      DBUG_RETURN(TRUE);
    }
  }

  for (target_tbl= (TABLE_LIST*) aux_tables;
       target_tbl;
       target_tbl= target_tbl->next_local)
  {
    /*
      Check that table from which we delete is not used somewhere
      inside subqueries/view.
    */
    {
      TABLE_LIST *duplicate;
      if ((duplicate= unique_table(thd, target_tbl->correspondent_table,
                                   lex->query_tables, 0)))
      {
        update_non_unique_table_error(target_tbl->correspondent_table,
                                      "DELETE", duplicate);
        DBUG_RETURN(TRUE);
      }
    }
  }
  /*
    Reset the exclude flag to false so it doesn't interfere
    with further calls to unique_table
  */
  lex->first_select_lex()->exclude_from_table_unique_test= FALSE;

  /* Remember the resolved leaf tables for re-execution (PS/SP) */
  if (lex->save_prep_leaf_tables())
    DBUG_RETURN(TRUE);

  DBUG_RETURN(FALSE);
}
1182
1183
/*
  Construct the multi-table DELETE result sink.
  'dt' is the list of tables to delete from; 'num_of_tables_arg' is its
  length. The Unique buffers themselves are created later in
  initialize_tables().
*/
multi_delete::multi_delete(THD *thd_arg, TABLE_LIST *dt, uint num_of_tables_arg):
  select_result_interceptor(thd_arg), delete_tables(dt), deleted(0), found(0),
  num_of_tables(num_of_tables_arg), error(0),
  do_delete(0), transactional_tables(0), normal_tables(0), error_handled(0)
{
  /*
    calloc() zero-fills, so slots that never get a Unique stay NULL,
    which the destructor relies on.
  */
  tempfiles= (Unique **) thd_arg->calloc(sizeof(Unique *) * num_of_tables);
}
1191
1192
1193 int
prepare(List<Item> & values,SELECT_LEX_UNIT * u)1194 multi_delete::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
1195 {
1196 DBUG_ENTER("multi_delete::prepare");
1197 unit= u;
1198 do_delete= 1;
1199 THD_STAGE_INFO(thd, stage_deleting_from_main_table);
1200 DBUG_RETURN(0);
1201 }
1202
prepare_to_read_rows()1203 void multi_delete::prepare_to_read_rows()
1204 {
1205 /* see multi_update::prepare_to_read_rows() */
1206 for (TABLE_LIST *walk= delete_tables; walk; walk= walk->next_local)
1207 {
1208 TABLE_LIST *tbl= walk->correspondent_table->find_table_for_update();
1209 tbl->table->mark_columns_needed_for_delete();
1210 }
1211 }
1212
/*
  Per-join setup for multi-table DELETE: decide which tables can be deleted
  from while scanning vs. deferred, set per-table flags, and create one
  Unique row-reference buffer per deferred table.
  Returns nonzero on error (including fatal OOM from Unique allocation).
*/
bool
multi_delete::initialize_tables(JOIN *join)
{
  TABLE_LIST *walk;
  Unique **tempfiles_ptr;
  DBUG_ENTER("initialize_tables");

  /* With SQL_SAFE_UPDATES set, refuse a full join over the targets */
  if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
               error_if_full_join(join)))
    DBUG_RETURN(1);

  table_map tables_to_delete_from=0;
  delete_while_scanning= true;
  for (walk= delete_tables; walk; walk= walk->next_local)
  {
    TABLE_LIST *tbl= walk->correspondent_table->find_table_for_update();
    tables_to_delete_from|= tbl->table->map;
    if (delete_while_scanning &&
        unique_table(thd, tbl, join->tables_list, 0))
    {
      /*
        If the table we are going to delete from appears
        in join, we need to defer delete. So the delete
        doesn't interfere with the scanning of results.
      */
      delete_while_scanning= false;
    }
  }

  walk= delete_tables;

  for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS,
                                       WITH_CONST_TABLES);
       tab;
       tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
  {
    if (!tab->bush_children && tab->table->map & tables_to_delete_from)
    {
      /* We are going to delete from this table */
      TABLE *tbl=walk->table=tab->table;
      walk= walk->next_local;
      /* Don't use KEYREAD optimization on this table */
      tbl->no_keyread=1;
      /* Don't use record cache */
      tbl->no_cache= 1;
      tbl->covering_keys.clear_all();
      if (tbl->file->has_transactions())
        transactional_tables= 1;
      else
        normal_tables= 1;
      tbl->prepare_triggers_for_delete_stmt_or_event();
      tbl->prepare_for_position();

      /* Timestamp-versioned tables insert history rows on delete */
      if (tbl->versioned(VERS_TIMESTAMP))
        tbl->file->prepare_for_insert(1);
    }
    else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
             walk == delete_tables)
    {
      /*
        We are not deleting from the table we are scanning. In this
        case send_data() shouldn't delete any rows as we may touch
        the rows in the deleted table many times
      */
      delete_while_scanning= false;
    }
  }
  walk= delete_tables;
  tempfiles_ptr= tempfiles;
  if (delete_while_scanning)
  {
    /* The first table is deleted from during the scan: no tempfile for it */
    table_being_deleted= delete_tables;
    walk= walk->next_local;
  }
  /* One Unique buffer of sorted row references per deferred table */
  for (;walk ;walk= walk->next_local)
  {
    TABLE *table=walk->table;
    *tempfiles_ptr++= new (thd->mem_root) Unique (refpos_order_cmp, table->file,
                                                  table->file->ref_length,
                                                  MEM_STRIP_BUF_SIZE);
  }
  init_ftfuncs(thd, thd->lex->current_select, 1);
  DBUG_RETURN(thd->is_fatal_error);
}
1297
1298
~multi_delete()1299 multi_delete::~multi_delete()
1300 {
1301 for (table_being_deleted= delete_tables;
1302 table_being_deleted;
1303 table_being_deleted= table_being_deleted->next_local)
1304 {
1305 TABLE *table= table_being_deleted->table;
1306 table->no_keyread=0;
1307 table->no_cache= 0;
1308 }
1309
1310 for (uint counter= 0; counter < num_of_tables; counter++)
1311 {
1312 if (tempfiles[counter])
1313 delete tempfiles[counter];
1314 }
1315 }
1316
1317
/*
  Called once per joined row. Deletes the row from the first table
  immediately when delete_while_scanning is set; for all other tables the
  row reference is buffered in the corresponding Unique for the deferred
  pass in do_deletes(). Returns nonzero on error.
*/
int multi_delete::send_data(List<Item> &values)
{
  /*
    Index into tempfiles[] for deferred tables. Starts at -1 when the first
    table is deleted from directly (it has no tempfile).
  */
  int secure_counter= delete_while_scanning ? -1 : 0;
  TABLE_LIST *del_table;
  DBUG_ENTER("multi_delete::send_data");

  bool ignore= thd->lex->ignore;

  for (del_table= delete_tables;
       del_table;
       del_table= del_table->next_local, secure_counter++)
  {
    TABLE *table= del_table->table;

    /* Check if we are using outer join and we didn't find the row */
    if (table->status & (STATUS_NULL_ROW | STATUS_DELETED))
      continue;

    /* Save the row position; needed for both direct and deferred delete */
    table->file->position(table->record[0]);
    found++;

    if (secure_counter < 0)
    {
      /* We are scanning the current table */
      DBUG_ASSERT(del_table == table_being_deleted);
      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                            TRG_ACTION_BEFORE, FALSE))
        DBUG_RETURN(1);
      /* Mark deleted so later outer-join hits on this row are skipped */
      table->status|= STATUS_DELETED;

      error= table->delete_row();
      if (likely(!error))
      {
        deleted++;
        if (!table->file->has_transactions())
          thd->transaction->stmt.modified_non_trans_table= TRUE;
        if (table->triggers &&
            table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                              TRG_ACTION_AFTER, FALSE))
          DBUG_RETURN(1);
      }
      else if (!ignore)
      {
        /*
          If the IGNORE option is used errors caused by ha_delete_row don't
          have to stop the iteration.
        */
        table->file->print_error(error,MYF(0));
        DBUG_RETURN(1);
      }
    }
    else
    {
      /* Deferred table: buffer the row reference for do_deletes() */
      error=tempfiles[secure_counter]->unique_add((char*) table->file->ref);
      if (unlikely(error))
      {
        error= 1;                               // Fatal error
        DBUG_RETURN(1);
      }
    }
  }
  DBUG_RETURN(0);
}
1382
1383
/*
  Error path of multi-table DELETE. Decides whether the statement can be
  fully rolled back, must still run the deferred deletes (send_eof()), or
  only needs to be binlogged with the error because non-transactional
  changes already happened.
*/
void multi_delete::abort_result_set()
{
  DBUG_ENTER("multi_delete::abort_result_set");

  /* the error was handled or nothing deleted and no side effects return */
  if (error_handled ||
      (!thd->transaction->stmt.modified_non_trans_table && !deleted))
    DBUG_VOID_RETURN;

  /* Something already deleted so we have to invalidate cache */
  if (deleted)
    query_cache_invalidate3(thd, delete_tables, 1);

  /* Propagate statement-level unsafe-rollback state to the transaction */
  if (thd->transaction->stmt.modified_non_trans_table)
    thd->transaction->all.modified_non_trans_table= TRUE;
  thd->transaction->all.m_unsafe_rollback_flags|=
    (thd->transaction->stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);

  /*
    If rows from the first table only has been deleted and it is
    transactional, just do rollback.
    The same if all tables are transactional, regardless of where we are.
    In all other cases do attempt deletes ...
  */
  if (do_delete && normal_tables &&
      (table_being_deleted != delete_tables ||
       !table_being_deleted->table->file->has_transactions_and_rollback()))
  {
    /*
      We have to execute the recorded do_deletes() and write info into the
      error log
    */
    error= 1;
    send_eof();
    DBUG_ASSERT(error_handled);
    DBUG_VOID_RETURN;
  }

  if (thd->transaction->stmt.modified_non_trans_table)
  {
    /*
      there is only side effects; to binlog with the error
    */
    if (WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
    {
      int errcode= query_error_code(thd, thd->killed == NOT_KILLED);
      /* possible error of writing binary log is ignored deliberately */
      (void) thd->binlog_query(THD::ROW_QUERY_TYPE,
                               thd->query(), thd->query_length(),
                               transactional_tables, FALSE, FALSE, errcode);
    }
  }
  DBUG_VOID_RETURN;
}
1438
1439
1440
1441 /**
1442 Do delete from other tables.
1443
1444 @retval 0 ok
1445 @retval 1 error
1446
  @todo Is there any reason not to use the normal nested-loops join? If not, and
1448 there is no documentation supporting it, this method and callee should be
1449 removed and there should be hooks within normal execution.
1450 */
1451
int multi_delete::do_deletes()
{
  DBUG_ENTER("do_deletes");
  DBUG_ASSERT(do_delete);

  do_delete= 0;                                 // Mark called
  if (!found)
    DBUG_RETURN(0);

  /*
    If rows were deleted during the scan, the first table is already done;
    the deferred pass starts with the second table.
  */
  table_being_deleted= (delete_while_scanning ? delete_tables->next_local :
                        delete_tables);

  for (uint counter= 0; table_being_deleted;
       table_being_deleted= table_being_deleted->next_local, counter++)
  {
    TABLE *table = table_being_deleted->table;
    int local_error;
    /* Finalize the Unique buffer so its row refs can be read back */
    if (unlikely(tempfiles[counter]->get(table)))
      DBUG_RETURN(1);

    local_error= do_table_deletes(table, &tempfiles[counter]->sort,
                                  thd->lex->ignore);

    /* A kill that interrupted the scan surfaces here as an error */
    if (unlikely(thd->killed) && likely(!local_error))
      DBUG_RETURN(1);

    if (unlikely(local_error == -1))            // End of file
      local_error= 0;

    if (unlikely(local_error))
      DBUG_RETURN(local_error);
  }
  DBUG_RETURN(0);
}
1486
1487
1488 /**
1489 Implements the inner loop of nested-loops join within multi-DELETE
1490 execution.
1491
1492 @param table The table from which to delete.
1493
1494 @param ignore If used, all non fatal errors will be translated
1495 to warnings and we should not break the row-by-row iteration.
1496
1497 @return Status code
1498
1499 @retval 0 All ok.
1500 @retval 1 Triggers or handler reported error.
1501 @retval -1 End of file from handler.
1502 */
int multi_delete::do_table_deletes(TABLE *table, SORT_INFO *sort_info,
                                   bool ignore)
{
  int local_error= 0;
  READ_RECORD info;
  ha_rows last_deleted= deleted;
  DBUG_ENTER("do_deletes_for_table");

  /* Iterate over the sorted row references collected in send_data() */
  if (unlikely(init_read_record(&info, thd, table, NULL, sort_info, 0, 1,
                                FALSE)))
    DBUG_RETURN(1);

  /* start_bulk_delete() returns 0 when the engine supports batching */
  bool will_batch= !table->file->start_bulk_delete();
  while (likely(!(local_error= info.read_record())) && likely(!thd->killed))
  {
    if (table->triggers &&
        unlikely(table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                                   TRG_ACTION_BEFORE, FALSE)))
    {
      local_error= 1;
      break;
    }

    local_error= table->delete_row();
    if (unlikely(local_error) && !ignore)
    {
      table->file->print_error(local_error, MYF(0));
      break;
    }

    /*
      Increase the reported number of deleted rows only if no error occurred
      during ha_delete_row.
      Also, don't execute the AFTER trigger if the row operation failed.
    */
    if (unlikely(!local_error))
    {
      deleted++;
      if (table->triggers &&
          table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
                                            TRG_ACTION_AFTER, FALSE))
      {
        local_error= 1;
        break;
      }
    }
  }
  if (will_batch)
  {
    /* Flush the batched deletes; report its error unless one is pending */
    int tmp_error= table->file->end_bulk_delete();
    if (unlikely(tmp_error) && !local_error)
    {
      local_error= tmp_error;
      table->file->print_error(local_error, MYF(0));
    }
  }
  /* Deletes from a non-rollbackable engine cannot be undone */
  if (last_deleted != deleted && !table->file->has_transactions_and_rollback())
    thd->transaction->stmt.modified_non_trans_table= TRUE;

  end_read_record(&info);

  DBUG_RETURN(local_error);
}
1566
1567 /*
1568 Send ok to the client
1569
  return:  0 success
1571 1 error
1572 */
1573
bool multi_delete::send_eof()
{
  killed_state killed_status= NOT_KILLED;
  THD_STAGE_INFO(thd, stage_deleting_from_reference_tables);

  /* Does deletes for the last n - 1 tables, returns 0 if ok */
  int local_error= do_deletes();                // returns 0 if success

  /* compute a total error to know if something failed */
  local_error= local_error || error;
  killed_status= (local_error == 0)? NOT_KILLED : thd->killed;
  /* reset used flags */
  THD_STAGE_INFO(thd, stage_end);

  /* Propagate statement-level non-transactional state to the transaction */
  if (thd->transaction->stmt.modified_non_trans_table)
    thd->transaction->all.modified_non_trans_table= TRUE;
  thd->transaction->all.m_unsafe_rollback_flags|=
    (thd->transaction->stmt.m_unsafe_rollback_flags & THD_TRANS::DID_WAIT);

  /*
    We must invalidate the query cache before binlog writing and
    ha_autocommit_...
  */
  if (deleted)
  {
    query_cache_invalidate3(thd, delete_tables, 1);
  }
  /*
    Binlog on success, or on failure when a non-transactional table was
    already modified (that change cannot be rolled back).
  */
  if (likely((local_error == 0) ||
             thd->transaction->stmt.modified_non_trans_table))
  {
    if(WSREP_EMULATE_BINLOG(thd) || mysql_bin_log.is_open())
    {
      int errcode= 0;
      if (likely(local_error == 0))
        thd->clear_error();
      else
        errcode= query_error_code(thd, killed_status == NOT_KILLED);
      thd->thread_specific_used= TRUE;
      if (unlikely(thd->binlog_query(THD::ROW_QUERY_TYPE,
                                     thd->query(), thd->query_length(),
                                     transactional_tables, FALSE, FALSE,
                                     errcode) > 0) &&
          !normal_tables)
      {
        local_error=1;  // Log write failed: roll back the SQL statement
      }
    }
  }
  if (unlikely(local_error != 0))
    error_handled= TRUE; // to force early leave from ::abort_result_set()

  if (likely(!local_error && !thd->lex->analyze_stmt))
  {
    /* ANALYZE DELETE sends its result set instead of the ok packet */
    ::my_ok(thd, deleted);
  }
  return 0;
}
1631