1 /* Copyright (c) 2000, 2021, Oracle and/or its affiliates. 2 3 This program is free software; you can redistribute it and/or modify 4 it under the terms of the GNU General Public License, version 2.0, 5 as published by the Free Software Foundation. 6 7 This program is also distributed with certain software (including 8 but not limited to OpenSSL) that is licensed under separate terms, 9 as designated in a particular file or component or in included license 10 documentation. The authors of MySQL hereby grant you an additional 11 permission to link the program and your derivative works with the 12 separately licensed software that they have included with MySQL. 13 14 This program is distributed in the hope that it will be useful, 15 but WITHOUT ANY WARRANTY; without even the implied warranty of 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 GNU General Public License, version 2.0, for more details. 18 19 You should have received a copy of the GNU General Public License 20 along with this program; if not, write to the Free Software Foundation, 21 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ 22 23 24 /* classes to use when handling where clause */ 25 26 #ifndef _opt_range_h 27 #define _opt_range_h 28 29 #include "my_global.h" 30 #include "field.h" // Field 31 #include "prealloced_array.h" // Prealloced_array 32 #include "priority_queue.h" // Priority_queue 33 #include "records.h" // READ_RECORD 34 #include "malloc_allocator.h" 35 36 #include <algorithm> 37 38 class JOIN; 39 class Item_sum; 40 class Opt_trace_context; 41 class Unique; 42 43 typedef struct st_key_part { 44 uint16 key,part; 45 /* See KEY_PART_INFO for meaning of the next two: */ 46 uint16 store_length, length; 47 uint8 null_bit; 48 /* 49 Keypart flags (0 when this structure is used by partition pruning code 50 for fake partitioning index description) 51 */ 52 uint8 flag; 53 Field *field; 54 Field::imagetype image_type; 55 } KEY_PART; 56 57 58 class QUICK_RANGE :public 
Sql_alloc { 59 public: 60 uchar *min_key,*max_key; 61 uint16 min_length,max_length; 62 63 /// Stores bitwise-or'ed bits defined in enum key_range_flags. 64 uint16 flag; 65 66 /** 67 Stores one of the HA_READ_MBR_XXX items in enum ha_rkey_function, only 68 effective when flag has a GEOM_FLAG bit. 69 */ 70 enum ha_rkey_function rkey_func_flag; 71 key_part_map min_keypart_map, // bitmap of used keyparts in min_key 72 max_keypart_map; // bitmap of used keyparts in max_key 73 74 QUICK_RANGE(); /* Full range */ 75 QUICK_RANGE(const uchar *min_key_arg, uint min_length_arg, 76 key_part_map min_keypart_map_arg, 77 const uchar *max_key_arg, uint max_length_arg, 78 key_part_map max_keypart_map_arg, 79 uint flag_arg, enum ha_rkey_function rkey_func); 80 81 /** 82 Initalizes a key_range object for communication with storage engine. 83 84 This function facilitates communication with the Storage Engine API by 85 translating the minimum endpoint of the interval represented by this 86 QUICK_RANGE into an index range endpoint specifier for the engine. 87 88 @param Pointer to an uninitialized key_range C struct. 89 90 @param prefix_length The length of the search key prefix to be used for 91 lookup. 92 93 @param keypart_map A set (bitmap) of keyparts to be used. 94 */ make_min_endpoint(key_range * kr,uint prefix_length,key_part_map keypart_map)95 void make_min_endpoint(key_range *kr, uint prefix_length, 96 key_part_map keypart_map) { 97 make_min_endpoint(kr); 98 kr->length= std::min(kr->length, prefix_length); 99 kr->keypart_map&= keypart_map; 100 } 101 102 /** 103 Initalizes a key_range object for communication with storage engine. 104 105 This function facilitates communication with the Storage Engine API by 106 translating the minimum endpoint of the interval represented by this 107 QUICK_RANGE into an index range endpoint specifier for the engine. 108 109 @param Pointer to an uninitialized key_range C struct. 
110 */ make_min_endpoint(key_range * kr)111 void make_min_endpoint(key_range *kr) { 112 kr->key= (const uchar*)min_key; 113 kr->length= min_length; 114 kr->keypart_map= min_keypart_map; 115 kr->flag= ((flag & NEAR_MIN) ? HA_READ_AFTER_KEY : 116 (flag & EQ_RANGE) ? HA_READ_KEY_EXACT : HA_READ_KEY_OR_NEXT); 117 } 118 119 /** 120 Initalizes a key_range object for communication with storage engine. 121 122 This function facilitates communication with the Storage Engine API by 123 translating the maximum endpoint of the interval represented by this 124 QUICK_RANGE into an index range endpoint specifier for the engine. 125 126 @param Pointer to an uninitialized key_range C struct. 127 128 @param prefix_length The length of the search key prefix to be used for 129 lookup. 130 131 @param keypart_map A set (bitmap) of keyparts to be used. 132 */ make_max_endpoint(key_range * kr,uint prefix_length,key_part_map keypart_map)133 void make_max_endpoint(key_range *kr, uint prefix_length, 134 key_part_map keypart_map) { 135 make_max_endpoint(kr); 136 kr->length= std::min(kr->length, prefix_length); 137 kr->keypart_map&= keypart_map; 138 } 139 140 /** 141 Initalizes a key_range object for communication with storage engine. 142 143 This function facilitates communication with the Storage Engine API by 144 translating the maximum endpoint of the interval represented by this 145 QUICK_RANGE into an index range endpoint specifier for the engine. 146 147 @param Pointer to an uninitialized key_range C struct. 148 */ make_max_endpoint(key_range * kr)149 void make_max_endpoint(key_range *kr) { 150 kr->key= (const uchar*)max_key; 151 kr->length= max_length; 152 kr->keypart_map= max_keypart_map; 153 /* 154 We use READ_AFTER_KEY here because if we are reading on a key 155 prefix we want to find all keys with this prefix 156 */ 157 kr->flag= (flag & NEAR_MAX ? HA_READ_BEFORE_KEY : HA_READ_AFTER_KEY); 158 } 159 }; 160 161 162 /* 163 Quick select interface. 
164 This class is a parent for all QUICK_*_SELECT and FT_SELECT classes. 165 166 The usage scenario is as follows: 167 1. Create quick select 168 quick= new QUICK_XXX_SELECT(...); 169 170 2. Perform lightweight initialization. This can be done in 2 ways: 171 2.a: Regular initialization 172 if (quick->init()) 173 { 174 //the only valid action after failed init() call is delete 175 delete quick; 176 } 177 2.b: Special initialization for quick selects merged by QUICK_ROR_*_SELECT 178 if (quick->init_ror_merged_scan()) 179 delete quick; 180 181 3. Perform zero, one, or more scans. 182 while (...) 183 { 184 // initialize quick select for scan. This may allocate 185 // buffers and/or prefetch rows. 186 if (quick->reset()) 187 { 188 //the only valid action after failed reset() call is delete 189 delete quick; 190 //abort query 191 } 192 193 // perform the scan 194 do 195 { 196 res= quick->get_next(); 197 } while (res && ...) 198 } 199 200 4. Delete the select: 201 delete quick; 202 203 NOTE 204 quick select doesn't use Sql_alloc/MEM_ROOT allocation because "range 205 checked for each record" functionality may create/destroy 206 O(#records_in_some_table) quick selects during query execution. 207 See Bug#18684036 ELIMINATE LAST FEW HEAP USAGE FROM THE 208 RANGE OPTIMIZER. 209 */ 210 211 class QUICK_SELECT_I 212 { 213 public: 214 ha_rows records; /* estimate of # of records to be retrieved */ 215 Cost_estimate cost_est; ///> cost to perform this retrieval 216 TABLE *head; 217 /* 218 Index this quick select uses, or MAX_KEY for quick selects 219 that use several indexes 220 */ 221 uint index; 222 223 /* 224 Total length of first used_key_parts parts of the key. 225 Applicable if index!= MAX_KEY. 226 */ 227 uint max_used_key_length; 228 229 /* 230 Max. number of (first) key parts this quick select uses for retrieval. 231 eg. for "(key1p1=c1 AND key1p2=c2) OR key1p1=c2" used_key_parts == 2. 232 Applicable if index!= MAX_KEY. 
233 234 For QUICK_GROUP_MIN_MAX_SELECT it includes MIN/MAX argument keyparts. 235 */ 236 uint used_key_parts; 237 238 QUICK_SELECT_I(); ~QUICK_SELECT_I()239 virtual ~QUICK_SELECT_I(){}; 240 241 /* 242 Do post-constructor initialization. 243 SYNOPSIS 244 init() 245 246 init() performs initializations that should have been in constructor if 247 it was possible to return errors from constructors. The join optimizer may 248 create and then delete quick selects without retrieving any rows so init() 249 must not contain any IO or CPU intensive code. 250 251 If init() call fails the only valid action is to delete this quick select, 252 reset() and get_next() must not be called. 253 254 RETURN 255 0 OK 256 other Error code 257 */ 258 virtual int init() = 0; 259 260 /* 261 Initialize quick select for row retrieval. 262 SYNOPSIS 263 reset() 264 265 reset() should be called when it is certain that row retrieval will be 266 necessary. This call may do heavyweight initialization like buffering first 267 N records etc. If reset() call fails get_next() must not be called. 268 Note that reset() may be called several times if 269 * the quick select is executed in a subselect 270 * a JOIN buffer is used 271 272 RETURN 273 0 OK 274 other Error code 275 */ 276 virtual int reset(void) = 0; 277 278 virtual int get_next() = 0; /* get next record to retrieve */ 279 280 /* Range end should be called when we have looped over the whole index */ range_end()281 virtual void range_end() {} 282 283 /** 284 Whether the range access method returns records in reverse order. 285 */ 286 virtual bool reverse_sorted() const = 0; 287 /** 288 Whether the range access method is capable of returning records 289 in reverse order. 290 */ 291 virtual bool reverse_sort_possible() const = 0; unique_key_range()292 virtual bool unique_key_range() { return false; } clustered_pk_range()293 virtual bool clustered_pk_range() { return false; } 294 295 /* 296 Request that this quick select produces sorted output. 
297 Not all quick selects can provide sorted output, the caller is responsible 298 for calling this function only for those quick selects that can. 299 The implementation is also allowed to provide sorted output even if it 300 was not requested if benificial, or required by implementation 301 internals. 302 */ 303 virtual void need_sorted_output() = 0; 304 enum { 305 QS_TYPE_RANGE = 0, 306 QS_TYPE_INDEX_MERGE = 1, 307 QS_TYPE_RANGE_DESC = 2, 308 QS_TYPE_FULLTEXT = 3, 309 QS_TYPE_ROR_INTERSECT = 4, 310 QS_TYPE_ROR_UNION = 5, 311 QS_TYPE_GROUP_MIN_MAX = 6 312 }; 313 314 /* Get type of this quick select - one of the QS_TYPE_* values */ 315 virtual int get_type() const = 0; 316 virtual bool is_loose_index_scan() const = 0; 317 virtual bool is_agg_loose_index_scan() const = 0; 318 319 /* 320 Initialize this quick select as a merged scan inside a ROR-union or a ROR- 321 intersection scan. The caller must not additionally call init() if this 322 function is called. 323 SYNOPSIS 324 init_ror_merged_scan() 325 reuse_handler If true, the quick select may use table->handler, 326 otherwise it must create and use a separate handler 327 object. 328 RETURN 329 0 Ok 330 other Error 331 */ init_ror_merged_scan(bool reuse_handler)332 virtual int init_ror_merged_scan(bool reuse_handler) 333 { assert(0); return 1; } 334 335 /* 336 Save ROWID of last retrieved row in file->ref. This used in ROR-merging. 337 */ save_last_pos()338 virtual void save_last_pos(){}; 339 340 /* 341 Append comma-separated list of keys this quick select uses to key_names; 342 append comma-separated list of corresponding used lengths to used_lengths. 343 This is used by select_describe. 344 */ 345 virtual void add_keys_and_lengths(String *key_names, 346 String *used_lengths)=0; 347 348 /* 349 Append text representation of quick select structure (what and how is 350 merged) to str. The result is added to "Extra" field in EXPLAIN output. 
351 This function is implemented only by quick selects that merge other quick 352 selects output and/or can produce output suitable for merging. 353 */ add_info_string(String * str)354 virtual void add_info_string(String *str) {}; 355 /* 356 Return 1 if any index used by this quick select 357 uses field which is marked in passed bitmap. 358 */ 359 virtual bool is_keys_used(const MY_BITMAP *fields); 360 361 /** 362 Simple sanity check that the quick select has been set up 363 correctly. Function is overridden by quick selects that merge 364 indices. 365 */ is_valid()366 virtual bool is_valid() { return index != MAX_KEY; }; 367 368 /* 369 rowid of last row retrieved by this quick select. This is used only when 370 doing ROR-index_merge selects 371 */ 372 uchar *last_rowid; 373 374 /* 375 Table record buffer used by this quick select. 376 */ 377 uchar *record; 378 #ifndef NDEBUG 379 /* 380 Print quick select information to DBUG_FILE. Caller is responsible 381 for locking DBUG_FILE before this call and unlocking it afterwards. 382 */ 383 virtual void dbug_dump(int indent, bool verbose)= 0; 384 #endif 385 386 /* 387 Returns a QUICK_SELECT with reverse order of to the index. 388 */ make_reverse(uint used_key_parts_arg)389 virtual QUICK_SELECT_I *make_reverse(uint used_key_parts_arg) { return NULL; } set_handler(handler * file_arg)390 virtual void set_handler(handler *file_arg) {} 391 392 /** 393 Get the fields used by the range access method. 394 395 @param[out] used_fields Bitmap of fields that this range access 396 method uses. 397 */ 398 virtual void get_fields_used(MY_BITMAP *used_fields) = 0; 399 void trace_quick_description(Opt_trace_context *trace); 400 }; 401 402 403 class PARAM; 404 class SEL_ARG; 405 406 407 typedef Prealloced_array<QUICK_RANGE*, 16> Quick_ranges; 408 409 /* 410 MRR range sequence, array<QUICK_RANGE> implementation: sequence traversal 411 context. 
412 */ 413 typedef struct st_quick_range_seq_ctx 414 { 415 Quick_ranges::const_iterator first; 416 Quick_ranges::const_iterator cur; 417 Quick_ranges::const_iterator last; 418 } QUICK_RANGE_SEQ_CTX; 419 420 range_seq_t quick_range_seq_init(void *init_param, uint n_ranges, uint flags); 421 uint quick_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range); 422 423 424 /* 425 Quick select that does a range scan on a single key. The records are 426 returned in key order if ::need_sorted_output() has been called. 427 */ 428 class QUICK_RANGE_SELECT : public QUICK_SELECT_I 429 { 430 protected: 431 handler *file; 432 /* Members to deal with case when this quick select is a ROR-merged scan */ 433 bool in_ror_merged_scan; 434 435 // TODO: pre-allocate space to avoid malloc/free for small number of columns. 436 MY_BITMAP column_bitmap; 437 438 friend class TRP_ROR_INTERSECT; 439 friend 440 QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table, 441 struct st_table_ref *ref, 442 ha_rows records); 443 friend bool get_quick_keys(PARAM *param, 444 QUICK_RANGE_SELECT *quick,KEY_PART *key, 445 SEL_ARG *key_tree, 446 uchar *min_key, uint min_key_flag, 447 uchar *max_key, uint max_key_flag); 448 friend QUICK_RANGE_SELECT *get_quick_select(PARAM*,uint idx, 449 SEL_ARG *key_tree, 450 uint mrr_flags, 451 uint mrr_buf_size, 452 MEM_ROOT *alloc); 453 friend uint quick_range_seq_next(range_seq_t rseq, KEY_MULTI_RANGE *range); 454 friend range_seq_t quick_range_seq_init(void *init_param, 455 uint n_ranges, uint flags); 456 friend class QUICK_SELECT_DESC; 457 friend class QUICK_INDEX_MERGE_SELECT; 458 friend class QUICK_ROR_INTERSECT_SELECT; 459 friend class QUICK_GROUP_MIN_MAX_SELECT; 460 461 Quick_ranges ranges; /* ordered array of range ptrs */ 462 bool free_file; /* TRUE <=> this->file is "owned" by this quick select */ 463 464 /* Range pointers to be used when not using MRR interface */ 465 QUICK_RANGE **cur_range; /* current element in ranges */ 466 QUICK_RANGE 
*last_range; 467 468 /* Members needed to use the MRR interface */ 469 QUICK_RANGE_SEQ_CTX qr_traversal_ctx; 470 public: 471 uint mrr_flags; /* Flags to be used with MRR interface */ 472 protected: 473 uint mrr_buf_size; /* copy from thd->variables.read_rnd_buff_size */ 474 HANDLER_BUFFER *mrr_buf_desc; /* the handler buffer */ 475 476 /* Info about index we're scanning */ 477 KEY_PART *key_parts; 478 KEY_PART_INFO *key_part_info; 479 480 bool dont_free; /* Used by QUICK_SELECT_DESC */ 481 482 int cmp_next(QUICK_RANGE *range); 483 int cmp_prev(QUICK_RANGE *range); 484 bool row_in_ranges(); 485 public: 486 MEM_ROOT alloc; 487 488 QUICK_RANGE_SELECT(THD *thd, TABLE *table,uint index_arg,bool no_alloc, 489 MEM_ROOT *parent_alloc, bool *create_error); 490 ~QUICK_RANGE_SELECT(); 491 492 void need_sorted_output(); 493 int init(); 494 int reset(void); 495 int get_next(); 496 void range_end(); 497 int get_next_prefix(uint prefix_length, uint group_key_parts, 498 uchar *cur_prefix); reverse_sorted()499 bool reverse_sorted() const { return false; } reverse_sort_possible()500 bool reverse_sort_possible() const { return true; } 501 bool unique_key_range(); 502 int init_ror_merged_scan(bool reuse_handler); save_last_pos()503 void save_last_pos() 504 { file->position(record); } get_type()505 int get_type() const { return QS_TYPE_RANGE; } is_loose_index_scan()506 virtual bool is_loose_index_scan() const { return false; } is_agg_loose_index_scan()507 virtual bool is_agg_loose_index_scan() const { return false; } 508 void add_keys_and_lengths(String *key_names, String *used_lengths); 509 void add_info_string(String *str); 510 #ifndef NDEBUG 511 void dbug_dump(int indent, bool verbose); 512 #endif 513 QUICK_SELECT_I *make_reverse(uint used_key_parts_arg); set_handler(handler * file_arg)514 void set_handler(handler *file_arg) { file= file_arg; } 515 get_fields_used(MY_BITMAP * used_fields)516 virtual void get_fields_used(MY_BITMAP *used_fields) 517 { 518 for (uint i= 0; i < 
used_key_parts; i++) 519 bitmap_set_bit(used_fields, key_parts[i].field->field_index); 520 } 521 522 private: 523 /* Default copy ctor used by QUICK_SELECT_DESC */ 524 }; 525 526 527 class QUICK_RANGE_SELECT_GEOM: public QUICK_RANGE_SELECT 528 { 529 public: QUICK_RANGE_SELECT_GEOM(THD * thd,TABLE * table,uint index_arg,bool no_alloc,MEM_ROOT * parent_alloc,bool * create_error)530 QUICK_RANGE_SELECT_GEOM(THD *thd, TABLE *table, uint index_arg, 531 bool no_alloc, MEM_ROOT *parent_alloc, 532 bool *create_error) 533 :QUICK_RANGE_SELECT(thd, table, index_arg, no_alloc, parent_alloc, 534 create_error) 535 {}; 536 virtual int get_next(); 537 }; 538 539 540 /* 541 QUICK_INDEX_MERGE_SELECT - index_merge access method quick select. 542 543 QUICK_INDEX_MERGE_SELECT uses 544 * QUICK_RANGE_SELECTs to get rows 545 * Unique class to remove duplicate rows 546 547 INDEX MERGE OPTIMIZER 548 Current implementation doesn't detect all cases where index_merge could 549 be used, in particular: 550 * index_merge will never be used if range scan is possible (even if 551 range scan is more expensive) 552 553 * index_merge+'using index' is not supported (this the consequence of 554 the above restriction) 555 556 * If WHERE part contains complex nested AND and OR conditions, some ways 557 to retrieve rows using index_merge will not be considered. The choice 558 of read plan may depend on the order of conjuncts/disjuncts in WHERE 559 part of the query, see comments near imerge_list_or_list and 560 SEL_IMERGE::or_sel_tree_with_checks functions for details. 561 562 * There is no "index_merge_ref" method (but index_merge on non-first 563 table in join is possible with 'range checked for each record'). 564 565 See comments around SEL_IMERGE class and test_quick_select for more 566 details. 567 568 ROW RETRIEVAL ALGORITHM 569 570 index_merge uses Unique class for duplicates removal. index_merge takes 571 advantage of Clustered Primary Key (CPK) if the table has one. 
572 The index_merge algorithm consists of two phases: 573 574 Phase 1 (implemented in QUICK_INDEX_MERGE_SELECT::prepare_unique): 575 prepare() 576 { 577 activate 'index only'; 578 while(retrieve next row for non-CPK scan) 579 { 580 if (there is a CPK scan and row will be retrieved by it) 581 skip this row; 582 else 583 put its rowid into Unique; 584 } 585 deactivate 'index only'; 586 } 587 588 Phase 2 (implemented as sequence of QUICK_INDEX_MERGE_SELECT::get_next 589 calls): 590 591 fetch() 592 { 593 retrieve all rows from row pointers stored in Unique; 594 free Unique; 595 retrieve all rows for CPK scan; 596 } 597 */ 598 599 class QUICK_INDEX_MERGE_SELECT : public QUICK_SELECT_I 600 { 601 Unique *unique; 602 public: 603 QUICK_INDEX_MERGE_SELECT(THD *thd, TABLE *table); 604 ~QUICK_INDEX_MERGE_SELECT(); 605 606 int init(); need_sorted_output()607 void need_sorted_output() { assert(false); /* Can't do it */ } 608 int reset(void); 609 int get_next(); reverse_sorted()610 bool reverse_sorted() const { return false; } reverse_sort_possible()611 bool reverse_sort_possible() const { return false; } unique_key_range()612 bool unique_key_range() { return false; } get_type()613 int get_type() const { return QS_TYPE_INDEX_MERGE; } is_loose_index_scan()614 virtual bool is_loose_index_scan() const { return false; } is_agg_loose_index_scan()615 virtual bool is_agg_loose_index_scan() const { return false; } 616 void add_keys_and_lengths(String *key_names, String *used_lengths); 617 void add_info_string(String *str); 618 bool is_keys_used(const MY_BITMAP *fields); 619 #ifndef NDEBUG 620 void dbug_dump(int indent, bool verbose); 621 #endif 622 623 bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); 624 625 /* range quick selects this index_merge read consists of */ 626 List<QUICK_RANGE_SELECT> quick_selects; 627 628 /* quick select that uses clustered primary key (NULL if none) */ 629 QUICK_RANGE_SELECT* pk_quick_select; 630 631 /* true if this select is currently doing a 
clustered PK scan */ 632 bool doing_pk_scan; 633 634 MEM_ROOT alloc; 635 THD *thd; 636 int read_keys_and_merge(); 637 clustered_pk_range()638 bool clustered_pk_range() { return MY_TEST(pk_quick_select); } 639 is_valid()640 virtual bool is_valid() 641 { 642 List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); 643 QUICK_RANGE_SELECT *quick; 644 bool valid= true; 645 while ((quick= it++)) 646 { 647 if (!quick->is_valid()) 648 { 649 valid= false; 650 break; 651 } 652 } 653 return valid; 654 } 655 get_fields_used(MY_BITMAP * used_fields)656 virtual void get_fields_used(MY_BITMAP *used_fields) 657 { 658 List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); 659 QUICK_RANGE_SELECT *quick; 660 while ((quick= it++)) 661 quick->get_fields_used(used_fields); 662 663 if (pk_quick_select) 664 pk_quick_select->get_fields_used(used_fields); 665 } 666 667 /* used to get rows collected in Unique */ 668 READ_RECORD read_record; 669 }; 670 671 672 /* 673 Rowid-Ordered Retrieval (ROR) index intersection quick select. 674 This quick select produces intersection of row sequences returned 675 by several QUICK_RANGE_SELECTs it "merges". 676 677 All merged QUICK_RANGE_SELECTs must return rowids in rowid order. 678 QUICK_ROR_INTERSECT_SELECT will return rows in rowid order, too. 679 680 All merged quick selects retrieve {rowid, covered_fields} tuples (not full 681 table records). 682 QUICK_ROR_INTERSECT_SELECT retrieves full records if it is not being used 683 by QUICK_ROR_INTERSECT_SELECT and all merged quick selects together don't 684 cover needed all fields. 685 686 If one of the merged quick selects is a Clustered PK range scan, it is 687 used only to filter rowid sequence produced by other merged quick selects. 
688 */ 689 690 class QUICK_ROR_INTERSECT_SELECT : public QUICK_SELECT_I 691 { 692 public: 693 QUICK_ROR_INTERSECT_SELECT(THD *thd, TABLE *table, 694 bool retrieve_full_rows, 695 MEM_ROOT *parent_alloc); 696 ~QUICK_ROR_INTERSECT_SELECT(); 697 698 int init(); need_sorted_output()699 void need_sorted_output() { assert(false); /* Can't do it */ } 700 int reset(void); 701 int get_next(); reverse_sorted()702 bool reverse_sorted() const { return false; } reverse_sort_possible()703 bool reverse_sort_possible() const { return false; } unique_key_range()704 bool unique_key_range() { return false; } get_type()705 int get_type() const { return QS_TYPE_ROR_INTERSECT; } is_loose_index_scan()706 virtual bool is_loose_index_scan() const { return false; } is_agg_loose_index_scan()707 virtual bool is_agg_loose_index_scan() const { return false; } 708 void add_keys_and_lengths(String *key_names, String *used_lengths); 709 void add_info_string(String *str); 710 bool is_keys_used(const MY_BITMAP *fields); 711 #ifndef NDEBUG 712 void dbug_dump(int indent, bool verbose); 713 #endif 714 int init_ror_merged_scan(bool reuse_handler); 715 bool push_quick_back(QUICK_RANGE_SELECT *quick_sel_range); 716 717 /* 718 Range quick selects this intersection consists of, not including 719 cpk_quick. 720 */ 721 List<QUICK_RANGE_SELECT> quick_selects; 722 is_valid()723 virtual bool is_valid() 724 { 725 List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); 726 QUICK_RANGE_SELECT *quick; 727 bool valid= true; 728 while ((quick= it++)) 729 { 730 if (!quick->is_valid()) 731 { 732 valid= false; 733 break; 734 } 735 } 736 return valid; 737 } 738 get_fields_used(MY_BITMAP * used_fields)739 virtual void get_fields_used(MY_BITMAP *used_fields) 740 { 741 List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects); 742 QUICK_RANGE_SELECT *quick; 743 while ((quick= it++)) 744 quick->get_fields_used(used_fields); 745 } 746 747 /* 748 Merged quick select that uses Clustered PK, if there is one. 
This quick 749 select is not used for row retrieval, it is used for row retrieval. 750 */ 751 QUICK_RANGE_SELECT *cpk_quick; 752 753 MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. */ 754 THD *thd; /* current thread */ 755 bool need_to_fetch_row; /* if true, do retrieve full table records. */ 756 /* in top-level quick select, true if merged scans where initialized */ 757 bool scans_inited; 758 }; 759 760 761 /* 762 Comparison function to be used QUICK_ROR_UNION_SELECT::queue priority 763 queue. 764 */ 765 struct Quick_ror_union_less 766 { Quick_ror_union_lessQuick_ror_union_less767 explicit Quick_ror_union_less(const QUICK_SELECT_I *me) 768 : m_me(me) 769 {} operatorQuick_ror_union_less770 bool operator()(QUICK_SELECT_I *a, QUICK_SELECT_I *b) 771 { 772 return m_me->head->file->cmp_ref(a->last_rowid, b->last_rowid) > 0; 773 } 774 const QUICK_SELECT_I *m_me; 775 }; 776 777 778 /* 779 Rowid-Ordered Retrieval index union select. 780 This quick select produces union of row sequences returned by several 781 quick select it "merges". 782 783 All merged quick selects must return rowids in rowid order. 784 QUICK_ROR_UNION_SELECT will return rows in rowid order, too. 785 786 All merged quick selects are set not to retrieve full table records. 787 ROR-union quick select always retrieves full records. 
788 789 */ 790 791 class QUICK_ROR_UNION_SELECT : public QUICK_SELECT_I 792 { 793 public: 794 QUICK_ROR_UNION_SELECT(THD *thd, TABLE *table); 795 ~QUICK_ROR_UNION_SELECT(); 796 797 int init(); need_sorted_output()798 void need_sorted_output() { assert(false); /* Can't do it */ } 799 int reset(void); 800 int get_next(); reverse_sorted()801 bool reverse_sorted() const { return false; } reverse_sort_possible()802 bool reverse_sort_possible() const { return false; } unique_key_range()803 bool unique_key_range() { return false; } get_type()804 int get_type() const { return QS_TYPE_ROR_UNION; } is_loose_index_scan()805 virtual bool is_loose_index_scan() const { return false; } is_agg_loose_index_scan()806 virtual bool is_agg_loose_index_scan() const { return false; } 807 void add_keys_and_lengths(String *key_names, String *used_lengths); 808 void add_info_string(String *str); 809 bool is_keys_used(const MY_BITMAP *fields); 810 #ifndef NDEBUG 811 void dbug_dump(int indent, bool verbose); 812 #endif 813 814 bool push_quick_back(QUICK_SELECT_I *quick_sel_range); 815 816 List<QUICK_SELECT_I> quick_selects; /* Merged quick selects */ 817 is_valid()818 virtual bool is_valid() 819 { 820 List_iterator_fast<QUICK_SELECT_I> it(quick_selects); 821 QUICK_SELECT_I *quick; 822 bool valid= true; 823 while ((quick= it++)) 824 { 825 if (!quick->is_valid()) 826 { 827 valid= false; 828 break; 829 } 830 } 831 return valid; 832 } 833 get_fields_used(MY_BITMAP * used_fields)834 virtual void get_fields_used(MY_BITMAP *used_fields) 835 { 836 List_iterator_fast<QUICK_SELECT_I> it(quick_selects); 837 QUICK_SELECT_I *quick; 838 while ((quick= it++)) 839 quick->get_fields_used(used_fields); 840 } 841 842 Priority_queue<QUICK_SELECT_I*, 843 std::vector<QUICK_SELECT_I*, 844 Malloc_allocator<QUICK_SELECT_I*> >, 845 Quick_ror_union_less> 846 queue; /* Priority queue for merge operation */ 847 MEM_ROOT alloc; /* Memory pool for this and merged quick selects data. 
*/

  /*
    NOTE(review): the members below belong to a class whose declaration starts
    above this chunk (a rowid-merging quick select, judging by the rowid
    buffers) -- confirm against the full header.
  */
  THD *thd;             /* current thread */
  uchar *cur_rowid;     /* buffer used in get_next() */
  uchar *prev_rowid;    /* rowid of last row returned by get_next() */
  bool have_prev_rowid; /* true if prev_rowid has valid data */
  uint rowid_length;    /* table rowid length */

private:
  bool scans_inited;    /* true once the underlying scans were initialized */
};


/*
  Index scan for GROUP-BY queries with MIN/MAX aggregate functions.

  This class provides a specialized index access method for GROUP-BY queries
  of the forms:

       SELECT A_1,...,A_k, [B_1,...,B_m], [MIN(C)], [MAX(C)]
         FROM T
        WHERE [RNG(A_1,...,A_p ; where p <= k)]
         [AND EQ(B_1,...,B_m)]
         [AND PC(C)]
         [AND PA(A_i1,...,A_iq)]
     GROUP BY A_1,...,A_k;

    or

       SELECT DISTINCT A_i1,...,A_ik
         FROM T
        WHERE [RNG(A_1,...,A_p ; where p <= k)]
         [AND PA(A_i1,...,A_iq)];

  where all selected fields are parts of the same index.
  The class of queries that can be processed by this quick select is fully
  specified in the description of get_best_trp_group_min_max() in opt_range.cc.

  The get_next() method directly produces result tuples, thus obviating the
  need to call end_send_group() because all grouping is already done inside
  get_next().

  Since one of the requirements is that all select fields are part of the same
  index, this class produces only index keys, and not complete records.
*/

class QUICK_GROUP_MIN_MAX_SELECT : public QUICK_SELECT_I
{
private:
  JOIN *join;            /* Descriptor of the current query */
  KEY  *index_info;      /* The index chosen for data access */
  uchar *record;         /* Buffer where the next record is returned. */
  uchar *tmp_record;     /* Temporary storage for next_min(), next_max(). */
  uchar *group_prefix;   /* Key prefix consisting of the GROUP fields. */
  const uint group_prefix_len; /* Length of the group prefix. */
  uint group_key_parts;  /* A number of keyparts in the group prefix */
  uchar *last_prefix;    /* Prefix of the last group for detecting EOF. */
  bool have_min;         /* Specify whether we are computing */
  bool have_max;         /*   a MIN, a MAX, or both. */
  bool have_agg_distinct;/* aggregate_function(DISTINCT ...). */
  bool seen_first_key;   /* Denotes whether the first key was retrieved.*/
  KEY_PART_INFO *min_max_arg_part; /* The keypart of the only argument field */
                                   /* of all MIN/MAX functions. */
  uint min_max_arg_len;  /* The length of the MIN/MAX argument field */
  uchar *key_infix;      /* Infix of constants from equality predicates. */
  uint key_infix_len;    /* Length of key_infix (0 if there is no infix). */
  Quick_ranges min_max_ranges; /* Array of range ptrs for the MIN/MAX field. */
  uint real_prefix_len;  /* Length of key prefix extended with key_infix. */
  uint real_key_parts;   /* A number of keyparts in the above value. */
  /* MIN/MAX aggregates whose result is updated for each group. */
  List<Item_sum> *min_functions;
  List<Item_sum> *max_functions;
  /* Iterators over the two lists above, reused across groups. */
  List_iterator<Item_sum> *min_functions_it;
  List_iterator<Item_sum> *max_functions_it;
  /*
    Use index scan to get the next different key instead of jumping into it
    through index read
  */
  bool is_index_scan;
public:
  /*
    The following two members are public to allow easy access from
    TRP_GROUP_MIN_MAX::make_quick()
  */
  MEM_ROOT alloc; /* Memory pool for this and quick_prefix_select data. */
  QUICK_RANGE_SELECT *quick_prefix_select; /* For retrieval of group prefixes. */
private:
  /* Group-iteration helpers; full contracts are documented in opt_range.cc. */
  int  next_prefix();        /* Position on the prefix of the next group. */
  int  next_min_in_range();  /* Find MIN key within the MIN/MAX ranges. */
  int  next_max_in_range();  /* Find MAX key within the MIN/MAX ranges. */
  int  next_min();           /* Retrieve the minimal key in the group. */
  int  next_max();           /* Retrieve the maximal key in the group. */
  void update_min_result();  /* Propagate the found key to min_functions. */
  void update_max_result();  /* Propagate the found key to max_functions. */
public:
  QUICK_GROUP_MIN_MAX_SELECT(TABLE *table, JOIN *join, bool have_min,
                             bool have_max, bool have_agg_distinct,
                             KEY_PART_INFO *min_max_arg_part,
                             uint group_prefix_len, uint group_key_parts,
                             uint used_key_parts, KEY *index_info,
                             uint use_index, const Cost_estimate *cost_est,
                             ha_rows records, uint key_infix_len,
                             uchar *key_infix, MEM_ROOT *parent_alloc,
                             bool is_index_scan);
  ~QUICK_GROUP_MIN_MAX_SELECT();
  bool add_range(SEL_ARG *sel_range);
  void update_key_stat();
  void adjust_prefix_ranges();
  bool alloc_buffers();
  int init();
  /* Output is produced in index order already, so nothing to request. */
  void need_sorted_output() { /* always do it */ }
  int reset();
  int get_next();
  bool reverse_sorted() const { return false; }
  bool reverse_sort_possible() const { return false; }
  bool unique_key_range() { return false; }
  int get_type() const { return QS_TYPE_GROUP_MIN_MAX; }
  virtual bool is_loose_index_scan() const { return true; }
  virtual bool is_agg_loose_index_scan() const { return is_agg_distinct(); }
  void add_keys_and_lengths(String *key_names, String *used_lengths);
#ifndef NDEBUG
  void dbug_dump(int indent, bool verbose);
#endif
  bool is_agg_distinct() const { return have_agg_distinct; }
  /* Append "scanning" to EXPLAIN output when the index-scan variant is used. */
  virtual void append_loose_scan_type(String *str)
  {
    if (is_index_scan)
      str->append(STRING_WITH_LEN("scanning"));
  }

  /* Set a bit in used_fields for every field of the used key parts. */
  virtual void get_fields_used(MY_BITMAP *used_fields)
  {
    for (uint i= 0; i < used_key_parts; i++)
      bitmap_set_bit(used_fields, index_info->key_part[i].field->field_index);
  }
  void add_info_string(String *str);
};


/*
  Range select that returns rows in descending index order, built by
  reversing an existing QUICK_RANGE_SELECT.
*/
class QUICK_SELECT_DESC: public QUICK_RANGE_SELECT
{
public:
  QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q, uint used_key_parts,
                    bool *create_err);
  int get_next();
  bool reverse_sorted() const { return true; }
  bool reverse_sort_possible() const { return true; }
  int get_type() const { return QS_TYPE_RANGE_DESC; }
  virtual bool is_loose_index_scan() const { return false; }
  virtual bool is_agg_loose_index_scan() const { return false; }
  QUICK_SELECT_I *make_reverse(uint used_key_parts_arg)
  {
    return this; // is already reverse sorted
  }
private:
  bool range_reads_after_key(QUICK_RANGE *range);
  int reset(void) { rev_it.rewind(); return QUICK_RANGE_SELECT::reset(); }
  List<QUICK_RANGE> rev_ranges;           /* Ranges in reversed order. */
  List_iterator<QUICK_RANGE> rev_it;      /* Iterator over rev_ranges. */
  uint used_key_parts;
};


class QEP_shared_owner;

int test_quick_select(THD *thd, key_map keys, table_map prev_tables,
                      ha_rows limit, bool force_quick_range,
                      const ORDER::enum_order interesting_order,
                      const QEP_shared_owner *tab,
                      Item *cond, key_map *needed_reg,
                      QUICK_SELECT_I **quick, bool ignore_table_scan);

/*
  Quick "select" for full-text search: a thin adapter that drives the
  handler's full-text API through the QUICK_RANGE_SELECT interface.
*/
class FT_SELECT: public QUICK_RANGE_SELECT
{
public:
  FT_SELECT(THD *thd, TABLE *table, uint key, bool *error) :
    QUICK_RANGE_SELECT (thd, table, key, 1, NULL, error) { (void) init(); }
  ~FT_SELECT() { file->ft_end(); }
  int init()
  {
    // No estimation is done for FTS, return 1 for compatibility.
    records= 1;
    return file->ft_init();
  }
  int reset() { return 0; }
  int get_next() { return file->ft_read(record); }
  int get_type() const { return QS_TYPE_FULLTEXT; }
  virtual bool is_loose_index_scan() const { return false; }
  virtual bool is_agg_loose_index_scan() const { return false; }
};

FT_SELECT *get_ft_select(THD *thd, TABLE *table, uint key);
QUICK_RANGE_SELECT *get_quick_select_for_ref(THD *thd, TABLE *table,
                                             struct st_table_ref *ref,
                                             ha_rows records);
bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond);
void store_key_image_to_rec(Field *field, uchar *ptr, uint len);

extern String null_string;

#endif