1 /*------------------------------------------------------------------------- 2 * 3 * indexam.c 4 * general index access method routines 5 * 6 * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group 7 * Portions Copyright (c) 1994, Regents of the University of California 8 * 9 * 10 * IDENTIFICATION 11 * src/backend/access/index/indexam.c 12 * 13 * INTERFACE ROUTINES 14 * index_open - open an index relation by relation OID 15 * index_close - close an index relation 16 * index_beginscan - start a scan of an index with amgettuple 17 * index_beginscan_bitmap - start a scan of an index with amgetbitmap 18 * index_rescan - restart a scan of an index 19 * index_endscan - end a scan 20 * index_insert - insert an index tuple into a relation 21 * index_markpos - mark a scan position 22 * index_restrpos - restore a scan position 23 * index_parallelscan_estimate - estimate shared memory for parallel scan 24 * index_parallelscan_initialize - initialize parallel scan 25 * index_parallelrescan - (re)start a parallel scan of an index 26 * index_beginscan_parallel - join parallel index scan 27 * index_getnext_tid - get the next TID from a scan 28 * index_fetch_heap - get the scan's next heap tuple 29 * index_getnext_slot - get the next tuple from a scan 30 * index_getbitmap - get all tuples from a scan 31 * index_bulk_delete - bulk deletion of index tuples 32 * index_vacuum_cleanup - post-deletion cleanup of an index 33 * index_can_return - does index support index-only scans? 34 * index_getprocid - get a support procedure OID 35 * index_getprocinfo - get a support procedure's lookup info 36 * 37 * NOTES 38 * This file contains the index_ routines which used 39 * to be a scattered collection of stuff in access/genam. 
40 * 41 *------------------------------------------------------------------------- 42 */ 43 44 #include "postgres.h" 45 46 #include "access/amapi.h" 47 #include "access/heapam.h" 48 #include "access/reloptions.h" 49 #include "access/relscan.h" 50 #include "access/tableam.h" 51 #include "access/transam.h" 52 #include "access/xlog.h" 53 #include "catalog/index.h" 54 #include "catalog/pg_amproc.h" 55 #include "catalog/pg_type.h" 56 #include "commands/defrem.h" 57 #include "nodes/makefuncs.h" 58 #include "pgstat.h" 59 #include "storage/bufmgr.h" 60 #include "storage/lmgr.h" 61 #include "storage/predicate.h" 62 #include "utils/ruleutils.h" 63 #include "utils/snapmgr.h" 64 #include "utils/syscache.h" 65 66 67 /* ---------------------------------------------------------------- 68 * macros used in index_ routines 69 * 70 * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there 71 * to check that we don't try to scan or do retail insertions into an index 72 * that is currently being rebuilt or pending rebuild. This helps to catch 73 * things that don't work when reindexing system catalogs. The assertion 74 * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS 75 * when calling the index AM's ambuild routine, and there is no reason for 76 * ambuild to call its subsidiary routines through this file. 
 * ----------------------------------------------------------------
 */
#define RELATION_CHECKS \
( \
	AssertMacro(RelationIsValid(indexRelation)), \
	AssertMacro(PointerIsValid(indexRelation->rd_indam)), \
	AssertMacro(!ReindexIsProcessingIndex(RelationGetRelid(indexRelation))) \
)

#define SCAN_CHECKS \
( \
	AssertMacro(IndexScanIsValid(scan)), \
	AssertMacro(RelationIsValid(scan->indexRelation)), \
	AssertMacro(PointerIsValid(scan->indexRelation->rd_indam)) \
)

/* Raise an error if the index AM does not provide the named relation-level callback */
#define CHECK_REL_PROCEDURE(pname) \
do { \
	if (indexRelation->rd_indam->pname == NULL) \
		elog(ERROR, "function %s is not defined for index %s", \
			 CppAsString(pname), RelationGetRelationName(indexRelation)); \
} while(0)

/* Raise an error if the scan's index AM does not provide the named scan-level callback */
#define CHECK_SCAN_PROCEDURE(pname) \
do { \
	if (scan->indexRelation->rd_indam->pname == NULL) \
		elog(ERROR, "function %s is not defined for index %s", \
			 CppAsString(pname), RelationGetRelationName(scan->indexRelation)); \
} while(0)

static IndexScanDesc index_beginscan_internal(Relation indexRelation,
											  int nkeys, int norderbys, Snapshot snapshot,
											  ParallelIndexScanDesc pscan, bool temp_snap);


/* ----------------------------------------------------------------
 *				   index_ interface functions
 * ----------------------------------------------------------------
 */

/* ----------------
 *		index_open - open an index relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the index.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		index already.)
 *
 *		An error is raised if the index does not exist.
 *
 *		This is a convenience routine adapted for indexscan use.
 *		Some callers may prefer to use relation_open directly.
 * ----------------
 */
Relation
index_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_open(relationId, lockmode);

	/* Both plain and partitioned indexes are acceptable here */
	if (r->rd_rel->relkind != RELKIND_INDEX &&
		r->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not an index",
						RelationGetRelationName(r))));

	return r;
}

/* ----------------
 *		index_close - close an index relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond index_close;
 *		in that case, the lock is released automatically at xact end.
 * ----------------
 */
void
index_close(Relation relation, LOCKMODE lockmode)
{
	/*
	 * Copy the lock identifier before closing: RelationClose may make the
	 * relcache entry (and hence relation->rd_lockInfo) unsafe to reference.
	 */
	LockRelId	relid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&relid, lockmode);
}

/* ----------------
 *		index_insert - insert an index tuple into a relation
 * ----------------
 */
bool
index_insert(Relation indexRelation,
			 Datum *values,
			 bool *isnull,
			 ItemPointer heap_t_ctid,
			 Relation heapRelation,
			 IndexUniqueCheck checkUnique,
			 IndexInfo *indexInfo)
{
	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(aminsert);

	/*
	 * If the AM doesn't do its own fine-grained predicate locking
	 * (ampredlocks), fall back to a whole-relation serializable-conflict
	 * check before inserting.
	 */
	if (!(indexRelation->rd_indam->ampredlocks))
		CheckForSerializableConflictIn(indexRelation,
									   (ItemPointer) NULL,
									   InvalidBlockNumber);

	return indexRelation->rd_indam->aminsert(indexRelation, values, isnull,
											 heap_t_ctid, heapRelation,
											 checkUnique, indexInfo);
}

/*
 * index_beginscan - start a scan of an index with amgettuple
 *
 * Caller must be holding suitable locks on the heap and the index.
 */
IndexScanDesc
index_beginscan(Relation heapRelation,
				Relation indexRelation,
				Snapshot snapshot,
				int nkeys, int norderbys)
{
	IndexScanDesc scan;

	scan = index_beginscan_internal(indexRelation, nkeys, norderbys, snapshot, NULL, false);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by RelationGetIndexScan.
	 */
	scan->heapRelation = heapRelation;
	scan->xs_snapshot = snapshot;

	/* prepare to fetch index matches from table */
	scan->xs_heapfetch = table_index_fetch_begin(heapRelation);

	return scan;
}

/*
 * index_beginscan_bitmap - start a scan of an index with amgetbitmap
 *
 * As above, caller had better be holding some lock on the parent heap
 * relation, even though it's not explicitly mentioned here.
 */
IndexScanDesc
index_beginscan_bitmap(Relation indexRelation,
					   Snapshot snapshot,
					   int nkeys)
{
	IndexScanDesc scan;

	/* bitmap scans never use ordering operators, hence norderbys = 0 */
	scan = index_beginscan_internal(indexRelation, nkeys, 0, snapshot, NULL, false);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by RelationGetIndexScan.
	 */
	scan->xs_snapshot = snapshot;

	return scan;
}

/*
 * index_beginscan_internal --- common code for index_beginscan variants
 */
static IndexScanDesc
index_beginscan_internal(Relation indexRelation,
						 int nkeys, int norderbys, Snapshot snapshot,
						 ParallelIndexScanDesc pscan, bool temp_snap)
{
	IndexScanDesc scan;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(ambeginscan);

	/*
	 * If the AM doesn't do its own predicate locking, predicate-lock the
	 * whole index relation for serializable-isolation purposes.
	 */
	if (!(indexRelation->rd_indam->ampredlocks))
		PredicateLockRelation(indexRelation, snapshot);

	/*
	 * We hold a reference count to the relcache entry throughout the scan.
	 */
	RelationIncrementReferenceCount(indexRelation);

	/*
	 * Tell the AM to open a scan.
	 */
	scan = indexRelation->rd_indam->ambeginscan(indexRelation, nkeys,
												norderbys);
	/* Initialize information for parallel scan. */
	scan->parallel_scan = pscan;
	scan->xs_temp_snap = temp_snap;

	return scan;
}

/* ----------------
 *		index_rescan  - (re)start a scan of an index
 *
 * During a restart, the caller may specify a new set of scankeys and/or
 * orderbykeys; but the number of keys cannot differ from what index_beginscan
 * was told.  (Later we might relax that to "must not exceed", but currently
 * the index AMs tend to assume that scan->numberOfKeys is what to believe.)
 * To restart the scan without changing keys, pass NULL for the key arrays.
 * (Of course, keys *must* be passed on the first call, unless
 * scan->numberOfKeys is zero.)
 * ----------------
 */
void
index_rescan(IndexScanDesc scan,
			 ScanKey keys, int nkeys,
			 ScanKey orderbys, int norderbys)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amrescan);

	Assert(nkeys == scan->numberOfKeys);
	Assert(norderbys == scan->numberOfOrderBys);

	/* Release resources (like buffer pins) from table accesses */
	if (scan->xs_heapfetch)
		table_index_fetch_reset(scan->xs_heapfetch);

	scan->kill_prior_tuple = false; /* for safety */
	scan->xs_heap_continue = false;

	scan->indexRelation->rd_indam->amrescan(scan, keys, nkeys,
											orderbys, norderbys);
}

/* ----------------
 *		index_endscan - end a scan
 * ----------------
 */
void
index_endscan(IndexScanDesc scan)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amendscan);

	/* Release resources (like buffer pins) from table accesses */
	if (scan->xs_heapfetch)
	{
		table_index_fetch_end(scan->xs_heapfetch);
		scan->xs_heapfetch = NULL;
	}

	/* End the AM's scan */
	scan->indexRelation->rd_indam->amendscan(scan);

	/* Release index refcount acquired by index_beginscan */
	RelationDecrementReferenceCount(scan->indexRelation);

	/* Snapshot was registered by us (parallel-scan case); drop it */
	if (scan->xs_temp_snap)
		UnregisterSnapshot(scan->xs_snapshot);

	/* Release the scan data structure itself */
	IndexScanEnd(scan);
}

/* ----------------
 *		index_markpos  - mark a scan position
 * ----------------
 */
void
index_markpos(IndexScanDesc scan)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(ammarkpos);

	scan->indexRelation->rd_indam->ammarkpos(scan);
}

/* ----------------
 *		index_restrpos	- restore a scan position
 *
 * NOTE: this only restores the internal scan state of the index AM.  See
 * comments for ExecRestrPos().
 *
 * NOTE: For heap, in the presence of HOT chains, mark/restore only works
 * correctly if the scan's snapshot is MVCC-safe; that ensures that there's at
 * most one returnable tuple in each HOT chain, and so restoring the prior
 * state at the granularity of the index AM is sufficient.  Since the only
 * current user of mark/restore functionality is nodeMergejoin.c, this
 * effectively means that merge-join plans only work for MVCC snapshots.  This
 * could be fixed if necessary, but for now it seems unimportant.
 * ----------------
 */
void
index_restrpos(IndexScanDesc scan)
{
	Assert(IsMVCCSnapshot(scan->xs_snapshot));

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amrestrpos);

	/* release resources (like buffer pins) from table accesses */
	if (scan->xs_heapfetch)
		table_index_fetch_reset(scan->xs_heapfetch);

	scan->kill_prior_tuple = false; /* for safety */
	scan->xs_heap_continue = false;

	scan->indexRelation->rd_indam->amrestrpos(scan);
}

/*
 * index_parallelscan_estimate - estimate shared memory for parallel scan
 *
 * Currently, we don't pass any information to the AM-specific estimator,
 * so it can probably only return a constant.  In the future, we might need
 * to pass more information.
 */
Size
index_parallelscan_estimate(Relation indexRelation, Snapshot snapshot)
{
	Size		nbytes;

	RELATION_CHECKS;

	/* Fixed header plus serialized snapshot, MAXALIGN'd for the AM's data */
	nbytes = offsetof(ParallelIndexScanDescData, ps_snapshot_data);
	nbytes = add_size(nbytes, EstimateSnapshotSpace(snapshot));
	nbytes = MAXALIGN(nbytes);

	/*
	 * If amestimateparallelscan is not provided, assume there is no
	 * AM-specific data needed.  (It's hard to believe that could work, but
	 * it's easy enough to cater to it here.)
	 */
	if (indexRelation->rd_indam->amestimateparallelscan != NULL)
		nbytes = add_size(nbytes,
						  indexRelation->rd_indam->amestimateparallelscan());

	return nbytes;
}

/*
 * index_parallelscan_initialize - initialize parallel scan
 *
 * We initialize both the ParallelIndexScanDesc proper and the AM-specific
 * information which follows it.
 *
 * This function calls access method specific initialization routine to
 * initialize am specific information.  Call this just once in the leader
 * process; then, individual workers attach via index_beginscan_parallel.
 */
void
index_parallelscan_initialize(Relation heapRelation, Relation indexRelation,
							  Snapshot snapshot, ParallelIndexScanDesc target)
{
	Size		offset;

	RELATION_CHECKS;

	/*
	 * Compute where the AM-specific data begins; must match the layout
	 * assumed by index_parallelscan_estimate.
	 */
	offset = add_size(offsetof(ParallelIndexScanDescData, ps_snapshot_data),
					  EstimateSnapshotSpace(snapshot));
	offset = MAXALIGN(offset);

	target->ps_relid = RelationGetRelid(heapRelation);
	target->ps_indexid = RelationGetRelid(indexRelation);
	target->ps_offset = offset;
	SerializeSnapshot(snapshot, target->ps_snapshot_data);

	/* aminitparallelscan is optional; assume no-op if not provided by AM */
	if (indexRelation->rd_indam->aminitparallelscan != NULL)
	{
		void	   *amtarget;

		amtarget = OffsetToPointer(target, offset);
		indexRelation->rd_indam->aminitparallelscan(amtarget);
	}
}

/* ----------------
 *		index_parallelrescan  - (re)start a parallel scan of an index
 * ----------------
 */
void
index_parallelrescan(IndexScanDesc scan)
{
	SCAN_CHECKS;

	/* Release resources (like buffer pins) from table accesses */
	if (scan->xs_heapfetch)
		table_index_fetch_reset(scan->xs_heapfetch);

	/* amparallelrescan is optional; assume no-op if not provided by AM */
	if (scan->indexRelation->rd_indam->amparallelrescan != NULL)
		scan->indexRelation->rd_indam->amparallelrescan(scan);
}

/*
 * index_beginscan_parallel - join parallel index scan
 *
 * Caller must be holding suitable locks on the heap and the index.
 */
IndexScanDesc
index_beginscan_parallel(Relation heaprel, Relation indexrel, int nkeys,
						 int norderbys, ParallelIndexScanDesc pscan)
{
	Snapshot	snapshot;
	IndexScanDesc scan;

	Assert(RelationGetRelid(heaprel) == pscan->ps_relid);

	/*
	 * Restore the leader's snapshot and register it; temp_snap = true below
	 * makes index_endscan unregister it again.
	 */
	snapshot = RestoreSnapshot(pscan->ps_snapshot_data);
	RegisterSnapshot(snapshot);
	scan = index_beginscan_internal(indexrel, nkeys, norderbys, snapshot,
									pscan, true);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by index_beginscan_internal.
	 */
	scan->heapRelation = heaprel;
	scan->xs_snapshot = snapshot;

	/* prepare to fetch index matches from table */
	scan->xs_heapfetch = table_index_fetch_begin(heaprel);

	return scan;
}

/* ----------------
 * index_getnext_tid - get the next TID from a scan
 *
 * The result is the next TID satisfying the scan keys,
 * or NULL if no more matching tuples exist.
 * ----------------
 */
ItemPointer
index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
{
	bool		found;

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amgettuple);

	Assert(TransactionIdIsValid(RecentGlobalXmin));

	/*
	 * The AM's amgettuple proc finds the next index entry matching the scan
	 * keys, and puts the TID into scan->xs_heaptid.  It should also set
	 * scan->xs_recheck and possibly scan->xs_itup/scan->xs_hitup, though we
	 * pay no attention to those fields here.
	 */
	found = scan->indexRelation->rd_indam->amgettuple(scan, direction);

	/* Reset kill flag immediately for safety */
	scan->kill_prior_tuple = false;
	scan->xs_heap_continue = false;

	/* If we're out of index entries, we're done */
	if (!found)
	{
		/* release resources (like buffer pins) from table accesses */
		if (scan->xs_heapfetch)
			table_index_fetch_reset(scan->xs_heapfetch);

		return NULL;
	}
	Assert(ItemPointerIsValid(&scan->xs_heaptid));

	pgstat_count_index_tuples(scan->indexRelation, 1);

	/* Return the TID of the tuple we found. */
	return &scan->xs_heaptid;
}

/* ----------------
 *		index_fetch_heap - get the scan's next heap tuple
 *
 * The result is a visible heap tuple associated with the index TID most
 * recently fetched by index_getnext_tid, or NULL if no more matching tuples
 * exist.
 * (There can be more than one matching tuple because of HOT chains,
 * although when using an MVCC snapshot it should be impossible for more than
 * one such tuple to exist.)
 *
 * On success, the buffer containing the heap tup is pinned (the pin will be
 * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
bool
index_fetch_heap(IndexScanDesc scan, TupleTableSlot *slot)
{
	bool		all_dead = false;
	bool		found;

	found = table_index_fetch_tuple(scan->xs_heapfetch, &scan->xs_heaptid,
									scan->xs_snapshot, slot,
									&scan->xs_heap_continue, &all_dead);

	if (found)
		pgstat_count_heap_fetch(scan->indexRelation);

	/*
	 * If we scanned a whole HOT chain and found only dead tuples, tell index
	 * AM to kill its entry for that TID (this will take effect in the next
	 * amgettuple call, in index_getnext_tid).  We do not do this when in
	 * recovery because it may violate MVCC to do so.  See comments in
	 * RelationGetIndexScan().
	 */
	if (!scan->xactStartedInRecovery)
		scan->kill_prior_tuple = all_dead;

	return found;
}

/* ----------------
 *		index_getnext_slot - get the next tuple from a scan
 *
 * The result is true if a tuple satisfying the scan keys and the snapshot was
 * found, false otherwise.  The tuple is stored in the specified slot.
 *
 * On success, resources (like buffer pins) are likely to be held, and will be
 * dropped by a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
bool
index_getnext_slot(IndexScanDesc scan, ScanDirection direction, TupleTableSlot *slot)
{
	for (;;)
	{
		/* xs_heap_continue means we're mid-HOT-chain for the current TID */
		if (!scan->xs_heap_continue)
		{
			ItemPointer tid;

			/* Time to fetch the next TID from the index */
			tid = index_getnext_tid(scan, direction);

			/* If we're out of index entries, we're done */
			if (tid == NULL)
				break;

			Assert(ItemPointerEquals(tid, &scan->xs_heaptid));
		}

		/*
		 * Fetch the next (or only) visible heap tuple for this index entry.
		 * If we don't find anything, loop around and grab the next TID from
		 * the index.
		 */
		Assert(ItemPointerIsValid(&scan->xs_heaptid));
		if (index_fetch_heap(scan, slot))
			return true;
	}

	return false;
}

/* ----------------
 *		index_getbitmap - get all tuples at once from an index scan
 *
 * Adds the TIDs of all heap tuples satisfying the scan keys to a bitmap.
 * Since there's no interlock between the index scan and the eventual heap
 * access, this is only safe to use with MVCC-based snapshots: the heap
 * item slot could have been replaced by a newer tuple by the time we get
 * to it.
 *
 * Returns the number of matching tuples found.  (Note: this might be only
 * approximate, so it should only be used for statistical purposes.)
 * ----------------
 */
int64
index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
{
	int64		ntids;

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amgetbitmap);

	/* just make sure this is false... */
	scan->kill_prior_tuple = false;

	/*
	 * have the am's getbitmap proc do all the work.
	 */
	ntids = scan->indexRelation->rd_indam->amgetbitmap(scan, bitmap);

	pgstat_count_index_tuples(scan->indexRelation, ntids);

	return ntids;
}

/* ----------------
 *		index_bulk_delete - do mass deletion of index entries
 *
 *		callback routine tells whether a given main-heap tuple is
 *		to be deleted
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_bulk_delete(IndexVacuumInfo *info,
				  IndexBulkDeleteResult *stats,
				  IndexBulkDeleteCallback callback,
				  void *callback_state)
{
	Relation	indexRelation = info->index;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(ambulkdelete);

	return indexRelation->rd_indam->ambulkdelete(info, stats,
												 callback, callback_state);
}

/* ----------------
 *		index_vacuum_cleanup - do post-deletion cleanup of an index
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_vacuum_cleanup(IndexVacuumInfo *info,
					 IndexBulkDeleteResult *stats)
{
	Relation	indexRelation = info->index;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(amvacuumcleanup);

	return indexRelation->rd_indam->amvacuumcleanup(info, stats);
}

/* ----------------
 *		index_can_return
 *
 *		Does the index access method support index-only scans for the given
 *		column?
 * ----------------
 */
bool
index_can_return(Relation indexRelation, int attno)
{
	RELATION_CHECKS;

	/* amcanreturn is optional; assume false if not provided by AM */
	if (indexRelation->rd_indam->amcanreturn == NULL)
		return false;

	return indexRelation->rd_indam->amcanreturn(indexRelation, attno);
}

/* ----------------
 *		index_getprocid
 *
 *		Index access methods typically require support routines that are
 *		not directly the implementation of any WHERE-clause query operator
 *		and so cannot be kept in pg_amop.  Instead, such routines are kept
 *		in pg_amproc.  These registered procedure OIDs are assigned numbers
 *		according to a convention established by the access method.
 *		The general index code doesn't know anything about the routines
 *		involved; it just builds an ordered list of them for
 *		each attribute on which an index is defined.
 *
 *		As of Postgres 8.3, support routines within an operator family
 *		are further subdivided by the "left type" and "right type" of the
 *		query operator(s) that they support.  The "default" functions for a
 *		particular indexed attribute are those with both types equal to
 *		the index opclass' opcintype (note that this is subtly different
 *		from the indexed attribute's own type: it may be a binary-compatible
 *		type instead).  Only the default functions are stored in relcache
 *		entries --- access methods can use the syscache to look up non-default
 *		functions.
 *
 *		This routine returns the requested default procedure OID for a
 *		particular indexed attribute.
 * ----------------
 */
RegProcedure
index_getprocid(Relation irel,
				AttrNumber attnum,
				uint16 procnum)
{
	RegProcedure *loc;
	int			nproc;
	int			procindex;

	nproc = irel->rd_indam->amsupport;

	Assert(procnum > 0 && procnum <= (uint16) nproc);

	/* rd_support is a flat array with amsupport slots per attribute */
	procindex = (nproc * (attnum - 1)) + (procnum - 1);

	loc = irel->rd_support;

	Assert(loc != NULL);

	return loc[procindex];
}

/* ----------------
 *		index_getprocinfo
 *
 *		This routine allows index AMs to keep fmgr lookup info for
 *		support procs in the relcache.  As above, only the "default"
 *		functions for any particular indexed attribute are cached.
 *
 * Note: the return value points into cached data that will be lost during
 * any relcache rebuild!  Therefore, either use the callinfo right away,
 * or save it only after having acquired some type of lock on the index rel.
 * ----------------
 */
FmgrInfo *
index_getprocinfo(Relation irel,
				  AttrNumber attnum,
				  uint16 procnum)
{
	FmgrInfo   *locinfo;
	int			nproc;
	int			optsproc;
	int			procindex;

	nproc = irel->rd_indam->amsupport;
	optsproc = irel->rd_indam->amoptsprocnum;

	Assert(procnum > 0 && procnum <= (uint16) nproc);

	/* rd_supportinfo parallels rd_support: amsupport slots per attribute */
	procindex = (nproc * (attnum - 1)) + (procnum - 1);

	locinfo = irel->rd_supportinfo;

	Assert(locinfo != NULL);

	locinfo += procindex;

	/* Initialize the lookup info if first time through */
	if (locinfo->fn_oid == InvalidOid)
	{
		RegProcedure *loc = irel->rd_support;
		RegProcedure procId;

		Assert(loc != NULL);

		procId = loc[procindex];

		/*
		 * Complain if function was not found during IndexSupportInitialize.
		 * This should not happen unless the system tables contain bogus
		 * entries for the index opclass.  (If an AM wants to allow a support
		 * function to be optional, it can use index_getprocid.)
		 */
		if (!RegProcedureIsValid(procId))
			elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
				 procnum, attnum, RelationGetRelationName(irel));

		/* Cache the lookup info in the index's private memory context */
		fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);

		/*
		 * Attach opclass options to fn_expr, except for the options support
		 * proc itself (which is what parses those options).
		 */
		if (procnum != optsproc)
		{
			/* Initialize locinfo->fn_expr with opclass options Const */
			bytea	  **attoptions = RelationGetIndexAttOptions(irel, false);
			MemoryContext oldcxt = MemoryContextSwitchTo(irel->rd_indexcxt);

			set_fn_opclass_options(locinfo, attoptions[attnum - 1]);

			MemoryContextSwitchTo(oldcxt);
		}
	}

	return locinfo;
}

/* ----------------
 *		index_store_float8_orderby_distances
 *
 *		Convert AM distance function's results (that can be inexact)
 *		to ORDER BY types and save them into xs_orderbyvals/xs_orderbynulls
 *		for a possible recheck.
 * ----------------
 */
void
index_store_float8_orderby_distances(IndexScanDesc scan, Oid *orderByTypes,
									 IndexOrderByDistance *distances,
									 bool recheckOrderBy)
{
	int			i;

	/* If recheck is requested, distances must have been supplied */
	Assert(distances || !recheckOrderBy);

	scan->xs_recheckorderby = recheckOrderBy;

	for (i = 0; i < scan->numberOfOrderBys; i++)
	{
		if (orderByTypes[i] == FLOAT8OID)
		{
#ifndef USE_FLOAT8_BYVAL
			/* must free any old value to avoid memory leakage */
			if (!scan->xs_orderbynulls[i])
				pfree(DatumGetPointer(scan->xs_orderbyvals[i]));
#endif
			if (distances && !distances[i].isnull)
			{
				scan->xs_orderbyvals[i] = Float8GetDatum(distances[i].value);
				scan->xs_orderbynulls[i] = false;
			}
			else
			{
				scan->xs_orderbyvals[i] = (Datum) 0;
				scan->xs_orderbynulls[i] = true;
			}
		}
		else if (orderByTypes[i] == FLOAT4OID)
		{
			/* convert distance function's result to ORDER BY type */
			if (distances && !distances[i].isnull)
			{
				scan->xs_orderbyvals[i] = Float4GetDatum((float4) distances[i].value);
				scan->xs_orderbynulls[i] = false;
			}
			else
			{
				scan->xs_orderbyvals[i] = (Datum) 0;
				scan->xs_orderbynulls[i] = true;
			}
		}
		else
		{
			/*
			 * If the ordering operator's return value is anything else, we
			 * don't know how to convert the float8 bound calculated by the
			 * distance function to that.  The executor won't actually need
			 * the order by values we return here, if there are no lossy
			 * results, so only insist on converting if the *recheck flag is
			 * set.
			 */
			if (scan->xs_recheckorderby)
				elog(ERROR, "ORDER BY operator must return float8 or float4 if the distance function is lossy");
			scan->xs_orderbynulls[i] = true;
		}
	}
}

/* ----------------
 *		index_opclass_options
 *
 *		Parse opclass-specific options for index column.
 * ----------------
 */
bytea *
index_opclass_options(Relation indrel, AttrNumber attnum, Datum attoptions,
					  bool validate)
{
	int			amoptsprocnum = indrel->rd_indam->amoptsprocnum;
	Oid			procid = InvalidOid;
	FmgrInfo   *procinfo;
	local_relopts relopts;

	/* fetch options support procedure if specified */
	if (amoptsprocnum != 0)
		procid = index_getprocid(indrel, attnum, amoptsprocnum);

	if (!OidIsValid(procid))
	{
		Oid			opclass;
		Datum		indclassDatum;
		oidvector  *indclass;
		bool		isnull;

		if (!DatumGetPointer(attoptions))
			return NULL;		/* ok, no options, no procedure */

		/*
		 * Report an error if the opclass's options-parsing procedure does
		 * not exist but the opclass options are specified.
		 */
		indclassDatum = SysCacheGetAttr(INDEXRELID, indrel->rd_indextuple,
										Anum_pg_index_indclass, &isnull);
		Assert(!isnull);
		indclass = (oidvector *) DatumGetPointer(indclassDatum);
		opclass = indclass->values[attnum - 1];

		ereport(ERROR,
				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
				 errmsg("operator class %s has no options",
						generate_opclass_name(opclass))));
	}

	init_local_reloptions(&relopts, 0);

	/* Let the options support proc declare its reloptions... */
	procinfo = index_getprocinfo(indrel, attnum, amoptsprocnum);

	(void) FunctionCall1(procinfo, PointerGetDatum(&relopts));

	/* ...then parse/validate the supplied options against them */
	return build_local_reloptions(&relopts, attoptions, validate);
}