1 /*------------------------------------------------------------------------- 2 * 3 * nodeIndexonlyscan.c 4 * Routines to support index-only scans 5 * 6 * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group 7 * Portions Copyright (c) 1994, Regents of the University of California 8 * 9 * 10 * IDENTIFICATION 11 * src/backend/executor/nodeIndexonlyscan.c 12 * 13 *------------------------------------------------------------------------- 14 */ 15 /* 16 * INTERFACE ROUTINES 17 * ExecIndexOnlyScan scans an index 18 * IndexOnlyNext retrieve next tuple 19 * ExecInitIndexOnlyScan creates and initializes state info. 20 * ExecReScanIndexOnlyScan rescans the indexed relation. 21 * ExecEndIndexOnlyScan releases all storage. 22 * ExecIndexOnlyMarkPos marks scan position. 23 * ExecIndexOnlyRestrPos restores scan position. 24 * ExecIndexOnlyScanEstimate estimates DSM space needed for 25 * parallel index-only scan 26 * ExecIndexOnlyScanInitializeDSM initialize DSM for parallel 27 * index-only scan 28 * ExecIndexOnlyScanReInitializeDSM reinitialize DSM for fresh scan 29 * ExecIndexOnlyScanInitializeWorker attach to DSM info in parallel worker 30 */ 31 #include "postgres.h" 32 33 #include "access/genam.h" 34 #include "access/relscan.h" 35 #include "access/tableam.h" 36 #include "access/tupdesc.h" 37 #include "access/visibilitymap.h" 38 #include "executor/execdebug.h" 39 #include "executor/nodeIndexonlyscan.h" 40 #include "executor/nodeIndexscan.h" 41 #include "miscadmin.h" 42 #include "storage/bufmgr.h" 43 #include "storage/predicate.h" 44 #include "utils/memutils.h" 45 #include "utils/rel.h" 46 47 48 static TupleTableSlot *IndexOnlyNext(IndexOnlyScanState *node); 49 static void StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, 50 TupleDesc itupdesc); 51 52 53 /* ---------------------------------------------------------------- 54 * IndexOnlyNext 55 * 56 * Retrieve a tuple from the IndexOnlyScan node's index. 
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
IndexOnlyNext(IndexOnlyScanState *node)
{
    EState     *estate;
    ExprContext *econtext;
    ScanDirection direction;
    IndexScanDesc scandesc;
    TupleTableSlot *slot;
    ItemPointer tid;

    /*
     * extract necessary information from index scan node
     */
    estate = node->ss.ps.state;
    direction = estate->es_direction;
    /* flip direction if this is an overall backward scan */
    if (ScanDirectionIsBackward(((IndexOnlyScan *) node->ss.ps.plan)->indexorderdir))
    {
        if (ScanDirectionIsForward(direction))
            direction = BackwardScanDirection;
        else if (ScanDirectionIsBackward(direction))
            direction = ForwardScanDirection;
    }
    scandesc = node->ioss_ScanDesc;
    econtext = node->ss.ps.ps_ExprContext;
    slot = node->ss.ss_ScanTupleSlot;

    if (scandesc == NULL)
    {
        /*
         * We reach here if the index only scan is not parallel, or if we're
         * serially executing an index only scan that was planned to be
         * parallel.  (A parallel leader/worker gets its descriptor from the
         * DSM-initialization paths below instead.)
         */
        scandesc = index_beginscan(node->ss.ss_currentRelation,
                                   node->ioss_RelationDesc,
                                   estate->es_snapshot,
                                   node->ioss_NumScanKeys,
                                   node->ioss_NumOrderByKeys);

        node->ioss_ScanDesc = scandesc;

        /* Set it up for index-only scan */
        node->ioss_ScanDesc->xs_want_itup = true;
        node->ioss_VMBuffer = InvalidBuffer;

        /*
         * If no run-time keys to calculate or they are ready, go ahead and
         * pass the scankeys to the index AM.  (Otherwise, ExecReScan will do
         * it once the runtime keys have been computed.)
         */
        if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady)
            index_rescan(scandesc,
                         node->ioss_ScanKeys,
                         node->ioss_NumScanKeys,
                         node->ioss_OrderByKeys,
                         node->ioss_NumOrderByKeys);
    }

    /*
     * OK, now that we have what we need, fetch the next tuple.
     */
    while ((tid = index_getnext_tid(scandesc, direction)) != NULL)
    {
        bool        tuple_from_heap = false;

        CHECK_FOR_INTERRUPTS();

        /*
         * We can skip the heap fetch if the TID references a heap page on
         * which all tuples are known visible to everybody.  In any case,
         * we'll use the index tuple not the heap tuple as the data source.
         *
         * Note on Memory Ordering Effects: visibilitymap_get_status does not
         * lock the visibility map buffer, and therefore the result we read
         * here could be slightly stale.  However, it can't be stale enough to
         * matter.
         *
         * We need to detect clearing a VM bit due to an insert right away,
         * because the tuple is present in the index page but not visible. The
         * reading of the TID by this scan (using a shared lock on the index
         * buffer) is serialized with the insert of the TID into the index
         * (using an exclusive lock on the index buffer). Because the VM bit
         * is cleared before updating the index, and locking/unlocking of the
         * index page acts as a full memory barrier, we are sure to see the
         * cleared bit if we see a recently-inserted TID.
         *
         * Deletes do not update the index page (only VACUUM will clear out
         * the TID), so the clearing of the VM bit by a delete is not
         * serialized with this test below, and we may see a value that is
         * significantly stale. However, we don't care about the delete right
         * away, because the tuple is still visible until the deleting
         * transaction commits or the statement ends (if it's our
         * transaction). In either case, the lock on the VM buffer will have
         * been released (acting as a write barrier) after clearing the bit.
         * And for us to have a snapshot that includes the deleting
         * transaction (making the tuple invisible), we must have acquired
         * ProcArrayLock after that time, acting as a read barrier.
         *
         * It's worth going through this complexity to avoid needing to lock
         * the VM buffer, which could cause significant contention.
         */
        if (!VM_ALL_VISIBLE(scandesc->heapRelation,
                            ItemPointerGetBlockNumber(tid),
                            &node->ioss_VMBuffer))
        {
            /*
             * Rats, we have to visit the heap to check visibility.
             */
            InstrCountTuples2(node, 1);
            if (!index_fetch_heap(scandesc, node->ioss_TableSlot))
                continue;       /* no visible tuple, try next index entry */

            /* We only needed the visibility check, not the tuple itself. */
            ExecClearTuple(node->ioss_TableSlot);

            /*
             * Only MVCC snapshots are supported here, so there should be no
             * need to keep following the HOT chain once a visible entry has
             * been found.  If we did want to allow that, we'd need to keep
             * more state to remember not to call index_getnext_tid next time.
             */
            if (scandesc->xs_heap_continue)
                elog(ERROR, "non-MVCC snapshots are not supported in index-only scans");

            /*
             * Note: at this point we are holding a pin on the heap page, as
             * recorded in scandesc->xs_cbuf.  We could release that pin now,
             * but it's not clear whether it's a win to do so.  The next index
             * entry might require a visit to the same heap page.
             */

            tuple_from_heap = true;
        }

        /*
         * Fill the scan tuple slot with data from the index.  This might be
         * provided in either HeapTuple or IndexTuple format.  Conceivably an
         * index AM might fill both fields, in which case we prefer the heap
         * format, since it's probably a bit cheaper to fill a slot from.
         */
        if (scandesc->xs_hitup)
        {
            /*
             * We don't take the trouble to verify that the provided tuple has
             * exactly the slot's format, but it seems worth doing a quick
             * check on the number of fields.
             */
            Assert(slot->tts_tupleDescriptor->natts ==
                   scandesc->xs_hitupdesc->natts);
            ExecForceStoreHeapTuple(scandesc->xs_hitup, slot, false);
        }
        else if (scandesc->xs_itup)
            StoreIndexTuple(slot, scandesc->xs_itup, scandesc->xs_itupdesc);
        else
            elog(ERROR, "no data returned for index-only scan");

        /*
         * If the index was lossy, we have to recheck the index quals.
         * (Currently, this can never happen, but we should support the case
         * for possible future use, eg with GiST indexes.)
         */
        if (scandesc->xs_recheck)
        {
            econtext->ecxt_scantuple = slot;
            if (!ExecQualAndReset(node->indexqual, econtext))
            {
                /* Fails recheck, so drop it and loop back for another */
                InstrCountFiltered2(node, 1);
                continue;
            }
        }

        /*
         * We don't currently support rechecking ORDER BY distances.  (In
         * principle, if the index can support retrieval of the originally
         * indexed value, it should be able to produce an exact distance
         * calculation too.  So it's not clear that adding code here for
         * recheck/re-sort would be worth the trouble.  But we should at least
         * throw an error if someone tries it.)
         */
        if (scandesc->numberOfOrderBys > 0 && scandesc->xs_recheckorderby)
            ereport(ERROR,
                    (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
                     errmsg("lossy distance functions are not supported in index-only scans")));

        /*
         * If we didn't access the heap, then we'll need to take a predicate
         * lock explicitly, as if we had.  For now we do that at page level.
         */
        if (!tuple_from_heap)
            PredicateLockPage(scandesc->heapRelation,
                              ItemPointerGetBlockNumber(tid),
                              estate->es_snapshot);

        return slot;
    }

    /*
     * if we get here it means the index scan failed so we are at the end of
     * the scan..
     */
    return ExecClearTuple(slot);
}

/*
 * StoreIndexTuple
 *      Fill the slot with data from the index tuple.
 *
 * At some point this might be generally-useful functionality, but
 * right now we don't need it elsewhere.
 */
static void
StoreIndexTuple(TupleTableSlot *slot, IndexTuple itup, TupleDesc itupdesc)
{
    /*
     * Note: we must use the tupdesc supplied by the AM in index_deform_tuple,
     * not the slot's tupdesc, in case the latter has different datatypes
     * (this happens for btree name_ops in particular).  They'd better have
     * the same number of columns though, as well as being datatype-compatible
     * which is something we can't so easily check.
     */
    Assert(slot->tts_tupleDescriptor->natts == itupdesc->natts);

    ExecClearTuple(slot);
    index_deform_tuple(itup, itupdesc, slot->tts_values, slot->tts_isnull);
    ExecStoreVirtualTuple(slot);
}

/*
 * IndexOnlyRecheck -- access method routine to recheck a tuple in EvalPlanQual
 *
 * This can't really happen, since an index can't supply CTID which would
 * be necessary data for any potential EvalPlanQual target relation.  If it
 * did happen, the EPQ code would pass us the wrong data, namely a heap
 * tuple not an index tuple.  So throw an error.
 */
static bool
IndexOnlyRecheck(IndexOnlyScanState *node, TupleTableSlot *slot)
{
    elog(ERROR, "EvalPlanQual recheck is not supported in index-only scans");
    return false;               /* keep compiler quiet */
}

/* ----------------------------------------------------------------
 *      ExecIndexOnlyScan(node)
 *
 *      Main entry point: evaluate runtime keys if needed, then let
 *      ExecScan drive IndexOnlyNext/IndexOnlyRecheck.
 * ----------------------------------------------------------------
 */
static TupleTableSlot *
ExecIndexOnlyScan(PlanState *pstate)
{
    IndexOnlyScanState *node = castNode(IndexOnlyScanState, pstate);

    /*
     * If we have runtime keys and they've not already been set up, do it now.
     * (ExecReScan computes them and passes them to the index AM.)
     */
    if (node->ioss_NumRuntimeKeys != 0 && !node->ioss_RuntimeKeysReady)
        ExecReScan((PlanState *) node);

    return ExecScan(&node->ss,
                    (ExecScanAccessMtd) IndexOnlyNext,
                    (ExecScanRecheckMtd) IndexOnlyRecheck);
}

/* ----------------------------------------------------------------
 *      ExecReScanIndexOnlyScan(node)
 *
 *      Recalculates the values of any scan keys whose value depends on
 *      information known at runtime, then rescans the indexed relation.
 *
 *      Updating the scan key was formerly done separately in
 *      ExecUpdateIndexScanKeys. Integrating it into ReScan makes
 *      rescans of indices and relations/general streams more uniform.
 * ----------------------------------------------------------------
 */
void
ExecReScanIndexOnlyScan(IndexOnlyScanState *node)
{
    /*
     * If we are doing runtime key calculations (ie, any of the index key
     * values weren't simple Consts), compute the new key values.  But first,
     * reset the context so we don't leak memory as each outer tuple is
     * scanned.  Note this assumes that we will recalculate *all* runtime keys
     * on each call.
     */
    if (node->ioss_NumRuntimeKeys != 0)
    {
        ExprContext *econtext = node->ioss_RuntimeContext;

        ResetExprContext(econtext);
        ExecIndexEvalRuntimeKeys(econtext,
                                 node->ioss_RuntimeKeys,
                                 node->ioss_NumRuntimeKeys);
    }
    node->ioss_RuntimeKeysReady = true;

    /* reset index scan (may be NULL if the scan hasn't started yet) */
    if (node->ioss_ScanDesc)
        index_rescan(node->ioss_ScanDesc,
                     node->ioss_ScanKeys, node->ioss_NumScanKeys,
                     node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);

    ExecScanReScan(&node->ss);
}


/* ----------------------------------------------------------------
 *      ExecEndIndexOnlyScan
 *
 *      Releases buffers, slots, and the index scan/relation.
 * ----------------------------------------------------------------
 */
void
ExecEndIndexOnlyScan(IndexOnlyScanState *node)
{
    Relation    indexRelationDesc;
    IndexScanDesc indexScanDesc;

    /*
     * extract information from the node
     */
    indexRelationDesc = node->ioss_RelationDesc;
    indexScanDesc = node->ioss_ScanDesc;

    /* Release VM buffer pin, if any. */
    if (node->ioss_VMBuffer != InvalidBuffer)
    {
        ReleaseBuffer(node->ioss_VMBuffer);
        node->ioss_VMBuffer = InvalidBuffer;
    }

    /*
     * Free the exprcontext(s) ...
now dead code, see ExecFreeExprContext 389 */ 390 #ifdef NOT_USED 391 ExecFreeExprContext(&node->ss.ps); 392 if (node->ioss_RuntimeContext) 393 FreeExprContext(node->ioss_RuntimeContext, true); 394 #endif 395 396 /* 397 * clear out tuple table slots 398 */ 399 if (node->ss.ps.ps_ResultTupleSlot) 400 ExecClearTuple(node->ss.ps.ps_ResultTupleSlot); 401 ExecClearTuple(node->ss.ss_ScanTupleSlot); 402 403 /* 404 * close the index relation (no-op if we didn't open it) 405 */ 406 if (indexScanDesc) 407 index_endscan(indexScanDesc); 408 if (indexRelationDesc) 409 index_close(indexRelationDesc, NoLock); 410 } 411 412 /* ---------------------------------------------------------------- 413 * ExecIndexOnlyMarkPos 414 * 415 * Note: we assume that no caller attempts to set a mark before having read 416 * at least one tuple. Otherwise, ioss_ScanDesc might still be NULL. 417 * ---------------------------------------------------------------- 418 */ 419 void 420 ExecIndexOnlyMarkPos(IndexOnlyScanState *node) 421 { 422 EState *estate = node->ss.ps.state; 423 EPQState *epqstate = estate->es_epq_active; 424 425 if (epqstate != NULL) 426 { 427 /* 428 * We are inside an EvalPlanQual recheck. If a test tuple exists for 429 * this relation, then we shouldn't access the index at all. We would 430 * instead need to save, and later restore, the state of the 431 * relsubs_done flag, so that re-fetching the test tuple is possible. 432 * However, given the assumption that no caller sets a mark at the 433 * start of the scan, we can only get here with relsubs_done[i] 434 * already set, and so no state need be saved. 
435 */ 436 Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; 437 438 Assert(scanrelid > 0); 439 if (epqstate->relsubs_slot[scanrelid - 1] != NULL || 440 epqstate->relsubs_rowmark[scanrelid - 1] != NULL) 441 { 442 /* Verify the claim above */ 443 if (!epqstate->relsubs_done[scanrelid - 1]) 444 elog(ERROR, "unexpected ExecIndexOnlyMarkPos call in EPQ recheck"); 445 return; 446 } 447 } 448 449 index_markpos(node->ioss_ScanDesc); 450 } 451 452 /* ---------------------------------------------------------------- 453 * ExecIndexOnlyRestrPos 454 * ---------------------------------------------------------------- 455 */ 456 void 457 ExecIndexOnlyRestrPos(IndexOnlyScanState *node) 458 { 459 EState *estate = node->ss.ps.state; 460 EPQState *epqstate = estate->es_epq_active; 461 462 if (estate->es_epq_active != NULL) 463 { 464 /* See comments in ExecIndexMarkPos */ 465 Index scanrelid = ((Scan *) node->ss.ps.plan)->scanrelid; 466 467 Assert(scanrelid > 0); 468 if (epqstate->relsubs_slot[scanrelid - 1] != NULL || 469 epqstate->relsubs_rowmark[scanrelid - 1] != NULL) 470 { 471 /* Verify the claim above */ 472 if (!epqstate->relsubs_done[scanrelid - 1]) 473 elog(ERROR, "unexpected ExecIndexOnlyRestrPos call in EPQ recheck"); 474 return; 475 } 476 } 477 478 index_restrpos(node->ioss_ScanDesc); 479 } 480 481 /* ---------------------------------------------------------------- 482 * ExecInitIndexOnlyScan 483 * 484 * Initializes the index scan's state information, creates 485 * scan keys, and opens the base and index relations. 486 * 487 * Note: index scans have 2 sets of state information because 488 * we have to keep track of the base relation and the 489 * index relation. 
 * ----------------------------------------------------------------
 */
IndexOnlyScanState *
ExecInitIndexOnlyScan(IndexOnlyScan *node, EState *estate, int eflags)
{
    IndexOnlyScanState *indexstate;
    Relation    currentRelation;
    LOCKMODE    lockmode;
    TupleDesc   tupDesc;

    /*
     * create state structure
     */
    indexstate = makeNode(IndexOnlyScanState);
    indexstate->ss.ps.plan = (Plan *) node;
    indexstate->ss.ps.state = estate;
    indexstate->ss.ps.ExecProcNode = ExecIndexOnlyScan;

    /*
     * Miscellaneous initialization
     *
     * create expression context for node
     */
    ExecAssignExprContext(estate, &indexstate->ss.ps);

    /*
     * open the scan relation
     */
    currentRelation = ExecOpenScanRelation(estate, node->scan.scanrelid, eflags);

    indexstate->ss.ss_currentRelation = currentRelation;
    indexstate->ss.ss_currentScanDesc = NULL;   /* no heap scan here */

    /*
     * Build the scan tuple type using the indextlist generated by the
     * planner.  We use this, rather than the index's physical tuple
     * descriptor, because the latter contains storage column types not the
     * types of the original datums.  (It's the AM's responsibility to return
     * suitable data anyway.)
     */
    tupDesc = ExecTypeFromTL(node->indextlist);
    ExecInitScanTupleSlot(estate, &indexstate->ss, tupDesc,
                          &TTSOpsVirtual);

    /*
     * We need another slot, in a format that's suitable for the table AM, for
     * when we need to fetch a tuple from the table for rechecking visibility.
     */
    indexstate->ioss_TableSlot =
        ExecAllocTableSlot(&estate->es_tupleTable,
                           RelationGetDescr(currentRelation),
                           table_slot_callbacks(currentRelation));

    /*
     * Initialize result type and projection info.  The node's targetlist will
     * contain Vars with varno = INDEX_VAR, referencing the scan tuple.
     */
    ExecInitResultTypeTL(&indexstate->ss.ps);
    ExecAssignScanProjectionInfoWithVarno(&indexstate->ss, INDEX_VAR);

    /*
     * initialize child expressions
     *
     * Note: we don't initialize all of the indexorderby expression, only the
     * sub-parts corresponding to runtime keys (see below).
     */
    indexstate->ss.ps.qual =
        ExecInitQual(node->scan.plan.qual, (PlanState *) indexstate);
    indexstate->indexqual =
        ExecInitQual(node->indexqual, (PlanState *) indexstate);

    /*
     * If we are just doing EXPLAIN (ie, aren't going to run the plan), stop
     * here.  This allows an index-advisor plugin to EXPLAIN a plan containing
     * references to nonexistent indexes.
     */
    if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
        return indexstate;

    /* Open the index relation. */
    lockmode = exec_rt_fetch(node->scan.scanrelid, estate)->rellockmode;
    indexstate->ioss_RelationDesc = index_open(node->indexid, lockmode);

    /*
     * Initialize index-specific scan state
     */
    indexstate->ioss_RuntimeKeysReady = false;
    indexstate->ioss_RuntimeKeys = NULL;
    indexstate->ioss_NumRuntimeKeys = 0;

    /*
     * build the index scan keys from the index qualification
     */
    ExecIndexBuildScanKeys((PlanState *) indexstate,
                           indexstate->ioss_RelationDesc,
                           node->indexqual,
                           false,
                           &indexstate->ioss_ScanKeys,
                           &indexstate->ioss_NumScanKeys,
                           &indexstate->ioss_RuntimeKeys,
                           &indexstate->ioss_NumRuntimeKeys,
                           NULL,    /* no ArrayKeys */
                           NULL);

    /*
     * any ORDER BY exprs have to be turned into scankeys in the same way
     */
    ExecIndexBuildScanKeys((PlanState *) indexstate,
                           indexstate->ioss_RelationDesc,
                           node->indexorderby,
                           true,
                           &indexstate->ioss_OrderByKeys,
                           &indexstate->ioss_NumOrderByKeys,
                           &indexstate->ioss_RuntimeKeys,
                           &indexstate->ioss_NumRuntimeKeys,
                           NULL,    /* no ArrayKeys */
                           NULL);

    /*
     * If we have runtime keys, we need an ExprContext to evaluate them.  The
     * node's standard context won't do because we want to reset that context
     * for every tuple.  So, build another context just like the other one...
     * -tgl 7/11/00
     */
    if (indexstate->ioss_NumRuntimeKeys != 0)
    {
        ExprContext *stdecontext = indexstate->ss.ps.ps_ExprContext;

        ExecAssignExprContext(estate, &indexstate->ss.ps);
        indexstate->ioss_RuntimeContext = indexstate->ss.ps.ps_ExprContext;
        indexstate->ss.ps.ps_ExprContext = stdecontext;
    }
    else
    {
        indexstate->ioss_RuntimeContext = NULL;
    }

    /*
     * all done.
     */
    return indexstate;
}

/* ----------------------------------------------------------------
 *                      Parallel Index-only Scan Support
 * ----------------------------------------------------------------
 */

/* ----------------------------------------------------------------
 *      ExecIndexOnlyScanEstimate
 *
 *      Compute the amount of space we'll need in the parallel
 *      query DSM, and inform pcxt->estimator about our needs.
 * ----------------------------------------------------------------
 */
void
ExecIndexOnlyScanEstimate(IndexOnlyScanState *node,
                          ParallelContext *pcxt)
{
    EState     *estate = node->ss.ps.state;

    node->ioss_PscanLen = index_parallelscan_estimate(node->ioss_RelationDesc,
                                                      estate->es_snapshot);
    shm_toc_estimate_chunk(&pcxt->estimator, node->ioss_PscanLen);
    shm_toc_estimate_keys(&pcxt->estimator, 1);
}

/* ----------------------------------------------------------------
 *      ExecIndexOnlyScanInitializeDSM
 *
 *      Set up a parallel index-only scan descriptor.
 * ----------------------------------------------------------------
 */
void
ExecIndexOnlyScanInitializeDSM(IndexOnlyScanState *node,
                               ParallelContext *pcxt)
{
    EState     *estate = node->ss.ps.state;
    ParallelIndexScanDesc piscan;

    piscan = shm_toc_allocate(pcxt->toc, node->ioss_PscanLen);
    index_parallelscan_initialize(node->ss.ss_currentRelation,
                                  node->ioss_RelationDesc,
                                  estate->es_snapshot,
                                  piscan);
    shm_toc_insert(pcxt->toc, node->ss.ps.plan->plan_node_id, piscan);
    node->ioss_ScanDesc =
        index_beginscan_parallel(node->ss.ss_currentRelation,
                                 node->ioss_RelationDesc,
                                 node->ioss_NumScanKeys,
                                 node->ioss_NumOrderByKeys,
                                 piscan);
    node->ioss_ScanDesc->xs_want_itup = true;
    node->ioss_VMBuffer = InvalidBuffer;

    /*
     * If no run-time keys to calculate or they are ready, go ahead and pass
     * the scankeys to the index AM.
     */
    if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady)
        index_rescan(node->ioss_ScanDesc,
                     node->ioss_ScanKeys, node->ioss_NumScanKeys,
                     node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
}

/* ----------------------------------------------------------------
 *      ExecIndexOnlyScanReInitializeDSM
 *
 *      Reset shared state before beginning a fresh scan.
 * ----------------------------------------------------------------
 */
void
ExecIndexOnlyScanReInitializeDSM(IndexOnlyScanState *node,
                                 ParallelContext *pcxt)
{
    index_parallelrescan(node->ioss_ScanDesc);
}

/* ----------------------------------------------------------------
 *      ExecIndexOnlyScanInitializeWorker
 *
 *      Copy relevant information from TOC into planstate.
 * ----------------------------------------------------------------
 */
void
ExecIndexOnlyScanInitializeWorker(IndexOnlyScanState *node,
                                  ParallelWorkerContext *pwcxt)
{
    ParallelIndexScanDesc piscan;

    piscan = shm_toc_lookup(pwcxt->toc, node->ss.ps.plan->plan_node_id, false);
    node->ioss_ScanDesc =
        index_beginscan_parallel(node->ss.ss_currentRelation,
                                 node->ioss_RelationDesc,
                                 node->ioss_NumScanKeys,
                                 node->ioss_NumOrderByKeys,
                                 piscan);
    node->ioss_ScanDesc->xs_want_itup = true;

    /*
     * If no run-time keys to calculate or they are ready, go ahead and pass
     * the scankeys to the index AM.
     */
    if (node->ioss_NumRuntimeKeys == 0 || node->ioss_RuntimeKeysReady)
        index_rescan(node->ioss_ScanDesc,
                     node->ioss_ScanKeys, node->ioss_NumScanKeys,
                     node->ioss_OrderByKeys, node->ioss_NumOrderByKeys);
}