/*-------------------------------------------------------------------------
 *
 * indexam.c
 *	  general index access method routines
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/index/indexam.c
 *
 * INTERFACE ROUTINES
 *		index_open		- open an index relation by relation OID
 *		index_close		- close an index relation
 *		index_beginscan - start a scan of an index with amgettuple
 *		index_beginscan_bitmap - start a scan of an index with amgetbitmap
 *		index_rescan	- restart a scan of an index
 *		index_endscan	- end a scan
 *		index_insert	- insert an index tuple into a relation
 *		index_markpos	- mark a scan position
 *		index_restrpos	- restore a scan position
 *		index_parallelscan_estimate - estimate shared memory for parallel scan
 *		index_parallelscan_initialize - initialize parallel scan
 *		index_parallelrescan - (re)start a parallel scan of an index
 *		index_beginscan_parallel - join parallel index scan
 *		index_getnext_tid - get the next TID from a scan
 *		index_fetch_heap - get the scan's next heap tuple
 *		index_getnext	- get the next heap tuple from a scan
 *		index_getbitmap - get all tuples from a scan
 *		index_bulk_delete - bulk deletion of index tuples
 *		index_vacuum_cleanup - post-deletion cleanup of an index
 *		index_can_return - does index support index-only scans?
 *		index_getprocid - get a support procedure OID
 *		index_getprocinfo - get a support procedure's lookup info
 *
 * NOTES
 *		This file contains the index_ routines which used
 *		to be a scattered collection of stuff in access/genam.
 *
 *
 * old comments
 *		Scans are implemented as follows:
 *
 *		`0' represents an invalid item pointer.
 *		`-' represents an unknown item pointer.
 *		`X' represents a known item pointer.
 *		`+' represents known or invalid item pointers.
 *		`*' represents any item pointer.
 *
 *		State is represented by a triple of these symbols in the order of
 *		previous, current, next.  Note that the case of reverse scans works
 *		identically.
 *
 *				State	Result
 *		(1)		+ + -	+ 0 0			(if the next item pointer is invalid)
 *		(2)				+ X -			(otherwise)
 *		(3)		* 0 0	* 0 0			(no change)
 *		(4)		+ X 0	X 0 0			(shift)
 *		(5)		* + X	+ X -			(shift, add unknown)
 *
 *		All other states cannot occur.
 *
 *		Note: It would be possible to cache the status of the previous and
 *			  next item pointer using the flags.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/amapi.h"
#include "access/relscan.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "catalog/index.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "utils/snapmgr.h"
#include "utils/tqual.h"


/* ----------------------------------------------------------------
 *					macros used in index_ routines
 *
 * Note: the ReindexIsProcessingIndex() check in RELATION_CHECKS is there
 * to check that we don't try to scan or do retail insertions into an index
 * that is currently being rebuilt or pending rebuild.  This helps to catch
 * things that don't work when reindexing system catalogs.  The assertion
 * doesn't prevent the actual rebuild because we don't use RELATION_CHECKS
 * when calling the index AM's ambuild routine, and there is no reason for
 * ambuild to call its subsidiary routines through this file.
 * ----------------------------------------------------------------
 */
#define RELATION_CHECKS \
( \
	AssertMacro(RelationIsValid(indexRelation)), \
	AssertMacro(PointerIsValid(indexRelation->rd_amroutine)), \
	AssertMacro(!ReindexIsProcessingIndex(RelationGetRelid(indexRelation))) \
)

#define SCAN_CHECKS \
( \
	AssertMacro(IndexScanIsValid(scan)), \
	AssertMacro(RelationIsValid(scan->indexRelation)), \
	AssertMacro(PointerIsValid(scan->indexRelation->rd_amroutine)) \
)

#define CHECK_REL_PROCEDURE(pname) \
do { \
	if (indexRelation->rd_amroutine->pname == NULL) \
		elog(ERROR, "function %s is not defined for index %s", \
			 CppAsString(pname), RelationGetRelationName(indexRelation)); \
} while(0)

#define CHECK_SCAN_PROCEDURE(pname) \
do { \
	if (scan->indexRelation->rd_amroutine->pname == NULL) \
		elog(ERROR, "function %s is not defined for index %s", \
			 CppAsString(pname), RelationGetRelationName(scan->indexRelation)); \
} while(0)

static IndexScanDesc index_beginscan_internal(Relation indexRelation,
						 int nkeys, int norderbys, Snapshot snapshot,
						 ParallelIndexScanDesc pscan, bool temp_snap);


/* ----------------------------------------------------------------
 *				   index_ interface functions
 * ----------------------------------------------------------------
 */

/* ----------------
 *		index_open - open an index relation by relation OID
 *
 *		If lockmode is not "NoLock", the specified kind of lock is
 *		obtained on the index.  (Generally, NoLock should only be
 *		used if the caller knows it has some appropriate lock on the
 *		index already.)
 *
 *		An error is raised if the index does not exist.
 *
 *		This is a convenience routine adapted for indexscan use.
 *		Some callers may prefer to use relation_open directly.
 * ----------------
 */
Relation
index_open(Oid relationId, LOCKMODE lockmode)
{
	Relation	r;

	r = relation_open(relationId, lockmode);

	if (r->rd_rel->relkind != RELKIND_INDEX &&
		r->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("\"%s\" is not an index",
						RelationGetRelationName(r))));

	return r;
}

/* ----------------
 *		index_close - close an index relation
 *
 *		If lockmode is not "NoLock", we then release the specified lock.
 *
 *		Note that it is often sensible to hold a lock beyond index_close;
 *		in that case, the lock is released automatically at xact end.
 * ----------------
 */
void
index_close(Relation relation, LOCKMODE lockmode)
{
	LockRelId	relid = relation->rd_lockInfo.lockRelId;

	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);

	/* The relcache does the real work... */
	RelationClose(relation);

	if (lockmode != NoLock)
		UnlockRelationId(&relid, lockmode);
}
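
/*
 * A minimal usage sketch for index_open/index_close (illustrative only, not
 * part of this file's API; "indexoid" is a hypothetical OID known to the
 * caller).  Opening with a lock and closing with NoLock keeps the lock
 * until transaction end, per the comments above:
 *
 *		Relation	irel = index_open(indexoid, AccessShareLock);
 *
 *		... use irel ...
 *
 *		index_close(irel, NoLock);		(lock held until xact end)
 */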

/* ----------------
 *		index_insert - insert an index tuple into a relation
 * ----------------
 */
bool
index_insert(Relation indexRelation,
			 Datum *values,
			 bool *isnull,
			 ItemPointer heap_t_ctid,
			 Relation heapRelation,
			 IndexUniqueCheck checkUnique,
			 IndexInfo *indexInfo)
{
	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(aminsert);

	if (!(indexRelation->rd_amroutine->ampredlocks))
		CheckForSerializableConflictIn(indexRelation,
									   (HeapTuple) NULL,
									   InvalidBuffer);

	return indexRelation->rd_amroutine->aminsert(indexRelation, values, isnull,
												 heap_t_ctid, heapRelation,
												 checkUnique, indexInfo);
}
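
/*
 * Illustrative sketch of a typical retail insertion, in the style of the
 * executor: form the index datums from a heap tuple, then call index_insert.
 * This is a hedged example, not code from this file; "slot", "estate",
 * "heapTuple", "indexRel", and "heapRel" are assumed to be set up by the
 * caller:
 *
 *		Datum		values[INDEX_MAX_KEYS];
 *		bool		isnull[INDEX_MAX_KEYS];
 *
 *		FormIndexDatum(indexInfo, slot, estate, values, isnull);
 *		index_insert(indexRel, values, isnull, &heapTuple->t_self,
 *					 heapRel, UNIQUE_CHECK_NO, indexInfo);
 */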

/*
 * index_beginscan - start a scan of an index with amgettuple
 *
 * Caller must be holding suitable locks on the heap and the index.
 */
IndexScanDesc
index_beginscan(Relation heapRelation,
				Relation indexRelation,
				Snapshot snapshot,
				int nkeys, int norderbys)
{
	IndexScanDesc scan;

	scan = index_beginscan_internal(indexRelation, nkeys, norderbys, snapshot, NULL, false);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by RelationGetIndexScan.
	 */
	scan->heapRelation = heapRelation;
	scan->xs_snapshot = snapshot;

	return scan;
}

/*
 * index_beginscan_bitmap - start a scan of an index with amgetbitmap
 *
 * As above, caller had better be holding some lock on the parent heap
 * relation, even though it's not explicitly mentioned here.
 */
IndexScanDesc
index_beginscan_bitmap(Relation indexRelation,
					   Snapshot snapshot,
					   int nkeys)
{
	IndexScanDesc scan;

	scan = index_beginscan_internal(indexRelation, nkeys, 0, snapshot, NULL, false);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by RelationGetIndexScan.
	 */
	scan->xs_snapshot = snapshot;

	return scan;
}

/*
 * index_beginscan_internal --- common code for index_beginscan variants
 */
static IndexScanDesc
index_beginscan_internal(Relation indexRelation,
						 int nkeys, int norderbys, Snapshot snapshot,
						 ParallelIndexScanDesc pscan, bool temp_snap)
{
	IndexScanDesc scan;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(ambeginscan);

	if (!(indexRelation->rd_amroutine->ampredlocks))
		PredicateLockRelation(indexRelation, snapshot);

	/*
	 * We hold a reference count to the relcache entry throughout the scan.
	 */
	RelationIncrementReferenceCount(indexRelation);

	/*
	 * Tell the AM to open a scan.
	 */
	scan = indexRelation->rd_amroutine->ambeginscan(indexRelation, nkeys,
													norderbys);
	/* Initialize information for parallel scan. */
	scan->parallel_scan = pscan;
	scan->xs_temp_snap = temp_snap;

	return scan;
}

/* ----------------
 *		index_rescan  - (re)start a scan of an index
 *
 * During a restart, the caller may specify a new set of scankeys and/or
 * orderbykeys; but the number of keys cannot differ from what index_beginscan
 * was told.  (Later we might relax that to "must not exceed", but currently
 * the index AMs tend to assume that scan->numberOfKeys is what to believe.)
 * To restart the scan without changing keys, pass NULL for the key arrays.
 * (Of course, keys *must* be passed on the first call, unless
 * scan->numberOfKeys is zero.)
 * ----------------
 */
void
index_rescan(IndexScanDesc scan,
			 ScanKey keys, int nkeys,
			 ScanKey orderbys, int norderbys)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amrescan);

	Assert(nkeys == scan->numberOfKeys);
	Assert(norderbys == scan->numberOfOrderBys);

	/* Release any held pin on a heap page */
	if (BufferIsValid(scan->xs_cbuf))
	{
		ReleaseBuffer(scan->xs_cbuf);
		scan->xs_cbuf = InvalidBuffer;
	}

	scan->xs_continue_hot = false;

	scan->kill_prior_tuple = false; /* for safety */

	scan->indexRelation->rd_amroutine->amrescan(scan, keys, nkeys,
												orderbys, norderbys);
}

/* ----------------
 *		index_endscan - end a scan
 * ----------------
 */
void
index_endscan(IndexScanDesc scan)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amendscan);

	/* Release any held pin on a heap page */
	if (BufferIsValid(scan->xs_cbuf))
	{
		ReleaseBuffer(scan->xs_cbuf);
		scan->xs_cbuf = InvalidBuffer;
	}

	/* End the AM's scan */
	scan->indexRelation->rd_amroutine->amendscan(scan);

	/* Release index refcount acquired by index_beginscan */
	RelationDecrementReferenceCount(scan->indexRelation);

	if (scan->xs_temp_snap)
		UnregisterSnapshot(scan->xs_snapshot);

	/* Release the scan data structure itself */
	IndexScanEnd(scan);
}

/* ----------------
 *		index_markpos  - mark a scan position
 * ----------------
 */
void
index_markpos(IndexScanDesc scan)
{
	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(ammarkpos);

	scan->indexRelation->rd_amroutine->ammarkpos(scan);
}

/* ----------------
 *		index_restrpos	- restore a scan position
 *
 * NOTE: this only restores the internal scan state of the index AM.  The
 * current result tuple (scan->xs_ctup) doesn't change.  See comments for
 * ExecRestrPos().
 *
 * NOTE: in the presence of HOT chains, mark/restore only works correctly
 * if the scan's snapshot is MVCC-safe; that ensures that there's at most one
 * returnable tuple in each HOT chain, and so restoring the prior state at the
 * granularity of the index AM is sufficient.  Since the only current user
 * of mark/restore functionality is nodeMergejoin.c, this effectively means
 * that merge-join plans only work for MVCC snapshots.  This could be fixed
 * if necessary, but for now it seems unimportant.
 * ----------------
 */
void
index_restrpos(IndexScanDesc scan)
{
	Assert(IsMVCCSnapshot(scan->xs_snapshot));

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amrestrpos);

	scan->xs_continue_hot = false;

	scan->kill_prior_tuple = false; /* for safety */

	scan->indexRelation->rd_amroutine->amrestrpos(scan);
}

/*
 * index_parallelscan_estimate - estimate shared memory for parallel scan
 *
 * Currently, we don't pass any information to the AM-specific estimator,
 * so it can probably only return a constant.  In the future, we might need
 * to pass more information.
 */
Size
index_parallelscan_estimate(Relation indexRelation, Snapshot snapshot)
{
	Size		nbytes;

	RELATION_CHECKS;

	nbytes = offsetof(ParallelIndexScanDescData, ps_snapshot_data);
	nbytes = add_size(nbytes, EstimateSnapshotSpace(snapshot));
	nbytes = MAXALIGN(nbytes);

	/*
	 * If amestimateparallelscan is not provided, assume there is no
	 * AM-specific data needed.  (It's hard to believe that could work, but
	 * it's easy enough to cater to it here.)
	 */
	if (indexRelation->rd_amroutine->amestimateparallelscan != NULL)
		nbytes = add_size(nbytes,
						  indexRelation->rd_amroutine->amestimateparallelscan());

	return nbytes;
}

/*
 * index_parallelscan_initialize - initialize parallel scan
 *
 * We initialize both the ParallelIndexScanDesc proper and the AM-specific
 * information which follows it.
 *
 * This function calls the access-method-specific initialization routine to
 * set up the AM-specific portion.  Call this just once in the leader
 * process; then, individual workers attach via index_beginscan_parallel.
 */
void
index_parallelscan_initialize(Relation heapRelation, Relation indexRelation,
							  Snapshot snapshot, ParallelIndexScanDesc target)
{
	Size		offset;

	RELATION_CHECKS;

	offset = add_size(offsetof(ParallelIndexScanDescData, ps_snapshot_data),
					  EstimateSnapshotSpace(snapshot));
	offset = MAXALIGN(offset);

	target->ps_relid = RelationGetRelid(heapRelation);
	target->ps_indexid = RelationGetRelid(indexRelation);
	target->ps_offset = offset;
	SerializeSnapshot(snapshot, target->ps_snapshot_data);

	/* aminitparallelscan is optional; assume no-op if not provided by AM */
	if (indexRelation->rd_amroutine->aminitparallelscan != NULL)
	{
		void	   *amtarget;

		amtarget = OffsetToPointer(target, offset);
		indexRelation->rd_amroutine->aminitparallelscan(amtarget);
	}
}
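
/*
 * Illustrative leader/worker flow for a parallel index scan (a hedged
 * sketch, not code from this file; allocation of the descriptor in dynamic
 * shared memory via a shm_toc is assumed to be handled by the caller, as
 * the executor does):
 *
 *	  leader:
 *		Size	size = index_parallelscan_estimate(indexRel, snapshot);
 *		ParallelIndexScanDesc pscan = shm_toc_allocate(toc, size);
 *		index_parallelscan_initialize(heapRel, indexRel, snapshot, pscan);
 *
 *	  each worker (and the leader, if it participates):
 *		scan = index_beginscan_parallel(heapRel, indexRel, nkeys, norderbys,
 *										pscan);
 *
 *	  on rescan, the leader calls index_parallelrescan(scan) before the
 *	  workers restart their scans.
 */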

/* ----------------
 *		index_parallelrescan  - (re)start a parallel scan of an index
 * ----------------
 */
void
index_parallelrescan(IndexScanDesc scan)
{
	SCAN_CHECKS;

	/* amparallelrescan is optional; assume no-op if not provided by AM */
	if (scan->indexRelation->rd_amroutine->amparallelrescan != NULL)
		scan->indexRelation->rd_amroutine->amparallelrescan(scan);
}

/*
 * index_beginscan_parallel - join parallel index scan
 *
 * Caller must be holding suitable locks on the heap and the index.
 */
IndexScanDesc
index_beginscan_parallel(Relation heaprel, Relation indexrel, int nkeys,
						 int norderbys, ParallelIndexScanDesc pscan)
{
	Snapshot	snapshot;
	IndexScanDesc scan;

	Assert(RelationGetRelid(heaprel) == pscan->ps_relid);
	snapshot = RestoreSnapshot(pscan->ps_snapshot_data);
	RegisterSnapshot(snapshot);
	scan = index_beginscan_internal(indexrel, nkeys, norderbys, snapshot,
									pscan, true);

	/*
	 * Save additional parameters into the scandesc.  Everything else was set
	 * up by index_beginscan_internal.
	 */
	scan->heapRelation = heaprel;
	scan->xs_snapshot = snapshot;

	return scan;
}

/* ----------------
 * index_getnext_tid - get the next TID from a scan
 *
 * The result is the next TID satisfying the scan keys,
 * or NULL if no more matching tuples exist.
 * ----------------
 */
ItemPointer
index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
{
	bool		found;

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amgettuple);

	Assert(TransactionIdIsValid(RecentGlobalXmin));

	/*
	 * The AM's amgettuple proc finds the next index entry matching the scan
	 * keys, and puts the TID into scan->xs_ctup.t_self.  It should also set
	 * scan->xs_recheck and possibly scan->xs_itup/scan->xs_hitup, though we
	 * pay no attention to those fields here.
	 */
	found = scan->indexRelation->rd_amroutine->amgettuple(scan, direction);

	/* Reset kill flag immediately for safety */
	scan->kill_prior_tuple = false;

	/* If we're out of index entries, we're done */
	if (!found)
	{
		/* ... but first, release any held pin on a heap page */
		if (BufferIsValid(scan->xs_cbuf))
		{
			ReleaseBuffer(scan->xs_cbuf);
			scan->xs_cbuf = InvalidBuffer;
		}
		return NULL;
	}

	pgstat_count_index_tuples(scan->indexRelation, 1);

	/* Return the TID of the tuple we found. */
	return &scan->xs_ctup.t_self;
}

/* ----------------
 *		index_fetch_heap - get the scan's next heap tuple
 *
 * The result is a visible heap tuple associated with the index TID most
 * recently fetched by index_getnext_tid, or NULL if no more matching tuples
 * exist.  (There can be more than one matching tuple because of HOT chains,
 * although when using an MVCC snapshot it should be impossible for more than
 * one such tuple to exist.)
 *
 * On success, the buffer containing the heap tuple is pinned (the pin will be
 * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
HeapTuple
index_fetch_heap(IndexScanDesc scan)
{
	ItemPointer tid = &scan->xs_ctup.t_self;
	bool		all_dead = false;
	bool		got_heap_tuple;

	/* We can skip the buffer-switching logic if we're in mid-HOT chain. */
	if (!scan->xs_continue_hot)
	{
		/* Switch to correct buffer if we don't have it already */
		Buffer		prev_buf = scan->xs_cbuf;

		scan->xs_cbuf = ReleaseAndReadBuffer(scan->xs_cbuf,
											 scan->heapRelation,
											 ItemPointerGetBlockNumber(tid));

		/*
		 * Prune page, but only if we weren't already on this page
		 */
		if (prev_buf != scan->xs_cbuf)
			heap_page_prune_opt(scan->heapRelation, scan->xs_cbuf);
	}

	/* Obtain share-lock on the buffer so we can examine visibility */
	LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
	got_heap_tuple = heap_hot_search_buffer(tid, scan->heapRelation,
											scan->xs_cbuf,
											scan->xs_snapshot,
											&scan->xs_ctup,
											&all_dead,
											!scan->xs_continue_hot);
	LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);

	if (got_heap_tuple)
	{
		/*
		 * Only in a non-MVCC snapshot can more than one member of the HOT
		 * chain be visible.
		 */
		scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
		pgstat_count_heap_fetch(scan->indexRelation);
		return &scan->xs_ctup;
	}

	/* We've reached the end of the HOT chain. */
	scan->xs_continue_hot = false;

	/*
	 * If we scanned a whole HOT chain and found only dead tuples, tell index
	 * AM to kill its entry for that TID (this will take effect in the next
	 * amgettuple call, in index_getnext_tid).  We do not do this when in
	 * recovery because it may violate MVCC to do so.  See comments in
	 * RelationGetIndexScan().
	 */
	if (!scan->xactStartedInRecovery)
		scan->kill_prior_tuple = all_dead;

	return NULL;
}
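
/*
 * Illustrative sketch of the split TID/heap API, in the style of an
 * index-only scan (hedged; the "vmbuffer" bookkeeping and the decision
 * logic are simplified relative to what the executor actually does):
 *
 *		ItemPointer tid;
 *
 *		while ((tid = index_getnext_tid(scan, ForwardScanDirection)) != NULL)
 *		{
 *			if (VM_ALL_VISIBLE(scan->heapRelation,
 *							   ItemPointerGetBlockNumber(tid), &vmbuffer))
 *			{
 *				... use scan->xs_itup / scan->xs_hitup, no heap fetch ...
 *			}
 *			else if (index_fetch_heap(scan) != NULL)
 *			{
 *				... recheck scan->xs_recheck if set, then use the tuple ...
 *			}
 *		}
 */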

/* ----------------
 *		index_getnext - get the next heap tuple from a scan
 *
 * The result is the next heap tuple satisfying the scan keys and the
 * snapshot, or NULL if no more matching tuples exist.
 *
 * On success, the buffer containing the heap tuple is pinned (the pin will be
 * dropped in a future index_getnext_tid, index_fetch_heap or index_endscan
 * call).
 *
 * Note: caller must check scan->xs_recheck, and perform rechecking of the
 * scan keys if required.  We do not do that here because we don't have
 * enough information to do it efficiently in the general case.
 * ----------------
 */
HeapTuple
index_getnext(IndexScanDesc scan, ScanDirection direction)
{
	HeapTuple	heapTuple;
	ItemPointer tid;

	for (;;)
	{
		if (scan->xs_continue_hot)
		{
			/*
			 * We are resuming scan of a HOT chain after having returned an
			 * earlier member.  Must still hold pin on current heap page.
			 */
			Assert(BufferIsValid(scan->xs_cbuf));
			Assert(ItemPointerGetBlockNumber(&scan->xs_ctup.t_self) ==
				   BufferGetBlockNumber(scan->xs_cbuf));
		}
		else
		{
			/* Time to fetch the next TID from the index */
			tid = index_getnext_tid(scan, direction);

			/* If we're out of index entries, we're done */
			if (tid == NULL)
				break;
		}

		/*
		 * Fetch the next (or only) visible heap tuple for this index entry.
		 * If we don't find anything, loop around and grab the next TID from
		 * the index.
		 */
		heapTuple = index_fetch_heap(scan);
		if (heapTuple != NULL)
			return heapTuple;
	}

	return NULL;				/* failure exit */
}
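
/*
 * A minimal end-to-end scan loop (a hedged sketch, not code from this file;
 * assumes the caller holds suitable locks on both relations, and that
 * "attnum", "eqproc", and "keyDatum" describe a hypothetical equality qual
 * on one indexed column):
 *
 *		ScanKeyData skey;
 *		IndexScanDesc scan;
 *		HeapTuple	tup;
 *
 *		ScanKeyInit(&skey, attnum, BTEqualStrategyNumber, eqproc, keyDatum);
 *
 *		scan = index_beginscan(heapRel, indexRel, snapshot, 1, 0);
 *		index_rescan(scan, &skey, 1, NULL, 0);
 *		while ((tup = index_getnext(scan, ForwardScanDirection)) != NULL)
 *		{
 *			... process tup, rechecking the quals if scan->xs_recheck ...
 *		}
 *		index_endscan(scan);
 */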

/* ----------------
 *		index_getbitmap - get all tuples at once from an index scan
 *
 * Adds the TIDs of all heap tuples satisfying the scan keys to a bitmap.
 * Since there's no interlock between the index scan and the eventual heap
 * access, this is only safe to use with MVCC-based snapshots: the heap
 * item slot could have been replaced by a newer tuple by the time we get
 * to it.
 *
 * Returns the number of matching tuples found.  (Note: this might be only
 * approximate, so it should only be used for statistical purposes.)
 * ----------------
 */
int64
index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
{
	int64		ntids;

	SCAN_CHECKS;
	CHECK_SCAN_PROCEDURE(amgetbitmap);

	/* just make sure this is false... */
	scan->kill_prior_tuple = false;

	/*
	 * have the am's getbitmap proc do all the work.
	 */
	ntids = scan->indexRelation->rd_amroutine->amgetbitmap(scan, bitmap);

	pgstat_count_index_tuples(scan->indexRelation, ntids);

	return ntids;
}
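
/*
 * Illustrative bitmap-scan usage (a hedged sketch in the style of a bitmap
 * index scan node; the work_mem-based sizing and the NULL dsa_area argument
 * are assumptions appropriate to a backend-private bitmap):
 *
 *		TIDBitmap  *tbm = tbm_create(work_mem * 1024L, NULL);
 *		IndexScanDesc scan;
 *
 *		scan = index_beginscan_bitmap(indexRel, snapshot, nkeys);
 *		index_rescan(scan, skeys, nkeys, NULL, 0);
 *		(void) index_getbitmap(scan, tbm);
 *		index_endscan(scan);
 *
 *		... iterate the bitmap with tbm_begin_iterate/tbm_iterate ...
 */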

/* ----------------
 *		index_bulk_delete - do mass deletion of index entries
 *
 *		callback routine tells whether a given main-heap tuple is
 *		to be deleted
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_bulk_delete(IndexVacuumInfo *info,
				  IndexBulkDeleteResult *stats,
				  IndexBulkDeleteCallback callback,
				  void *callback_state)
{
	Relation	indexRelation = info->index;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(ambulkdelete);

	return indexRelation->rd_amroutine->ambulkdelete(info, stats,
													 callback, callback_state);
}
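
/*
 * Sketch of a deletion callback in the style of VACUUM's reaped-TID test
 * (hedged; "DeadTids" and its fields are hypothetical, and a real caller
 * would keep the TIDs sorted and use a binary search rather than this
 * linear scan):
 *
 *		typedef struct DeadTids
 *		{
 *			ItemPointerData *tids;
 *			int			ntids;
 *		} DeadTids;
 *
 *		static bool
 *		tid_is_reaped(ItemPointer itemptr, void *state)
 *		{
 *			DeadTids   *dead = (DeadTids *) state;
 *			int			i;
 *
 *			for (i = 0; i < dead->ntids; i++)
 *				if (ItemPointerEquals(itemptr, &dead->tids[i]))
 *					return true;	(delete this index entry)
 *			return false;
 *		}
 *
 *		stats = index_bulk_delete(info, NULL, tid_is_reaped, &deadtids);
 */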

/* ----------------
 *		index_vacuum_cleanup - do post-deletion cleanup of an index
 *
 *		return value is an optional palloc'd struct of statistics
 * ----------------
 */
IndexBulkDeleteResult *
index_vacuum_cleanup(IndexVacuumInfo *info,
					 IndexBulkDeleteResult *stats)
{
	Relation	indexRelation = info->index;

	RELATION_CHECKS;
	CHECK_REL_PROCEDURE(amvacuumcleanup);

	return indexRelation->rd_amroutine->amvacuumcleanup(info, stats);
}

/* ----------------
 *		index_can_return
 *
 *		Does the index access method support index-only scans for the given
 *		column?
 * ----------------
 */
bool
index_can_return(Relation indexRelation, int attno)
{
	RELATION_CHECKS;

	/* amcanreturn is optional; assume false if not provided by AM */
	if (indexRelation->rd_amroutine->amcanreturn == NULL)
		return false;

	return indexRelation->rd_amroutine->amcanreturn(indexRelation, attno);
}

/* ----------------
 *		index_getprocid
 *
 *		Index access methods typically require support routines that are
 *		not directly the implementation of any WHERE-clause query operator
 *		and so cannot be kept in pg_amop.  Instead, such routines are kept
 *		in pg_amproc.  These registered procedure OIDs are assigned numbers
 *		according to a convention established by the access method.
 *		The general index code doesn't know anything about the routines
 *		involved; it just builds an ordered list of them for
 *		each attribute on which an index is defined.
 *
 *		As of Postgres 8.3, support routines within an operator family
 *		are further subdivided by the "left type" and "right type" of the
 *		query operator(s) that they support.  The "default" functions for a
 *		particular indexed attribute are those with both types equal to
 *		the index opclass' opcintype (note that this is subtly different
 *		from the indexed attribute's own type: it may be a binary-compatible
 *		type instead).  Only the default functions are stored in relcache
 *		entries --- access methods can use the syscache to look up non-default
 *		functions.
 *
 *		This routine returns the requested default procedure OID for a
 *		particular indexed attribute.
 * ----------------
 */
RegProcedure
index_getprocid(Relation irel,
				AttrNumber attnum,
				uint16 procnum)
{
	RegProcedure *loc;
	int			nproc;
	int			procindex;

	nproc = irel->rd_amroutine->amsupport;

	Assert(procnum > 0 && procnum <= (uint16) nproc);

	procindex = (nproc * (attnum - 1)) + (procnum - 1);

	loc = irel->rd_support;

	Assert(loc != NULL);

	return loc[procindex];
}

/* ----------------
 *		index_getprocinfo
 *
 *		This routine allows index AMs to keep fmgr lookup info for
 *		support procs in the relcache.  As above, only the "default"
 *		functions for any particular indexed attribute are cached.
 *
 * Note: the return value points into cached data that will be lost during
 * any relcache rebuild!  Therefore, either use the callinfo right away,
 * or save it only after having acquired some type of lock on the index rel.
 * ----------------
 */
FmgrInfo *
index_getprocinfo(Relation irel,
				  AttrNumber attnum,
				  uint16 procnum)
{
	FmgrInfo   *locinfo;
	int			nproc;
	int			procindex;

	nproc = irel->rd_amroutine->amsupport;

	Assert(procnum > 0 && procnum <= (uint16) nproc);

	procindex = (nproc * (attnum - 1)) + (procnum - 1);

	locinfo = irel->rd_supportinfo;

	Assert(locinfo != NULL);

	locinfo += procindex;

	/* Initialize the lookup info if first time through */
	if (locinfo->fn_oid == InvalidOid)
	{
		RegProcedure *loc = irel->rd_support;
		RegProcedure procId;

		Assert(loc != NULL);

		procId = loc[procindex];

		/*
		 * Complain if function was not found during IndexSupportInitialize.
		 * This should not happen unless the system tables contain bogus
		 * entries for the index opclass.  (If an AM wants to allow a support
		 * function to be optional, it can use index_getprocid.)
		 */
		if (!RegProcedureIsValid(procId))
			elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
				 procnum, attnum, RelationGetRelationName(irel));

		fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
	}

	return locinfo;
}
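
/*
 * Illustrative use of a cached support-function lookup, in the style of a
 * btree comparison (hedged; BTORDER_PROC is btree's ordering support
 * number, and "datum1", "datum2", and "collation" are assumed inputs):
 *
 *		FmgrInfo   *cmp = index_getprocinfo(irel, attnum, BTORDER_PROC);
 *		int32		result;
 *
 *		result = DatumGetInt32(FunctionCall2Coll(cmp, collation,
 *												 datum1, datum2));
 */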