1 /*-------------------------------------------------------------------------
2  *
3  * nbtree.h
4  *	  header file for postgres btree access method implementation.
5  *
6  *
7  * Portions Copyright (c) 1996-2020, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  * src/include/access/nbtree.h
11  *
12  *-------------------------------------------------------------------------
13  */
14 #ifndef NBTREE_H
15 #define NBTREE_H
16 
17 #include "access/amapi.h"
18 #include "access/itup.h"
19 #include "access/sdir.h"
20 #include "access/xlogreader.h"
21 #include "catalog/pg_am_d.h"
22 #include "catalog/pg_index.h"
23 #include "lib/stringinfo.h"
24 #include "storage/bufmgr.h"
25 #include "storage/shm_toc.h"
26 
27 /* There's room for a 16-bit vacuum cycle ID in BTPageOpaqueData */
28 typedef uint16 BTCycleId;
29 
30 /*
31  *	BTPageOpaqueData -- At the end of every page, we store a pointer
32  *	to both siblings in the tree.  This is used to do forward/backward
33  *	index scans.  The next-page link is also critical for recovery when
34  *	a search has navigated to the wrong page due to concurrent page splits
35  *	or deletions; see src/backend/access/nbtree/README for more info.
36  *
37  *	In addition, we store the page's btree level (counting upwards from
38  *	zero at a leaf page) as well as some flag bits indicating the page type
39  *	and status.  If the page is deleted, we replace the level with the
40  *	next-transaction-ID value indicating when it is safe to reclaim the page.
41  *
42  *	We also store a "vacuum cycle ID".  When a page is split while VACUUM is
43  *	processing the index, a nonzero value associated with the VACUUM run is
44  *	stored into both halves of the split page.  (If VACUUM is not running,
45  *	both pages receive zero cycleids.)	This allows VACUUM to detect whether
46  *	a page was split since it started, with a small probability of false match
47  *	if the page was last split some exact multiple of MAX_BT_CYCLE_ID VACUUMs
48  *	ago.  Also, during a split, the BTP_SPLIT_END flag is cleared in the left
49  *	(original) page, and set in the right page, but only if the next page
50  *	to its right has a different cycleid.
51  *
52  *	NOTE: the BTP_LEAF flag bit is redundant since level==0 could be tested
53  *	instead.
54  */
55 
56 typedef struct BTPageOpaqueData
57 {
58 	BlockNumber btpo_prev;		/* left sibling, or P_NONE if leftmost */
59 	BlockNumber btpo_next;		/* right sibling, or P_NONE if rightmost */
60 	union
61 	{
62 		uint32		level;		/* tree level --- zero for leaf pages */
63 		TransactionId xact;		/* next transaction ID, if deleted */
64 	}			btpo;
65 	uint16		btpo_flags;		/* flag bits, see below */
66 	BTCycleId	btpo_cycleid;	/* vacuum cycle ID of latest split */
67 } BTPageOpaqueData;
68 
69 typedef BTPageOpaqueData *BTPageOpaque;
70 
71 /* Bits defined in btpo_flags */
72 #define BTP_LEAF		(1 << 0)	/* leaf page, i.e. not internal page */
73 #define BTP_ROOT		(1 << 1)	/* root page (has no parent) */
74 #define BTP_DELETED		(1 << 2)	/* page has been deleted from tree */
75 #define BTP_META		(1 << 3)	/* meta-page */
76 #define BTP_HALF_DEAD	(1 << 4)	/* empty, but still in tree */
77 #define BTP_SPLIT_END	(1 << 5)	/* rightmost page of split group */
78 #define BTP_HAS_GARBAGE (1 << 6)	/* page has LP_DEAD tuples */
79 #define BTP_INCOMPLETE_SPLIT (1 << 7)	/* right sibling's downlink is missing */
80 
81 /*
82  * The max allowed value of a cycle ID is a bit less than 64K.  This is
83  * for convenience of pg_filedump and similar utilities: we want to use
84  * the last 2 bytes of special space as an index type indicator, and
85  * restricting cycle ID lets btree use that space for vacuum cycle IDs
86  * while still allowing index type to be identified.
87  */
88 #define MAX_BT_CYCLE_ID		0xFF7F
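
/*
 * Illustrative sketch (not part of nbtree itself; variable names are
 * placeholders): a page-inspection tool such as pg_filedump can guess
 * whether a page belongs to a btree index by looking at the last two bytes
 * of its special space.  For btree pages those bytes hold btpo_cycleid,
 * which is always <= MAX_BT_CYCLE_ID, while other access methods store a
 * signature above that value there (for example, hash pages store
 * HASHO_PAGE_ID, 0xFF80).  Roughly:
 *
 *		uint16	sig = *((uint16 *) ((char *) page + BLCKSZ - sizeof(uint16)));
 *
 *		if (sig <= MAX_BT_CYCLE_ID)
 *			... page may be a btree page ...
 */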
89 
90 
91 /*
92  * The Meta page is always the first page in the btree index.
93  * Its primary purpose is to point to the location of the btree root page.
94  * We also point to the "fast" root, which is the current effective root;
95  * see README for discussion.
96  */
97 
98 typedef struct BTMetaPageData
99 {
100 	uint32		btm_magic;		/* should contain BTREE_MAGIC */
101 	uint32		btm_version;	/* nbtree version (always <= BTREE_VERSION) */
102 	BlockNumber btm_root;		/* current root location */
103 	uint32		btm_level;		/* tree level of the root page */
104 	BlockNumber btm_fastroot;	/* current "fast" root location */
105 	uint32		btm_fastlevel;	/* tree level of the "fast" root page */
106 	/* remaining fields only valid when btm_version >= BTREE_NOVAC_VERSION */
107 	TransactionId btm_oldest_btpo_xact; /* oldest btpo_xact among all deleted
108 										 * pages */
109 	float8		btm_last_cleanup_num_heap_tuples;	/* number of heap tuples
110 													 * during last cleanup */
111 	bool		btm_allequalimage;	/* are all columns "equalimage"? */
112 } BTMetaPageData;
113 
114 #define BTPageGetMeta(p) \
115 	((BTMetaPageData *) PageGetContents(p))
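
/*
 * Hedged sketch of how the metapage is typically read (compare _bt_getmeta()
 * in nbtpage.c); variable names are placeholders, and buffer locking and
 * metapage caching are omitted:
 *
 *		Buffer		metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
 *		Page		metapg = BufferGetPage(metabuf);
 *		BTMetaPageData *metad = BTPageGetMeta(metapg);
 *
 *		Assert(metad->btm_magic == BTREE_MAGIC);
 *		... consult metad->btm_root, metad->btm_fastroot, etc ...
 *		_bt_relbuf(rel, metabuf);
 */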
116 
117 /*
118  * The current Btree version is 4.  That's what you'll get when you create
119  * a new index.
120  *
121  * Btree version 3 was used in PostgreSQL v11.  It is mostly the same as
122  * version 4, but heap TIDs were not part of the keyspace.  Index tuples
123  * with duplicate keys could be stored in any order.  We continue to
124  * support reading and writing Btree versions 2 and 3, so that they don't
125  * need to be immediately re-indexed after pg_upgrade.  In order to get the
126  * new heapkeyspace semantics, however, a REINDEX is needed.
127  *
128  * Deduplication is safe to use when the btm_allequalimage field is set to
129  * true.  It's safe to read the btm_allequalimage field on version 3, but
130  * only version 4 indexes make use of deduplication.  Even version 4
131  * indexes created on PostgreSQL v12 will need a REINDEX to make use of
132  * deduplication, though, since there is no other way to set
133  * btm_allequalimage to true (pg_upgrade hasn't been taught to set the
134  * metapage field).
135  *
136  * Btree version 2 is mostly the same as version 3.  There are two new
137  * fields in the metapage that were introduced in version 3.  A version 2
138  * metapage will be automatically upgraded to version 3 on the first
139  * insert to it.  INCLUDE indexes cannot use version 2.
140  */
141 #define BTREE_METAPAGE	0		/* first page is meta */
142 #define BTREE_MAGIC		0x053162	/* magic number in metapage */
143 #define BTREE_VERSION	4		/* current version number */
144 #define BTREE_MIN_VERSION	2	/* minimum supported version */
145 #define BTREE_NOVAC_VERSION	3	/* version with all meta fields set */
146 
147 /*
148  * Maximum size of a btree index entry, including its tuple header.
149  *
150  * We actually need to be able to fit three items on every page,
151  * so restrict any one item to 1/3 the per-page available space.
152  *
153  * There are rare cases where _bt_truncate() will need to enlarge
154  * a heap index tuple to make space for a tiebreaker heap TID
155  * attribute, which we account for here.
156  */
157 #define BTMaxItemSize(page) \
158 	MAXALIGN_DOWN((PageGetPageSize(page) - \
159 				   MAXALIGN(SizeOfPageHeaderData + \
160 							3*sizeof(ItemIdData)  + \
161 							3*sizeof(ItemPointerData)) - \
162 				   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
163 #define BTMaxItemSizeNoHeapTid(page) \
164 	MAXALIGN_DOWN((PageGetPageSize(page) - \
165 				   MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
166 				   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
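
/*
 * Worked example, assuming the default 8 kB block size and a MAXALIGN of 8:
 * BTMaxItemSize() works out to 2704 bytes and BTMaxItemSizeNoHeapTid() to
 * 2712 bytes.  These are the limits reported by the "index row size ...
 * exceeds ... maximum" errors raised for version 4 and version 3 indexes,
 * respectively (see _bt_check_third_page()).
 */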
167 
168 /*
169  * MaxTIDsPerBTreePage is an upper bound on the number of heap TIDs
170  * that may be stored on a btree leaf page.  It is used to size the
171  * per-page temporary buffers used by index scans.
172  *
173  * Note: we don't bother considering per-tuple overheads here to keep
174  * things simple (value is based on how many elements a single array of
175  * heap TIDs must have to fill the space between the page header and
176  * special area).  The value is slightly higher (i.e. more conservative)
177  * than necessary as a result, which is considered acceptable.
178  */
179 #define MaxTIDsPerBTreePage \
180 	(int) ((BLCKSZ - SizeOfPageHeaderData - sizeof(BTPageOpaqueData)) / \
181 		   sizeof(ItemPointerData))
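
/*
 * With the default 8 kB block size, this works out to
 * (8192 - 24 - 16) / 6 = 1358 heap TIDs per leaf page.
 */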
182 
183 /*
184  * The leaf-page fillfactor defaults to 90% but is user-adjustable.
185  * For pages above the leaf level, we use a fixed 70% fillfactor.
186  * The fillfactor is applied during index build and when splitting
187  * a rightmost page; when splitting non-rightmost pages we try to
188  * divide the data equally.  When splitting a page that's entirely
189  * filled with a single value (duplicates), the effective leaf-page
190  * fillfactor is 96%, regardless of whether the page is a rightmost
191  * page.
192  */
193 #define BTREE_MIN_FILLFACTOR		10
194 #define BTREE_DEFAULT_FILLFACTOR	90
195 #define BTREE_NONLEAF_FILLFACTOR	70
196 #define BTREE_SINGLEVAL_FILLFACTOR	96
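
/*
 * For example, with the default leaf fillfactor of 90 and an 8 kB block
 * size, BTGetTargetPageFreeSpace() (defined below) reserves
 * 8192 * (100 - 90) / 100 = 819 bytes of free space when a rightmost leaf
 * page is split or filled during an index build.
 */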
197 
198 /*
199  *	In general, the btree code tries to localize its knowledge about
200  *	page layout to a couple of routines.  However, we need a special
201  *	value to indicate "no page number" in those places where we expect
202  *	page numbers.  We can use zero for this because we never need to
203  *	make a pointer to the metadata page.
204  */
205 
206 #define P_NONE			0
207 
208 /*
209  * Macros to test whether a page is leftmost or rightmost on its tree level,
210  * as well as other state info kept in the opaque data.
211  */
212 #define P_LEFTMOST(opaque)		((opaque)->btpo_prev == P_NONE)
213 #define P_RIGHTMOST(opaque)		((opaque)->btpo_next == P_NONE)
214 #define P_ISLEAF(opaque)		(((opaque)->btpo_flags & BTP_LEAF) != 0)
215 #define P_ISROOT(opaque)		(((opaque)->btpo_flags & BTP_ROOT) != 0)
216 #define P_ISDELETED(opaque)		(((opaque)->btpo_flags & BTP_DELETED) != 0)
217 #define P_ISMETA(opaque)		(((opaque)->btpo_flags & BTP_META) != 0)
218 #define P_ISHALFDEAD(opaque)	(((opaque)->btpo_flags & BTP_HALF_DEAD) != 0)
219 #define P_IGNORE(opaque)		(((opaque)->btpo_flags & (BTP_DELETED|BTP_HALF_DEAD)) != 0)
220 #define P_HAS_GARBAGE(opaque)	(((opaque)->btpo_flags & BTP_HAS_GARBAGE) != 0)
221 #define P_INCOMPLETE_SPLIT(opaque)	(((opaque)->btpo_flags & BTP_INCOMPLETE_SPLIT) != 0)
222 
223 /*
224  *	Lehman and Yao's algorithm requires a ``high key'' on every non-rightmost
225  *	page.  The high key is not a tuple that is used to visit the heap.  It is
226  *	a pivot tuple (see "Notes on B-Tree tuple format" below for definition).
227  *	The high key on a page is required to be greater than or equal to any
228  *	other key that appears on the page.  If we find ourselves trying to
229  *	insert a key that is strictly > high key, we know we need to move right
230  *	(this should only happen if the page was split since we examined the
231  *	parent page).
232  *
233  *	Our insertion algorithm guarantees that we can use the initial least key
234  *	on our right sibling as the high key.  Once a page is created, its high
235  *	key changes only if the page is split.
236  *
237  *	On a non-rightmost page, the high key lives in item 1 and data items
238  *	start in item 2.  Rightmost pages have no high key, so we store data
239  *	items beginning in item 1.
240  */
241 
242 #define P_HIKEY				((OffsetNumber) 1)
243 #define P_FIRSTKEY			((OffsetNumber) 2)
244 #define P_FIRSTDATAKEY(opaque)	(P_RIGHTMOST(opaque) ? P_HIKEY : P_FIRSTKEY)
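
/*
 * A scan of a page's data items therefore typically looks something like
 * this sketch (compare _bt_readpage() in nbtsearch.c; variable names are
 * placeholders):
 *
 *		OffsetNumber minoff = P_FIRSTDATAKEY(opaque);
 *		OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
 *		OffsetNumber off;
 *
 *		for (off = minoff; off <= maxoff; off = OffsetNumberNext(off))
 *		{
 *			ItemId		itemid = PageGetItemId(page, off);
 *			IndexTuple	itup = (IndexTuple) PageGetItem(page, itemid);
 *
 *			... process itup ...
 *		}
 */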
245 
246 /*
247  * Notes on B-Tree tuple format, and key and non-key attributes:
248  *
249  * INCLUDE B-Tree indexes have non-key attributes.  These are extra
250  * attributes that may be returned by index-only scans, but do not influence
251  * the order of items in the index (formally, non-key attributes are not
252  * considered to be part of the key space).  Non-key attributes are only
253  * present in leaf index tuples whose item pointers actually point to heap
254  * tuples (non-pivot tuples).  _bt_check_natts() enforces the rules
255  * described here.
256  *
257  * Non-pivot tuple format (plain/non-posting variant):
258  *
259  *  t_tid | t_info | key values | INCLUDE columns, if any
260  *
261  * t_tid points to the heap TID, which is a tiebreaker key column as of
262  * BTREE_VERSION 4.
263  *
264  * Non-pivot tuples complement pivot tuples, which only have key columns.
265  * The sole purpose of pivot tuples is to represent how the key space is
266  * separated.  In general, any B-Tree index that has more than one level
267  * (i.e. any index that does not just consist of a metapage and a single
268  * leaf root page) must have some number of pivot tuples, since pivot
269  * tuples are used for traversing the tree.  Suffix truncation can omit
270  * trailing key columns when a new pivot is formed, which makes minus
271  * infinity their logical value.  Since BTREE_VERSION 4 indexes treat heap
272  * TID as a trailing key column that ensures that all index tuples are
273  * physically unique, it is necessary to represent heap TID as a trailing
274  * key column in pivot tuples, though very often this can be truncated
275  * away, just like any other key column. (Actually, the heap TID is
276  * omitted rather than truncated, since its representation is different to
277  * the non-pivot representation.)
278  *
279  * Pivot tuple format:
280  *
281  *  t_tid | t_info | key values | [heap TID]
282  *
283  * We store the number of columns present inside pivot tuples by abusing
284  * their t_tid offset field, since pivot tuples never need to store a real
285  * offset (pivot tuples generally store a downlink in t_tid, though).  The
286  * offset field only stores the number of columns/attributes when the
287  * INDEX_ALT_TID_MASK bit is set, which doesn't count the trailing heap
288  * TID column sometimes stored in pivot tuples -- that's represented by
289  * the presence of BT_PIVOT_HEAP_TID_ATTR.  The INDEX_ALT_TID_MASK bit in
290  * t_info is always set on BTREE_VERSION 4 pivot tuples, since
291  * BTreeTupleIsPivot() must work reliably on heapkeyspace versions.
292  *
293  * In version 2 or version 3 (!heapkeyspace) indexes, INDEX_ALT_TID_MASK
294  * might not be set in pivot tuples.  BTreeTupleIsPivot() won't work
295  * reliably as a result.  The number of columns stored is implicitly the
296  * same as the number of columns in the index, just like any non-pivot
297  * tuple. (The number of columns stored should not vary, since suffix
298  * truncation of key columns is unsafe within any !heapkeyspace index.)
299  *
300  * The 12 least significant bits from t_tid's offset number are used to
301  * represent the number of key columns within a pivot tuple.  This leaves 4
302  * status bits (BT_STATUS_OFFSET_MASK bits), which are shared by all tuples
303  * that have the INDEX_ALT_TID_MASK bit set (set in t_info) to store basic
304  * tuple metadata.  BTreeTupleIsPivot() and BTreeTupleIsPosting() use the
305  * BT_STATUS_OFFSET_MASK bits.
306  *
307  * Sometimes non-pivot tuples also use a representation that repurposes
308  * t_tid to store metadata rather than a TID.  PostgreSQL v13 introduced a
309  * new non-pivot tuple format to support deduplication: posting list
310  * tuples.  Deduplication merges together multiple equal non-pivot tuples
311  * into a logically equivalent, space efficient representation.  A posting
312  * list is an array of ItemPointerData elements.  Non-pivot tuples are
313  * merged together to form posting list tuples lazily, at the point where
314  * we'd otherwise have to split a leaf page.
315  *
316  * Posting tuple format (alternative non-pivot tuple representation):
317  *
318  *  t_tid | t_info | key values | posting list (TID array)
319  *
320  * Posting list tuples are recognized as such by having the
321  * INDEX_ALT_TID_MASK status bit set in t_info and the BT_IS_POSTING status
322  * bit set in t_tid's offset number.  These flags redefine the content of
323  * the posting tuple's t_tid to store the location of the posting list
324  * (instead of a block number), as well as the total number of heap TIDs
325  * present in the tuple (instead of a real offset number).
326  *
327  * The 12 least significant bits from t_tid's offset number are used to
328  * represent the number of heap TIDs present in the tuple, leaving 4 status
329  * bits (the BT_STATUS_OFFSET_MASK bits).  Like any non-pivot tuple, the
330  * number of columns stored is always implicitly the total number in the
331  * index (in practice there can never be non-key columns stored, since
332  * deduplication is not supported with INCLUDE indexes).
333  */
334 #define INDEX_ALT_TID_MASK			INDEX_AM_RESERVED_BIT
335 
336 /* Item pointer offset bit masks */
337 #define BT_OFFSET_MASK				0x0FFF
338 #define BT_STATUS_OFFSET_MASK		0xF000
339 /* BT_STATUS_OFFSET_MASK status bits */
340 #define BT_PIVOT_HEAP_TID_ATTR		0x1000
341 #define BT_IS_POSTING				0x2000
342 
343 /*
344  * Note: BTreeTupleIsPivot() can have false negatives (but not false
345  * positives) when used with !heapkeyspace indexes
346  */
347 static inline bool
348 BTreeTupleIsPivot(IndexTuple itup)
349 {
350 	if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
351 		return false;
352 	/* absence of BT_IS_POSTING in offset number indicates pivot tuple */
353 	if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) != 0)
354 		return false;
355 
356 	return true;
357 }
358 
359 static inline bool
360 BTreeTupleIsPosting(IndexTuple itup)
361 {
362 	if ((itup->t_info & INDEX_ALT_TID_MASK) == 0)
363 		return false;
364 	/* presence of BT_IS_POSTING in offset number indicates posting tuple */
365 	if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) & BT_IS_POSTING) == 0)
366 		return false;
367 
368 	return true;
369 }
370 
371 static inline void
372 BTreeTupleSetPosting(IndexTuple itup, uint16 nhtids, int postingoffset)
373 {
374 	Assert(nhtids > 1);
375 	Assert((nhtids & BT_STATUS_OFFSET_MASK) == 0);
376 	Assert((size_t) postingoffset == MAXALIGN(postingoffset));
377 	Assert(postingoffset < INDEX_SIZE_MASK);
378 	Assert(!BTreeTupleIsPivot(itup));
379 
380 	itup->t_info |= INDEX_ALT_TID_MASK;
381 	ItemPointerSetOffsetNumber(&itup->t_tid, (nhtids | BT_IS_POSTING));
382 	ItemPointerSetBlockNumber(&itup->t_tid, postingoffset);
383 }
384 
385 static inline uint16
386 BTreeTupleGetNPosting(IndexTuple posting)
387 {
388 	OffsetNumber existing;
389 
390 	Assert(BTreeTupleIsPosting(posting));
391 
392 	existing = ItemPointerGetOffsetNumberNoCheck(&posting->t_tid);
393 	return (existing & BT_OFFSET_MASK);
394 }
395 
396 static inline uint32
397 BTreeTupleGetPostingOffset(IndexTuple posting)
398 {
399 	Assert(BTreeTupleIsPosting(posting));
400 
401 	return ItemPointerGetBlockNumberNoCheck(&posting->t_tid);
402 }
403 
404 static inline ItemPointer
405 BTreeTupleGetPosting(IndexTuple posting)
406 {
407 	return (ItemPointer) ((char *) posting +
408 						  BTreeTupleGetPostingOffset(posting));
409 }
410 
411 static inline ItemPointer
412 BTreeTupleGetPostingN(IndexTuple posting, int n)
413 {
414 	return BTreeTupleGetPosting(posting) + n;
415 }
416 
417 /*
418  * Get/set downlink block number in pivot tuple.
419  *
420  * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
421  * !heapkeyspace indexes would exhibit false positive assertion failures.
422  */
423 static inline BlockNumber
424 BTreeTupleGetDownLink(IndexTuple pivot)
425 {
426 	return ItemPointerGetBlockNumberNoCheck(&pivot->t_tid);
427 }
428 
429 static inline void
430 BTreeTupleSetDownLink(IndexTuple pivot, BlockNumber blkno)
431 {
432 	ItemPointerSetBlockNumber(&pivot->t_tid, blkno);
433 }
434 
435 /*
436  * Get number of attributes within tuple.
437  *
438  * Note that this does not include an implicit tiebreaker heap TID
439  * attribute, if any.  Note also that the number of key attributes must be
440  * explicitly represented in all heapkeyspace pivot tuples.
441  *
442  * Note: This is defined as a macro rather than an inline function to
443  * avoid including rel.h.
444  */
445 #define BTreeTupleGetNAtts(itup, rel)	\
446 	( \
447 		(BTreeTupleIsPivot(itup)) ? \
448 		( \
449 			ItemPointerGetOffsetNumberNoCheck(&(itup)->t_tid) & BT_OFFSET_MASK \
450 		) \
451 		: \
452 		IndexRelationGetNumberOfAttributes(rel) \
453 	)
454 
455 /*
456  * Set number of key attributes in tuple.
457  *
458  * The heap TID tiebreaker attribute bit may also be set here, indicating that
459  * a heap TID value will be stored at the end of the tuple (i.e. using the
460  * special pivot tuple representation).
461  */
462 static inline void
463 BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
464 {
465 	Assert(nkeyatts <= INDEX_MAX_KEYS);
466 	Assert((nkeyatts & BT_STATUS_OFFSET_MASK) == 0);
467 	Assert(!heaptid || nkeyatts > 0);
468 	Assert(!BTreeTupleIsPivot(itup) || nkeyatts == 0);
469 
470 	itup->t_info |= INDEX_ALT_TID_MASK;
471 
472 	if (heaptid)
473 		nkeyatts |= BT_PIVOT_HEAP_TID_ATTR;
474 
475 	/* BT_IS_POSTING bit is deliberately unset here */
476 	ItemPointerSetOffsetNumber(&itup->t_tid, nkeyatts);
477 	Assert(BTreeTupleIsPivot(itup));
478 }
479 
480 /*
481  * Get/set leaf page's "top parent" link from its high key.  Used during page
482  * deletion.
483  *
484  * Note: Cannot assert that tuple is a pivot tuple.  If we did so then
485  * !heapkeyspace indexes would exhibit false positive assertion failures.
486  */
487 static inline BlockNumber
488 BTreeTupleGetTopParent(IndexTuple leafhikey)
489 {
490 	return ItemPointerGetBlockNumberNoCheck(&leafhikey->t_tid);
491 }
492 
493 static inline void
494 BTreeTupleSetTopParent(IndexTuple leafhikey, BlockNumber blkno)
495 {
496 	ItemPointerSetBlockNumber(&leafhikey->t_tid, blkno);
497 	BTreeTupleSetNAtts(leafhikey, 0, false);
498 }
499 
500 /*
501  * Get tiebreaker heap TID attribute, if any.
502  *
503  * This returns the first/lowest heap TID in the case of a posting list tuple.
504  */
505 static inline ItemPointer
506 BTreeTupleGetHeapTID(IndexTuple itup)
507 {
508 	if (BTreeTupleIsPivot(itup))
509 	{
510 		/* Pivot tuple heap TID representation? */
511 		if ((ItemPointerGetOffsetNumberNoCheck(&itup->t_tid) &
512 			 BT_PIVOT_HEAP_TID_ATTR) != 0)
513 			return (ItemPointer) ((char *) itup + IndexTupleSize(itup) -
514 								  sizeof(ItemPointerData));
515 
516 		/* Heap TID attribute was truncated */
517 		return NULL;
518 	}
519 	else if (BTreeTupleIsPosting(itup))
520 		return BTreeTupleGetPosting(itup);
521 
522 	return &itup->t_tid;
523 }
524 
525 /*
526  * Get maximum heap TID attribute, which could be the only TID in the case of
527  * a non-pivot tuple that does not have a posting list.
528  *
529  * Works with non-pivot tuples only.
530  */
531 static inline ItemPointer
532 BTreeTupleGetMaxHeapTID(IndexTuple itup)
533 {
534 	Assert(!BTreeTupleIsPivot(itup));
535 
536 	if (BTreeTupleIsPosting(itup))
537 	{
538 		uint16		nposting = BTreeTupleGetNPosting(itup);
539 
540 		return BTreeTupleGetPostingN(itup, nposting - 1);
541 	}
542 
543 	return &itup->t_tid;
544 }
545 
546 /*
547  *	Operator strategy numbers for B-tree have been moved to access/stratnum.h,
548  *	because many places need to use them in ScanKeyInit() calls.
549  *
550  *	The strategy numbers are chosen so that we can commute them by
551  *	subtraction, thus:
552  */
553 #define BTCommuteStrategyNumber(strat)	(BTMaxStrategyNumber + 1 - (strat))
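
/*
 * For example, BTCommuteStrategyNumber(BTLessStrategyNumber) is
 * BTGreaterStrategyNumber (5 + 1 - 1 = 5), which is what is needed when an
 * "a < b" qual is commuted into "b > a".
 */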
554 
555 /*
556  *	When a new operator class is declared, we require that the user
557  *	supply us with an amproc procedure (BTORDER_PROC) for determining
558  *	whether, for two keys a and b, a < b, a = b, or a > b.  This routine
559  *	must return < 0, 0, > 0, respectively, in these three cases.
560  *
561  *	To facilitate accelerated sorting, an operator class may choose to
562  *	offer a second procedure (BTSORTSUPPORT_PROC).  For full details, see
563  *	src/include/utils/sortsupport.h.
564  *
565  *	To support window frames defined by "RANGE offset PRECEDING/FOLLOWING",
566  *	an operator class may choose to offer a third amproc procedure
567  *	(BTINRANGE_PROC), independently of whether it offers sortsupport.
568  *	For full details, see doc/src/sgml/btree.sgml.
569  *
570  *	To facilitate B-Tree deduplication, an operator class may choose to
571  *	offer a fourth amproc procedure (BTEQUALIMAGE_PROC).  For full details,
572  *	see doc/src/sgml/btree.sgml.
573  */
574 
575 #define BTORDER_PROC		1
576 #define BTSORTSUPPORT_PROC	2
577 #define BTINRANGE_PROC		3
578 #define BTEQUALIMAGE_PROC	4
579 #define BTOPTIONS_PROC		5
580 #define BTNProcs			5
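
/*
 * Hedged sketch of how a support function is typically looked up and
 * invoked (compare _bt_mkscankey() in nbtutils.c and _bt_compare() in
 * nbtsearch.c); variable names are placeholders, and collation and NULL
 * handling are omitted:
 *
 *		FmgrInfo   *procinfo = index_getprocinfo(rel, attno, BTORDER_PROC);
 *		int32		cmp = DatumGetInt32(FunctionCall2Coll(procinfo, collation,
 *														  datuma, datumb));
 *
 *		... cmp is < 0, == 0, or > 0 per the BTORDER_PROC contract above ...
 */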
581 
582 /*
583  *	We need to be able to tell the difference between read and write
584  *	requests for pages, in order to do locking correctly.
585  */
586 
587 #define BT_READ			BUFFER_LOCK_SHARE
588 #define BT_WRITE		BUFFER_LOCK_EXCLUSIVE
589 
590 /*
591  * BTStackData -- As we descend a tree, we push the location of pivot
592  * tuples whose downlink we are about to follow onto a private stack.  If
593  * we split a leaf, we use this stack to walk back up the tree and insert
594  * data into its parent page at the correct location.  We also have to
595  * recursively insert into the grandparent page if and when the parent page
596  * splits.  Our private stack can become stale due to concurrent page
597  * splits and page deletions, but it should never give us an irredeemably
598  * bad picture.
599  */
600 typedef struct BTStackData
601 {
602 	BlockNumber bts_blkno;
603 	OffsetNumber bts_offset;
604 	struct BTStackData *bts_parent;
605 } BTStackData;
606 
607 typedef BTStackData *BTStack;
608 
609 /*
610  * BTScanInsertData is the btree-private state needed to find an initial
611  * position for an indexscan, or to insert new tuples -- an "insertion
612  * scankey" (not to be confused with a search scankey).  It's used to descend
613  * a B-Tree using _bt_search.
614  *
615  * heapkeyspace indicates if we expect all keys in the index to be physically
616  * unique because heap TID is used as a tiebreaker attribute, and if index may
617  * have truncated key attributes in pivot tuples.  This is actually a property
618  * of the index relation itself (not an indexscan).  heapkeyspace indexes are
619  * indexes whose version is >= version 4.  It's convenient to keep this close
620  * by, rather than accessing the metapage repeatedly.
621  *
622  * allequalimage is set to indicate that deduplication is safe for the index.
623  * This is also a property of the index relation rather than an indexscan.
624  *
625  * anynullkeys indicates if any of the keys had NULL value when scankey was
626  * built from index tuple (note that already-truncated tuple key attributes
627  * set NULL as a placeholder key value, which also affects value of
628  * anynullkeys).  This is a convenience for unique index non-pivot tuple
629  * insertion, which usually temporarily unsets scantid, but should not when
630  * anynullkeys is true.  Value generally matches non-pivot tuple's HasNulls
631  * bit, but may not when inserting into an INCLUDE index (tuple header value
632  * is affected by the NULL-ness of both key and non-key attributes).
633  *
634  * When nextkey is false (the usual case), _bt_search and _bt_binsrch will
635  * locate the first item >= scankey.  When nextkey is true, they will locate
636  * the first item > scan key.
637  *
638  * pivotsearch is set to true by callers that want to re-find a leaf page
639  * using a scankey built from a leaf page's high key.  Most callers set this
640  * to false.
641  *
642  * scantid is the heap TID that is used as a final tiebreaker attribute.  It
643  * is set to NULL when index scan doesn't need to find a position for a
644  * specific physical tuple.  Must be set when inserting new tuples into
645  * heapkeyspace indexes, since every tuple in the tree unambiguously belongs
646  * in one exact position (it's never set with !heapkeyspace indexes, though).
647  * Despite the representational difference, nbtree search code considers
648  * scantid to be just another insertion scankey attribute.
649  *
650  * scankeys is an array of scan key entries for attributes that are compared
651  * before scantid (user-visible attributes).  keysz is the size of the array.
652  * During insertion, there must be a scan key for every attribute, but when
653  * starting a regular index scan some can be omitted.  The array is used as a
654  * flexible array member, though it's sized in a way that makes it possible to
655  * use stack allocations.  See nbtree/README for full details.
656  */
657 typedef struct BTScanInsertData
658 {
659 	bool		heapkeyspace;
660 	bool		allequalimage;
661 	bool		anynullkeys;
662 	bool		nextkey;
663 	bool		pivotsearch;
664 	ItemPointer scantid;		/* tiebreaker for scankeys */
665 	int			keysz;			/* Size of scankeys array */
666 	ScanKeyData scankeys[INDEX_MAX_KEYS];	/* Must appear last */
667 } BTScanInsertData;
668 
669 typedef BTScanInsertData *BTScanInsert;
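
/*
 * Hedged sketch of how an insertion scankey is built and used to descend to
 * a leaf page (compare _bt_doinsert() in nbtinsert.c); variable names are
 * placeholders, and locking and fastpath details are omitted:
 *
 *		BTScanInsert itup_key = _bt_mkscankey(rel, itup);
 *		Buffer		buf;
 *		BTStack		stack = _bt_search(rel, itup_key, &buf, BT_WRITE, NULL);
 *
 *		... buf now holds the leaf page the new tuple belongs on ...
 */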
670 
671 /*
672  * BTInsertStateData is a working area used during insertion.
673  *
674  * This is filled in after descending the tree to the first leaf page the new
675  * tuple might belong on.  Tracks the current position while performing
676  * uniqueness check, before we have determined which exact page to insert
677  * to.
678  *
679  * (This should be private to nbtinsert.c, but it's also used by
680  * _bt_binsrch_insert)
681  */
682 typedef struct BTInsertStateData
683 {
684 	IndexTuple	itup;			/* Item we're inserting */
685 	Size		itemsz;			/* Size of itup -- should be MAXALIGN()'d */
686 	BTScanInsert itup_key;		/* Insertion scankey */
687 
688 	/* Buffer containing leaf page we're likely to insert itup on */
689 	Buffer		buf;
690 
691 	/*
692 	 * Cache of bounds within the current buffer.  Only used for insertions
693 	 * where _bt_check_unique is called.  See _bt_binsrch_insert and
694 	 * _bt_findinsertloc for details.
695 	 */
696 	bool		bounds_valid;
697 	OffsetNumber low;
698 	OffsetNumber stricthigh;
699 
700 	/*
701 	 * if _bt_binsrch_insert found the location inside existing posting list,
702 	 * save the position inside the list.  -1 sentinel value indicates overlap
703 	 * with an existing posting list tuple that has its LP_DEAD bit set.
704 	 */
705 	int			postingoff;
706 } BTInsertStateData;
707 
708 typedef BTInsertStateData *BTInsertState;
709 
710 /*
711  * State used to represent an individual pending posting list during
712  * deduplication.
713  */
714 typedef struct BTDedupInterval
715 {
716 	OffsetNumber baseoff;
717 	uint16		nitems;
718 } BTDedupInterval;
719 
720 /*
721  * BTDedupStateData is a working area used during deduplication.
722  *
723  * The status info fields track the state of a whole-page deduplication pass.
724  * State about the current pending posting list is also tracked.
725  *
726  * A pending posting list is comprised of a contiguous group of equal items
727  * from the page, starting from page offset number 'baseoff'.  This is the
728  * offset number of the "base" tuple for new posting list.  'nitems' is the
729  * current total number of existing items from the page that will be merged to
730  * make a new posting list tuple, including the base tuple item.  (Existing
731  * items may themselves be posting list tuples, or regular non-pivot tuples.)
732  *
733  * The total size of the existing tuples to be freed when pending posting list
734  * is processed gets tracked by 'phystupsize'.  This information allows
735  * deduplication to calculate the space saving for each new posting list
736  * tuple, and for the entire pass over the page as a whole.
737  */
738 typedef struct BTDedupStateData
739 {
740 	/* Deduplication status info for entire pass over page */
741 	bool		deduplicate;	/* Still deduplicating page? */
742 	int			nmaxitems;		/* Number of max-sized tuples so far */
743 	Size		maxpostingsize; /* Limit on size of final tuple */
744 
745 	/* Metadata about base tuple of current pending posting list */
746 	IndexTuple	base;			/* Use to form new posting list */
747 	OffsetNumber baseoff;		/* page offset of base */
748 	Size		basetupsize;	/* base size without original posting list */
749 
750 	/* Other metadata about pending posting list */
751 	ItemPointer htids;			/* Heap TIDs in pending posting list */
752 	int			nhtids;			/* Number of heap TIDs in htids array */
753 	int			nitems;			/* Number of existing tuples/line pointers */
754 	Size		phystupsize;	/* Includes line pointer overhead */
755 
756 	/*
757 	 * Array of tuples to go on new version of the page.  Contains one entry
758 	 * for each group of consecutive items.  Note that existing tuples that
759 	 * will not become posting list tuples do not appear in the array (they
760 	 * are implicitly unchanged by deduplication pass).
761 	 */
762 	int			nintervals;		/* current number of intervals in array */
763 	BTDedupInterval intervals[MaxIndexTuplesPerPage];
764 } BTDedupStateData;
765 
766 typedef BTDedupStateData *BTDedupState;
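
/*
 * Hedged sketch of how the nbtdedup.c routines declared below drive a
 * deduplication pass over one page (compare _bt_dedup_one_page(); names
 * here are placeholders):
 *
 *		_bt_dedup_start_pending(state, firstitup, firstoff);
 *		for each remaining item on the page:
 *			if (!_bt_dedup_save_htid(state, itup))
 *			{
 *				-- itup is unequal, or the merged tuple would be too large --
 *				_bt_dedup_finish_pending(newpage, state);
 *				_bt_dedup_start_pending(state, itup, offnum);
 *			}
 *		_bt_dedup_finish_pending(newpage, state);
 */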
767 
768 /*
769  * BTVacuumPostingData is state that represents how to VACUUM a posting list
770  * tuple when some (though not all) of its TIDs are to be deleted.
771  *
772  * Convention is that itup field is the original posting list tuple on input,
773  * and the palloc()'d final tuple used to overwrite the existing tuple on output.
774  */
775 typedef struct BTVacuumPostingData
776 {
777 	/* Tuple that will be/was updated */
778 	IndexTuple	itup;
779 	OffsetNumber updatedoffset;
780 
781 	/* State needed to describe final itup in WAL */
782 	uint16		ndeletedtids;
783 	uint16		deletetids[FLEXIBLE_ARRAY_MEMBER];
784 } BTVacuumPostingData;
785 
786 typedef BTVacuumPostingData *BTVacuumPosting;
787 
788 /*
789  * BTScanOpaqueData is the btree-private state needed for an indexscan.
790  * This consists of preprocessed scan keys (see _bt_preprocess_keys() for
791  * details of the preprocessing), information about the current location
792  * of the scan, and information about the marked location, if any.  (We use
793  * BTScanPosData to represent the data needed for each of current and marked
794  * locations.)	In addition we can remember some known-killed index entries
795  * that must be marked before we can move off the current page.
796  *
797  * Index scans work a page at a time: we pin and read-lock the page, identify
798  * all the matching items on the page and save them in BTScanPosData, then
799  * release the read-lock while returning the items to the caller for
800  * processing.  This approach minimizes lock/unlock traffic.  Note that we
801  * keep the pin on the index page until the caller is done with all the items
802  * (this is needed for VACUUM synchronization, see nbtree/README).  When we
803  * are ready to step to the next page, if the caller has told us any of the
804  * items were killed, we re-lock the page to mark them killed, then unlock.
805  * Finally we drop the pin and step to the next page in the appropriate
806  * direction.
807  *
808  * If we are doing an index-only scan, we save the entire IndexTuple for each
809  * matched item, otherwise only its heap TID and offset.  The IndexTuples go
810  * into a separate workspace array; each BTScanPosItem stores its tuple's
811  * offset within that array.  Posting list tuples store a "base" tuple once,
812  * allowing the same key to be returned for each TID in the posting list
813  * tuple.
814  */
815 
816 typedef struct BTScanPosItem	/* what we remember about each match */
817 {
818 	ItemPointerData heapTid;	/* TID of referenced heap item */
819 	OffsetNumber indexOffset;	/* index item's location within page */
820 	LocationIndex tupleOffset;	/* IndexTuple's offset in workspace, if any */
821 } BTScanPosItem;
822 
823 typedef struct BTScanPosData
824 {
825 	Buffer		buf;			/* if valid, the buffer is pinned */
826 
827 	XLogRecPtr	lsn;			/* pos in the WAL stream when page was read */
828 	BlockNumber currPage;		/* page referenced by items array */
829 	BlockNumber nextPage;		/* page's right link when we scanned it */
830 
831 	/*
832 	 * moreLeft and moreRight track whether we think there may be matching
833 	 * index entries to the left and right of the current page, respectively.
834 	 * We can clear the appropriate one of these flags when _bt_checkkeys()
835 	 * returns continuescan = false.
836 	 */
837 	bool		moreLeft;
838 	bool		moreRight;
839 
840 	/*
841 	 * If we are doing an index-only scan, nextTupleOffset is the first free
842 	 * location in the associated tuple storage workspace.
843 	 */
844 	int			nextTupleOffset;
845 
846 	/*
847 	 * The items array is always ordered in index order (ie, increasing
848 	 * indexoffset).  When scanning backwards it is convenient to fill the
849 	 * array back-to-front, so we start at the last slot and fill downwards.
850 	 * Hence we need both a first-valid-entry and a last-valid-entry counter.
851 	 * itemIndex is a cursor showing which entry was last returned to caller.
852 	 */
853 	int			firstItem;		/* first valid index in items[] */
854 	int			lastItem;		/* last valid index in items[] */
855 	int			itemIndex;		/* current index in items[] */
856 
857 	BTScanPosItem items[MaxTIDsPerBTreePage];	/* MUST BE LAST */
858 } BTScanPosData;
859 
860 typedef BTScanPosData *BTScanPos;
861 
862 #define BTScanPosIsPinned(scanpos) \
863 ( \
864 	AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
865 				!BufferIsValid((scanpos).buf)), \
866 	BufferIsValid((scanpos).buf) \
867 )
868 #define BTScanPosUnpin(scanpos) \
869 	do { \
870 		ReleaseBuffer((scanpos).buf); \
871 		(scanpos).buf = InvalidBuffer; \
872 	} while (0)
873 #define BTScanPosUnpinIfPinned(scanpos) \
874 	do { \
875 		if (BTScanPosIsPinned(scanpos)) \
876 			BTScanPosUnpin(scanpos); \
877 	} while (0)
878 
879 #define BTScanPosIsValid(scanpos) \
880 ( \
881 	AssertMacro(BlockNumberIsValid((scanpos).currPage) || \
882 				!BufferIsValid((scanpos).buf)), \
883 	BlockNumberIsValid((scanpos).currPage) \
884 )
885 #define BTScanPosInvalidate(scanpos) \
886 	do { \
887 		(scanpos).currPage = InvalidBlockNumber; \
888 		(scanpos).nextPage = InvalidBlockNumber; \
889 		(scanpos).buf = InvalidBuffer; \
890 		(scanpos).lsn = InvalidXLogRecPtr; \
891 		(scanpos).nextTupleOffset = 0; \
892 	} while (0)
893 
894 /* We need one of these for each equality-type SK_SEARCHARRAY scan key */
895 typedef struct BTArrayKeyInfo
896 {
897 	int			scan_key;		/* index of associated key in arrayKeyData */
898 	int			cur_elem;		/* index of current element in elem_values */
899 	int			mark_elem;		/* index of marked element in elem_values */
900 	int			num_elems;		/* number of elems in current array value */
901 	Datum	   *elem_values;	/* array of num_elems Datums */
902 } BTArrayKeyInfo;
903 
904 typedef struct BTScanOpaqueData
905 {
906 	/* these fields are set by _bt_preprocess_keys(): */
907 	bool		qual_ok;		/* false if qual can never be satisfied */
908 	int			numberOfKeys;	/* number of preprocessed scan keys */
909 	ScanKey		keyData;		/* array of preprocessed scan keys */
910 
911 	/* workspace for SK_SEARCHARRAY support */
912 	ScanKey		arrayKeyData;	/* modified copy of scan->keyData */
913 	int			numArrayKeys;	/* number of equality-type array keys (-1 if
914 								 * there are any unsatisfiable array keys) */
915 	int			arrayKeyCount;	/* count indicating number of array scan keys
916 								 * processed */
917 	BTArrayKeyInfo *arrayKeys;	/* info about each equality-type array key */
918 	MemoryContext arrayContext; /* scan-lifespan context for array data */
919 
920 	/* info about killed items if any (killedItems is NULL if never used) */
921 	int		   *killedItems;	/* currPos.items indexes of killed items */
922 	int			numKilled;		/* number of currently stored items */
923 
924 	/*
925 	 * If we are doing an index-only scan, these are the tuple storage
926 	 * workspaces for the currPos and markPos respectively.  Each is of size
927 	 * BLCKSZ, so it can hold as much as a full page's worth of tuples.
928 	 */
929 	char	   *currTuples;		/* tuple storage for currPos */
930 	char	   *markTuples;		/* tuple storage for markPos */
931 
932 	/*
933 	 * If the marked position is on the same page as current position, we
934 	 * don't use markPos, but just keep the marked itemIndex in markItemIndex
935 	 * (all the rest of currPos is valid for the mark position). Hence, to
936 	 * determine if there is a mark, first look at markItemIndex, then at
937 	 * markPos.
938 	 */
939 	int			markItemIndex;	/* itemIndex, or -1 if not valid */
940 
941 	/* keep these last in struct for efficiency */
942 	BTScanPosData currPos;		/* current position data */
943 	BTScanPosData markPos;		/* marked position, if any */
944 } BTScanOpaqueData;
945 
946 typedef BTScanOpaqueData *BTScanOpaque;
947 
948 /*
949  * We use some private sk_flags bits in preprocessed scan keys.  We're allowed
950  * to use bits 16-31 (see skey.h).  The uppermost bits are copied from the
951  * index's indoption[] array entry for the index attribute.
952  */
953 #define SK_BT_REQFWD	0x00010000	/* required to continue forward scan */
954 #define SK_BT_REQBKWD	0x00020000	/* required to continue backward scan */
955 #define SK_BT_INDOPTION_SHIFT  24	/* must clear the above bits */
956 #define SK_BT_DESC			(INDOPTION_DESC << SK_BT_INDOPTION_SHIFT)
957 #define SK_BT_NULLS_FIRST	(INDOPTION_NULLS_FIRST << SK_BT_INDOPTION_SHIFT)
958 
959 typedef struct BTOptions
960 {
961 	int32		varlena_header_;	/* varlena header (do not touch directly!) */
962 	int			fillfactor;		/* page fill factor in percent (0..100) */
963 	/* fraction of newly inserted tuples needed to trigger index cleanup */
964 	float8		vacuum_cleanup_index_scale_factor;
965 	bool		deduplicate_items;	/* Try to deduplicate items? */
966 } BTOptions;
967 
968 #define BTGetFillFactor(relation) \
969 	(AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
970 				 relation->rd_rel->relam == BTREE_AM_OID), \
971 	 (relation)->rd_options ? \
972 	 ((BTOptions *) (relation)->rd_options)->fillfactor : \
973 	 BTREE_DEFAULT_FILLFACTOR)
974 #define BTGetTargetPageFreeSpace(relation) \
975 	(BLCKSZ * (100 - BTGetFillFactor(relation)) / 100)
976 #define BTGetDeduplicateItems(relation) \
977 	(AssertMacro(relation->rd_rel->relkind == RELKIND_INDEX && \
978 				 relation->rd_rel->relam == BTREE_AM_OID), \
979 	((relation)->rd_options ? \
980 	 ((BTOptions *) (relation)->rd_options)->deduplicate_items : true))
981 
982 /*
983  * Constant definition for progress reporting.  Phase numbers must match
984  * btbuildphasename.
985  */
986 /* PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE is 1 (see progress.h) */
987 #define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN		2
988 #define PROGRESS_BTREE_PHASE_PERFORMSORT_1				3
989 #define PROGRESS_BTREE_PHASE_PERFORMSORT_2				4
990 #define PROGRESS_BTREE_PHASE_LEAF_LOAD					5
991 
992 /*
993  * external entry points for btree, in nbtree.c
994  */
995 extern void btbuildempty(Relation index);
996 extern bool btinsert(Relation rel, Datum *values, bool *isnull,
997 					 ItemPointer ht_ctid, Relation heapRel,
998 					 IndexUniqueCheck checkUnique,
999 					 struct IndexInfo *indexInfo);
1000 extern IndexScanDesc btbeginscan(Relation rel, int nkeys, int norderbys);
1001 extern Size btestimateparallelscan(void);
1002 extern void btinitparallelscan(void *target);
1003 extern bool btgettuple(IndexScanDesc scan, ScanDirection dir);
1004 extern int64 btgetbitmap(IndexScanDesc scan, TIDBitmap *tbm);
1005 extern void btrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
1006 					 ScanKey orderbys, int norderbys);
1007 extern void btparallelrescan(IndexScanDesc scan);
1008 extern void btendscan(IndexScanDesc scan);
1009 extern void btmarkpos(IndexScanDesc scan);
1010 extern void btrestrpos(IndexScanDesc scan);
1011 extern IndexBulkDeleteResult *btbulkdelete(IndexVacuumInfo *info,
1012 										   IndexBulkDeleteResult *stats,
1013 										   IndexBulkDeleteCallback callback,
1014 										   void *callback_state);
1015 extern IndexBulkDeleteResult *btvacuumcleanup(IndexVacuumInfo *info,
1016 											  IndexBulkDeleteResult *stats);
1017 extern bool btcanreturn(Relation index, int attno);
1018 
1019 /*
1020  * prototypes for internal functions in nbtree.c
1021  */
1022 extern bool _bt_parallel_seize(IndexScanDesc scan, BlockNumber *pageno);
1023 extern void _bt_parallel_release(IndexScanDesc scan, BlockNumber scan_page);
1024 extern void _bt_parallel_done(IndexScanDesc scan);
1025 extern void _bt_parallel_advance_array_keys(IndexScanDesc scan);
1026 
1027 /*
1028  * prototypes for functions in nbtdedup.c
1029  */
1030 extern void _bt_dedup_one_page(Relation rel, Buffer buf, Relation heapRel,
1031 							   IndexTuple newitem, Size newitemsz,
1032 							   bool checkingunique);
1033 extern void _bt_dedup_start_pending(BTDedupState state, IndexTuple base,
1034 									OffsetNumber baseoff);
1035 extern bool _bt_dedup_save_htid(BTDedupState state, IndexTuple itup);
1036 extern Size _bt_dedup_finish_pending(Page newpage, BTDedupState state);
1037 extern IndexTuple _bt_form_posting(IndexTuple base, ItemPointer htids,
1038 								   int nhtids);
1039 extern void _bt_update_posting(BTVacuumPosting vacposting);
1040 extern IndexTuple _bt_swap_posting(IndexTuple newitem, IndexTuple oposting,
1041 								   int postingoff);
1042 
1043 /*
1044  * prototypes for functions in nbtinsert.c
1045  */
1046 extern bool _bt_doinsert(Relation rel, IndexTuple itup,
1047 						 IndexUniqueCheck checkUnique, Relation heapRel);
1048 extern void _bt_finish_split(Relation rel, Buffer lbuf, BTStack stack);
1049 extern Buffer _bt_getstackbuf(Relation rel, BTStack stack, BlockNumber child);
1050 
1051 /*
1052  * prototypes for functions in nbtsplitloc.c
1053  */
1054 extern OffsetNumber _bt_findsplitloc(Relation rel, Page origpage,
1055 									 OffsetNumber newitemoff, Size newitemsz, IndexTuple newitem,
1056 									 bool *newitemonleft);
1057 
1058 /*
1059  * prototypes for functions in nbtpage.c
1060  */
1061 extern void _bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level,
1062 							 bool allequalimage);
1063 extern void _bt_update_meta_cleanup_info(Relation rel,
1064 										 TransactionId oldestBtpoXact, float8 numHeapTuples);
1065 extern void _bt_upgrademetapage(Page page);
1066 extern Buffer _bt_getroot(Relation rel, int access);
1067 extern Buffer _bt_gettrueroot(Relation rel);
1068 extern int	_bt_getrootheight(Relation rel);
1069 extern void _bt_metaversion(Relation rel, bool *heapkeyspace,
1070 							bool *allequalimage);
1071 extern void _bt_checkpage(Relation rel, Buffer buf);
1072 extern Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access);
1073 extern Buffer _bt_relandgetbuf(Relation rel, Buffer obuf,
1074 							   BlockNumber blkno, int access);
1075 extern void _bt_relbuf(Relation rel, Buffer buf);
1076 extern void _bt_pageinit(Page page, Size size);
1077 extern bool _bt_page_recyclable(Page page);
1078 extern void _bt_delitems_vacuum(Relation rel, Buffer buf,
1079 								OffsetNumber *deletable, int ndeletable,
1080 								BTVacuumPosting *updatable, int nupdatable);
1081 extern void _bt_delitems_delete(Relation rel, Buffer buf,
1082 								OffsetNumber *deletable, int ndeletable,
1083 								Relation heapRel);
1084 extern uint32 _bt_pagedel(Relation rel, Buffer leafbuf,
1085 						  TransactionId *oldestBtpoXact);
1086 
1087 /*
1088  * prototypes for functions in nbtsearch.c
1089  */
1090 extern BTStack _bt_search(Relation rel, BTScanInsert key, Buffer *bufP,
1091 						  int access, Snapshot snapshot);
1092 extern Buffer _bt_moveright(Relation rel, BTScanInsert key, Buffer buf,
1093 							bool forupdate, BTStack stack, int access, Snapshot snapshot);
1094 extern OffsetNumber _bt_binsrch_insert(Relation rel, BTInsertState insertstate);
1095 extern int32 _bt_compare(Relation rel, BTScanInsert key, Page page, OffsetNumber offnum);
1096 extern bool _bt_first(IndexScanDesc scan, ScanDirection dir);
1097 extern bool _bt_next(IndexScanDesc scan, ScanDirection dir);
1098 extern Buffer _bt_get_endpoint(Relation rel, uint32 level, bool rightmost,
1099 							   Snapshot snapshot);
1100 
1101 /*
1102  * prototypes for functions in nbtutils.c
1103  */
1104 extern BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup);
1105 extern void _bt_freestack(BTStack stack);
1106 extern void _bt_preprocess_array_keys(IndexScanDesc scan);
1107 extern void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir);
1108 extern bool _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir);
1109 extern void _bt_mark_array_keys(IndexScanDesc scan);
1110 extern void _bt_restore_array_keys(IndexScanDesc scan);
1111 extern void _bt_preprocess_keys(IndexScanDesc scan);
1112 extern bool _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
1113 						  int tupnatts, ScanDirection dir, bool *continuescan);
1114 extern void _bt_killitems(IndexScanDesc scan);
1115 extern BTCycleId _bt_vacuum_cycleid(Relation rel);
1116 extern BTCycleId _bt_start_vacuum(Relation rel);
1117 extern void _bt_end_vacuum(Relation rel);
1118 extern void _bt_end_vacuum_callback(int code, Datum arg);
1119 extern Size BTreeShmemSize(void);
1120 extern void BTreeShmemInit(void);
1121 extern bytea *btoptions(Datum reloptions, bool validate);
1122 extern bool btproperty(Oid index_oid, int attno,
1123 					   IndexAMProperty prop, const char *propname,
1124 					   bool *res, bool *isnull);
1125 extern char *btbuildphasename(int64 phasenum);
1126 extern IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft,
1127 							   IndexTuple firstright, BTScanInsert itup_key);
1128 extern int	_bt_keep_natts_fast(Relation rel, IndexTuple lastleft,
1129 								IndexTuple firstright);
1130 extern bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page,
1131 							OffsetNumber offnum);
1132 extern void _bt_check_third_page(Relation rel, Relation heap,
1133 								 bool needheaptidspace, Page page, IndexTuple newtup);
1134 extern bool _bt_allequalimage(Relation rel, bool debugmessage);
1135 
1136 /*
1137  * prototypes for functions in nbtvalidate.c
1138  */
1139 extern bool btvalidate(Oid opclassoid);
1140 
1141 /*
1142  * prototypes for functions in nbtsort.c
1143  */
1144 extern IndexBuildResult *btbuild(Relation heap, Relation index,
1145 								 struct IndexInfo *indexInfo);
1146 extern void _bt_parallel_build_main(dsm_segment *seg, shm_toc *toc);
1147 
1148 #endif							/* NBTREE_H */
1149