/*-------------------------------------------------------------------------
 *
 * nbtpage.c
 *	  BTree-specific page management code for the Postgres btree access
 *	  method.
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/nbtree/nbtpage.c
 *
 *	NOTES
 *	   Postgres btree pages look like ordinary relation pages.  The opaque
 *	   data at high addresses includes pointers to left and right siblings
 *	   and flag data describing page state.  The first page in a btree, page
 *	   zero, is special -- it stores meta-information describing the tree.
 *	   Pages one and higher store the actual tree data.
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/nbtree.h"
#include "access/nbtxlog.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "utils/snapmgr.h"

static bool _bt_mark_page_halfdead(Relation rel, Buffer buf, BTStack stack);
static bool _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf,
						 bool *rightsib_empty);
static bool _bt_lock_branch_parent(Relation rel, BlockNumber child,
					   BTStack stack, Buffer *topparent, OffsetNumber *topoff,
					   BlockNumber *target, BlockNumber *rightsib);
static void _bt_log_reuse_page(Relation rel, BlockNumber blkno,
				   TransactionId latestRemovedXid);

/*
 *	_bt_initmetapage() -- Fill a page buffer with a correct metapage image
 */
void
_bt_initmetapage(Page page, BlockNumber rootbknum, uint32 level)
{
	BTMetaPageData *metad;
	BTPageOpaque metaopaque;

	_bt_pageinit(page, BLCKSZ);

	metad = BTPageGetMeta(page);
	metad->btm_magic = BTREE_MAGIC;
	metad->btm_version = BTREE_VERSION;
	metad->btm_root = rootbknum;
	metad->btm_level = level;
	metad->btm_fastroot = rootbknum;
	metad->btm_fastlevel = level;

	metaopaque = (BTPageOpaque) PageGetSpecialPointer(page);
	metaopaque->btpo_flags = BTP_META;

	/*
	 * Set pd_lower just past the end of the metadata.  This is not essential
	 * but it makes the page look compressible to xlog.c.
	 */
	((PageHeader) page)->pd_lower =
		((char *) metad + sizeof(BTMetaPageData)) - (char *) page;
}
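
/*
 * Resulting metapage layout, schematically (an illustrative sketch only,
 * not additional code):
 *
 *	+----------------+-----------------+.. hole ..+--------------------+
 *	| PageHeaderData | BTMetaPageData  |          | BTPageOpaqueData   |
 *	+----------------+-----------------+.. hole ..+--------------------+
 *	0                ^ pd_lower                   ^ special space
 *
 * Because pd_lower is set just past BTMetaPageData, everything between
 * pd_lower and the special space is a "hole" that xlog.c can omit when
 * taking full-page images.
 */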

/*
 *	_bt_getroot() -- Get the root page of the btree.
 *
 *		Since the root page can move around the btree file, we have to read
 *		its location from the metadata page, and then read the root page
 *		itself.  If no root page exists yet, we have to create one.  The
 *		standard class of race conditions exists here; I think I covered
 *		them all in the Hopi Indian rain dance of lock requests below.
 *
 *		The access type parameter (BT_READ or BT_WRITE) controls whether
 *		a new root page will be created or not.  If access = BT_READ,
 *		and no root page exists, we just return InvalidBuffer.  For
 *		BT_WRITE, we try to create the root page if it doesn't exist.
 *		NOTE that the returned root page will have only a read lock set
 *		on it even if access = BT_WRITE!
 *
 *		The returned page is not necessarily the true root --- it could be
 *		a "fast root" (a page that is alone in its level due to deletions).
 *		Also, if the root page is split while we are "in flight" to it,
 *		what we will return is the old root, which is now just the leftmost
 *		page on a probably-not-very-wide level.  For most purposes this is
 *		as good as or better than the true root, so we do not bother to
 *		insist on finding the true root.  We do, however, guarantee to
 *		return a live (not deleted or half-dead) page.
 *
 *		On successful return, the root page is pinned and read-locked.
 *		The metadata page is not locked or pinned on exit.
 */
Buffer
_bt_getroot(Relation rel, int access)
{
	Buffer		metabuf;
	Page		metapg;
	BTPageOpaque metaopaque;
	Buffer		rootbuf;
	Page		rootpage;
	BTPageOpaque rootopaque;
	BlockNumber rootblkno;
	uint32		rootlevel;
	BTMetaPageData *metad;

	/*
	 * Try to use previously-cached metapage data to find the root.  This
	 * normally saves one buffer access per index search, which is a very
	 * helpful savings in bufmgr traffic and hence contention.
	 */
	if (rel->rd_amcache != NULL)
	{
		metad = (BTMetaPageData *) rel->rd_amcache;
		/* We shouldn't have cached it if any of these fail */
		Assert(metad->btm_magic == BTREE_MAGIC);
		Assert(metad->btm_version == BTREE_VERSION);
		Assert(metad->btm_root != P_NONE);

		rootblkno = metad->btm_fastroot;
		Assert(rootblkno != P_NONE);
		rootlevel = metad->btm_fastlevel;

		rootbuf = _bt_getbuf(rel, rootblkno, BT_READ);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);

		/*
		 * Since the cache might be stale, we check the page more carefully
		 * here than normal.  We *must* check that it's not deleted. If it's
		 * not alone on its level, then we reject too --- this may be overly
		 * paranoid but better safe than sorry.  Note we don't check P_ISROOT,
		 * because that's not set in a "fast root".
		 */
		if (!P_IGNORE(rootopaque) &&
			rootopaque->btpo.level == rootlevel &&
			P_LEFTMOST(rootopaque) &&
			P_RIGHTMOST(rootopaque))
		{
			/* OK, accept cached page as the root */
			return rootbuf;
		}
		_bt_relbuf(rel, rootbuf);
		/* Cache is stale, throw it away */
		if (rel->rd_amcache)
			pfree(rel->rd_amcache);
		rel->rd_amcache = NULL;
	}

	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
	metapg = BufferGetPage(metabuf);
	metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
	metad = BTPageGetMeta(metapg);

	/* sanity-check the metapage */
	if (!(metaopaque->btpo_flags & BTP_META) ||
		metad->btm_magic != BTREE_MAGIC)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" is not a btree",
						RelationGetRelationName(rel))));

	if (metad->btm_version != BTREE_VERSION)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
						RelationGetRelationName(rel),
						metad->btm_version, BTREE_VERSION)));

	/* if no root page initialized yet, do it */
	if (metad->btm_root == P_NONE)
	{
		/* If access = BT_READ, caller doesn't want us to create root yet */
		if (access == BT_READ)
		{
			_bt_relbuf(rel, metabuf);
			return InvalidBuffer;
		}

		/* trade in our read lock for a write lock */
		LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(metabuf, BT_WRITE);

		/*
		 * Race condition:	if someone else initialized the metadata between
		 * the time we released the read lock and acquired the write lock, we
		 * must avoid doing it again.
		 */
		if (metad->btm_root != P_NONE)
		{
			/*
			 * Metadata initialized by someone else.  In order to guarantee no
			 * deadlocks, we have to release the metadata page and start all
			 * over again.  (Is that really true? But it's hardly worth trying
			 * to optimize this case.)
			 */
			_bt_relbuf(rel, metabuf);
			return _bt_getroot(rel, access);
		}

		/*
		 * Get, initialize, write, and leave a lock of the appropriate type on
		 * the new root page.  Since this is the first page in the tree, it's
		 * a leaf as well as the root.
		 */
		rootbuf = _bt_getbuf(rel, P_NEW, BT_WRITE);
		rootblkno = BufferGetBlockNumber(rootbuf);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);
		rootopaque->btpo_prev = rootopaque->btpo_next = P_NONE;
		rootopaque->btpo_flags = (BTP_LEAF | BTP_ROOT);
		rootopaque->btpo.level = 0;
		rootopaque->btpo_cycleid = 0;

		/* NO ELOG(ERROR) till meta is updated */
		START_CRIT_SECTION();

		metad->btm_root = rootblkno;
		metad->btm_level = 0;
		metad->btm_fastroot = rootblkno;
		metad->btm_fastlevel = 0;

		MarkBufferDirty(rootbuf);
		MarkBufferDirty(metabuf);

		/* XLOG stuff */
		if (RelationNeedsWAL(rel))
		{
			xl_btree_newroot xlrec;
			XLogRecPtr	recptr;
			xl_btree_metadata md;

			XLogBeginInsert();
			XLogRegisterBuffer(0, rootbuf, REGBUF_WILL_INIT);
			XLogRegisterBuffer(2, metabuf, REGBUF_WILL_INIT);

			md.root = rootblkno;
			md.level = 0;
			md.fastroot = rootblkno;
			md.fastlevel = 0;

			XLogRegisterBufData(2, (char *) &md, sizeof(xl_btree_metadata));

			xlrec.rootblk = rootblkno;
			xlrec.level = 0;

			XLogRegisterData((char *) &xlrec, SizeOfBtreeNewroot);

			recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_NEWROOT);

			PageSetLSN(rootpage, recptr);
			PageSetLSN(metapg, recptr);
		}

		END_CRIT_SECTION();

		/*
		 * swap root write lock for read lock.  There is no danger of anyone
		 * else accessing the new root page while it's unlocked, since no one
		 * else knows where it is yet.
		 */
		LockBuffer(rootbuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(rootbuf, BT_READ);

		/* okay, metadata is correct, release lock on it */
		_bt_relbuf(rel, metabuf);
	}
	else
	{
		rootblkno = metad->btm_fastroot;
		Assert(rootblkno != P_NONE);
		rootlevel = metad->btm_fastlevel;

		/*
		 * Cache the metapage data for next time
		 */
		rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
											 sizeof(BTMetaPageData));
		memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));

		/*
		 * We are done with the metapage; arrange to release it via first
		 * _bt_relandgetbuf call
		 */
		rootbuf = metabuf;

		for (;;)
		{
			rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
			rootpage = BufferGetPage(rootbuf);
			rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);

			if (!P_IGNORE(rootopaque))
				break;

			/* it's dead, Jim.  step right one page */
			if (P_RIGHTMOST(rootopaque))
				elog(ERROR, "no live root page found in index \"%s\"",
					 RelationGetRelationName(rel));
			rootblkno = rootopaque->btpo_next;
		}

		/* Note: can't check btpo.level on deleted pages */
		if (rootopaque->btpo.level != rootlevel)
			elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
				 rootblkno, RelationGetRelationName(rel),
				 rootopaque->btpo.level, rootlevel);
	}

	/*
	 * By here, we have a pin and read lock on the root page, and no lock set
	 * on the metadata page.  Return the root page's buffer.
	 */
	return rootbuf;
}
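
/*
 * A minimal caller sketch (hypothetical, for orientation only):
 *
 *	rootbuf = _bt_getroot(rel, BT_READ);
 *	if (!BufferIsValid(rootbuf))
 *		return;				-- no root yet; the index is empty
 *	... descend the tree starting from rootbuf ...
 *	_bt_relbuf(rel, rootbuf);
 */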

/*
 *	_bt_gettrueroot() -- Get the true root page of the btree.
 *
 *		This is the same as the BT_READ case of _bt_getroot(), except
 *		we follow the true-root link not the fast-root link.
 *
 * By the time we acquire lock on the root page, it might have been split and
 * not be the true root anymore.  This is okay for the present uses of this
 * routine; we only really need to be able to move up at least one tree level
 * from whatever non-root page we were at.  If we ever do need to lock the
 * one true root page, we could loop here, re-reading the metapage on each
 * failure.  (Note that it wouldn't do to hold the lock on the metapage while
 * moving to the root --- that'd deadlock against any concurrent root split.)
 */
Buffer
_bt_gettrueroot(Relation rel)
{
	Buffer		metabuf;
	Page		metapg;
	BTPageOpaque metaopaque;
	Buffer		rootbuf;
	Page		rootpage;
	BTPageOpaque rootopaque;
	BlockNumber rootblkno;
	uint32		rootlevel;
	BTMetaPageData *metad;

	/*
	 * We don't try to use cached metapage data here, since (a) this path is
	 * not performance-critical, and (b) if we are here it suggests our cache
	 * is out-of-date anyway.  In light of point (b), it's probably safest to
	 * actively flush any cached metapage info.
	 */
	if (rel->rd_amcache)
		pfree(rel->rd_amcache);
	rel->rd_amcache = NULL;

	metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
	metapg = BufferGetPage(metabuf);
	metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
	metad = BTPageGetMeta(metapg);

	if (!(metaopaque->btpo_flags & BTP_META) ||
		metad->btm_magic != BTREE_MAGIC)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" is not a btree",
						RelationGetRelationName(rel))));

	if (metad->btm_version != BTREE_VERSION)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
						RelationGetRelationName(rel),
						metad->btm_version, BTREE_VERSION)));

	/* if no root page initialized yet, fail */
	if (metad->btm_root == P_NONE)
	{
		_bt_relbuf(rel, metabuf);
		return InvalidBuffer;
	}

	rootblkno = metad->btm_root;
	rootlevel = metad->btm_level;

	/*
	 * We are done with the metapage; arrange to release it via first
	 * _bt_relandgetbuf call
	 */
	rootbuf = metabuf;

	for (;;)
	{
		rootbuf = _bt_relandgetbuf(rel, rootbuf, rootblkno, BT_READ);
		rootpage = BufferGetPage(rootbuf);
		rootopaque = (BTPageOpaque) PageGetSpecialPointer(rootpage);

		if (!P_IGNORE(rootopaque))
			break;

		/* it's dead, Jim.  step right one page */
		if (P_RIGHTMOST(rootopaque))
			elog(ERROR, "no live root page found in index \"%s\"",
				 RelationGetRelationName(rel));
		rootblkno = rootopaque->btpo_next;
	}

	/* Note: can't check btpo.level on deleted pages */
	if (rootopaque->btpo.level != rootlevel)
		elog(ERROR, "root page %u of index \"%s\" has level %u, expected %u",
			 rootblkno, RelationGetRelationName(rel),
			 rootopaque->btpo.level, rootlevel);

	return rootbuf;
}

/*
 *	_bt_getrootheight() -- Get the height of the btree search tree.
 *
 *		We return the level (counting from zero) of the current fast root.
 *		This represents the number of tree levels we'd have to descend through
 *		to start any btree index search.
 *
 *		This is used by the planner for cost-estimation purposes.  Since it's
 *		only an estimate, slightly-stale data is fine, hence we don't worry
 *		about updating previously cached data.
 */
int
_bt_getrootheight(Relation rel)
{
	BTMetaPageData *metad;

	/*
	 * We can get what we need from the cached metapage data.  If it's not
	 * cached yet, load it.  Sanity checks here must match _bt_getroot().
	 */
	if (rel->rd_amcache == NULL)
	{
		Buffer		metabuf;
		Page		metapg;
		BTPageOpaque metaopaque;

		metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_READ);
		metapg = BufferGetPage(metabuf);
		metaopaque = (BTPageOpaque) PageGetSpecialPointer(metapg);
		metad = BTPageGetMeta(metapg);

		/* sanity-check the metapage */
		if (!(metaopaque->btpo_flags & BTP_META) ||
			metad->btm_magic != BTREE_MAGIC)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("index \"%s\" is not a btree",
							RelationGetRelationName(rel))));

		if (metad->btm_version != BTREE_VERSION)
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg("version mismatch in index \"%s\": file version %d, code version %d",
							RelationGetRelationName(rel),
							metad->btm_version, BTREE_VERSION)));

		/*
		 * If there's no root page yet, _bt_getroot() doesn't expect a cache
		 * to be made, so just stop here and report the index height is zero.
		 * (XXX perhaps _bt_getroot() should be changed to allow this case.)
		 */
		if (metad->btm_root == P_NONE)
		{
			_bt_relbuf(rel, metabuf);
			return 0;
		}

		/*
		 * Cache the metapage data for next time
		 */
		rel->rd_amcache = MemoryContextAlloc(rel->rd_indexcxt,
											 sizeof(BTMetaPageData));
		memcpy(rel->rd_amcache, metad, sizeof(BTMetaPageData));

		_bt_relbuf(rel, metabuf);
	}

	metad = (BTMetaPageData *) rel->rd_amcache;
	/* We shouldn't have cached it if any of these fail */
	Assert(metad->btm_magic == BTREE_MAGIC);
	Assert(metad->btm_version == BTREE_VERSION);
	Assert(metad->btm_fastroot != P_NONE);

	return metad->btm_fastlevel;
}
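
/*
 * Worked example (illustrative only): a return value of 2 means the fast
 * root sits two levels above the leaves, so each index search descends
 * through three pages: fast root -> internal page -> leaf.
 */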

/*
 *	_bt_checkpage() -- Verify that a freshly-read page looks sane.
 */
void
_bt_checkpage(Relation rel, Buffer buf)
{
	Page		page = BufferGetPage(buf);

	/*
	 * ReadBuffer verifies that every newly-read page passes
	 * PageHeaderIsValid, which means it either contains a reasonably sane
	 * page header or is all-zero.  We have to defend against the all-zero
	 * case, however.
	 */
	if (PageIsNew(page))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" contains unexpected zero page at block %u",
						RelationGetRelationName(rel),
						BufferGetBlockNumber(buf)),
				 errhint("Please REINDEX it.")));

	/*
	 * Additionally check that the special area looks sane.
	 */
	if (PageGetSpecialSize(page) != MAXALIGN(sizeof(BTPageOpaqueData)))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("index \"%s\" contains corrupted page at block %u",
						RelationGetRelationName(rel),
						BufferGetBlockNumber(buf)),
				 errhint("Please REINDEX it.")));
}

/*
 * Log the reuse of a page from the FSM.
 */
static void
_bt_log_reuse_page(Relation rel, BlockNumber blkno, TransactionId latestRemovedXid)
{
	xl_btree_reuse_page xlrec_reuse;

	/*
	 * Note that we don't register the buffer with the record, because this
	 * operation doesn't modify the page. This record only exists to provide a
	 * conflict point for Hot Standby.
	 */

	/* XLOG stuff */
	xlrec_reuse.node = rel->rd_node;
	xlrec_reuse.block = blkno;
	xlrec_reuse.latestRemovedXid = latestRemovedXid;

	XLogBeginInsert();
	XLogRegisterData((char *) &xlrec_reuse, SizeOfBtreeReusePage);

	XLogInsert(RM_BTREE_ID, XLOG_BTREE_REUSE_PAGE);
}

/*
 *	_bt_getbuf() -- Get a buffer by block number for read or write.
 *
 *		blkno == P_NEW means to get an unallocated index page.  The page
 *		will be initialized before returning it.
 *
 *		When this routine returns, the appropriate lock is set on the
 *		requested buffer and its reference count has been incremented
 *		(ie, the buffer is "locked and pinned").  Also, we apply
 *		_bt_checkpage to sanity-check the page (except in P_NEW case).
 */
Buffer
_bt_getbuf(Relation rel, BlockNumber blkno, int access)
{
	Buffer		buf;

	if (blkno != P_NEW)
	{
		/* Read an existing block of the relation */
		buf = ReadBuffer(rel, blkno);
		LockBuffer(buf, access);
		_bt_checkpage(rel, buf);
	}
	else
	{
		bool		needLock;
		Page		page;

		Assert(access == BT_WRITE);

		/*
		 * First see if the FSM knows of any free pages.
		 *
		 * We can't trust the FSM's report unreservedly; we have to check that
		 * the page is still free.  (For example, an already-free page could
		 * have been re-used between the time the last VACUUM scanned it and
		 * the time the VACUUM made its FSM updates.)
		 *
		 * In fact, it's worse than that: we can't even assume that it's safe
		 * to take a lock on the reported page.  If somebody else has a lock
		 * on it, or even worse our own caller does, we could deadlock.  (The
		 * own-caller scenario is actually not improbable. Consider an index
		 * on a serial or timestamp column.  Nearly all splits will be at the
		 * rightmost page, so it's entirely likely that _bt_split will call us
		 * while holding a lock on the page most recently acquired from FSM. A
		 * VACUUM running concurrently with the previous split could well have
		 * placed that page back in FSM.)
		 *
		 * To get around that, we ask for only a conditional lock on the
		 * reported page.  If we fail, then someone else is using the page,
		 * and we may reasonably assume it's not free.  (If we happen to be
		 * wrong, the worst consequence is the page will be lost to use till
		 * the next VACUUM, which is no big problem.)
		 */
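		/*
		 * In outline, the loop below does (illustrative summary only):
		 *
		 *	blkno = GetFreeIndexPage(rel)         -- ask the FSM
		 *	if ConditionalLockBuffer() fails      -- page busy: drop pin, retry
		 *	else if not _bt_page_recyclable()     -- not actually free: release, retry
		 *	else                                  -- log reuse if needed, re-init, return
		 */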
		for (;;)
		{
			blkno = GetFreeIndexPage(rel);
			if (blkno == InvalidBlockNumber)
				break;
			buf = ReadBuffer(rel, blkno);
			if (ConditionalLockBuffer(buf))
			{
				page = BufferGetPage(buf);
				if (_bt_page_recyclable(page))
				{
					/*
					 * If we are generating WAL for Hot Standby then create a
					 * WAL record that will allow us to conflict with queries
					 * running on standby, in case they have snapshots older
					 * than btpo.xact.  This can only apply if the page does
					 * have a valid btpo.xact value, ie not if it's new.  (We
					 * must check that because an all-zero page has no special
					 * space.)
					 */
					if (XLogStandbyInfoActive() && RelationNeedsWAL(rel) &&
						!PageIsNew(page))
					{
						BTPageOpaque opaque = (BTPageOpaque) PageGetSpecialPointer(page);

						_bt_log_reuse_page(rel, blkno, opaque->btpo.xact);
					}

					/* Okay to use page.  Re-initialize and return it */
					_bt_pageinit(page, BufferGetPageSize(buf));
					return buf;
				}
				elog(DEBUG2, "FSM returned nonrecyclable page");
				_bt_relbuf(rel, buf);
			}
			else
			{
				elog(DEBUG2, "FSM returned nonlockable page");
				/* couldn't get lock, so just drop pin */
				ReleaseBuffer(buf);
			}
		}

		/*
		 * Extend the relation by one page.
		 *
		 * We have to use a lock to ensure no one else is extending the rel at
		 * the same time, else we will both try to initialize the same new
		 * page.  We can skip locking for new or temp relations, however,
		 * since no one else could be accessing them.
		 */
		needLock = !RELATION_IS_LOCAL(rel);

		if (needLock)
			LockRelationForExtension(rel, ExclusiveLock);

		buf = ReadBuffer(rel, P_NEW);

		/* Acquire buffer lock on new page */
		LockBuffer(buf, BT_WRITE);

		/*
		 * Release the file-extension lock; it's now OK for someone else to
		 * extend the relation some more.  Note that we cannot release this
		 * lock before we have buffer lock on the new page, or we risk a race
		 * condition against btvacuumscan --- see comments therein.
		 */
		if (needLock)
			UnlockRelationForExtension(rel, ExclusiveLock);

		/* Initialize the new page before returning it */
		page = BufferGetPage(buf);
		Assert(PageIsNew(page));
		_bt_pageinit(page, BufferGetPageSize(buf));
	}

	/* ref count and lock type are correct */
	return buf;
}

/*
 *	_bt_relandgetbuf() -- release a locked buffer and get another one.
 *
 * This is equivalent to _bt_relbuf followed by _bt_getbuf, with the
 * exception that blkno may not be P_NEW.  Also, if obuf is InvalidBuffer
 * then it reduces to just _bt_getbuf; allowing this case simplifies some
 * callers.
 *
 * The original motivation for using this was to avoid two entries to the
 * bufmgr when one would do.  However, now it's mainly just a notational
 * convenience.  The only case where it saves work over _bt_relbuf/_bt_getbuf
 * is when the target page is the same one already in the buffer.
 */
Buffer
_bt_relandgetbuf(Relation rel, Buffer obuf, BlockNumber blkno, int access)
{
	Buffer		buf;

	Assert(blkno != P_NEW);
	if (BufferIsValid(obuf))
		LockBuffer(obuf, BUFFER_LOCK_UNLOCK);
	buf = ReleaseAndReadBuffer(obuf, rel, blkno);
	LockBuffer(buf, access);
	_bt_checkpage(rel, buf);
	return buf;
}
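
/*
 * Typical step-right idiom built on this function (a hypothetical sketch;
 * "done" stands in for whatever termination test the caller uses):
 *
 *	buf = _bt_getbuf(rel, blkno, BT_READ);
 *	for (;;)
 *	{
 *		opaque = (BTPageOpaque) PageGetSpecialPointer(BufferGetPage(buf));
 *		if (done)
 *			break;
 *		buf = _bt_relandgetbuf(rel, buf, opaque->btpo_next, BT_READ);
 *	}
 */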

/*
 *	_bt_relbuf() -- release a locked buffer.
 *
 * Lock and pin (refcount) are both dropped.
 */
void
_bt_relbuf(Relation rel, Buffer buf)
{
	UnlockReleaseBuffer(buf);
}

/*
 *	_bt_pageinit() -- Initialize a new page.
 *
 * On return, the page header is initialized; data space is empty;
 * special space is zeroed out.
 */
void
_bt_pageinit(Page page, Size size)
{
	PageInit(page, size, sizeof(BTPageOpaqueData));
}

/*
 *	_bt_page_recyclable() -- Is an existing page recyclable?
 *
 * This exists to make sure _bt_getbuf and btvacuumscan have the same
 * policy about whether a page is safe to re-use.  But note that _bt_getbuf
 * knows enough to distinguish the PageIsNew condition from the other one.
 * At some point it might be appropriate to redesign this to have a three-way
 * result value.
 */
bool
_bt_page_recyclable(Page page)
{
	BTPageOpaque opaque;

	/*
	 * It's possible to find an all-zeroes page in an index --- for example, a
	 * backend might successfully extend the relation one page and then crash
	 * before it is able to make a WAL entry for adding the page. If we find a
	 * zeroed page then reclaim it.
	 */
	if (PageIsNew(page))
		return true;

	/*
	 * Otherwise, recycle if deleted and too old to have any processes
	 * interested in it.
	 */
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	if (P_ISDELETED(opaque) &&
		TransactionIdPrecedes(opaque->btpo.xact, RecentGlobalXmin))
		return true;
	return false;
}
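
/*
 * Decision table for the above (a summary, not additional policy):
 *
 *	PageIsNew(page)                                -> recyclable
 *	P_ISDELETED and btpo.xact < RecentGlobalXmin   -> recyclable
 *	anything else (live, or deleted but possibly   -> not recyclable
 *	still visible to some snapshot)
 */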

/*
 * Delete item(s) from a btree page during VACUUM.
 *
 * This must only be used for deleting leaf items.  Deleting an item on a
 * non-leaf page has to be done as part of an atomic action that includes
 * deleting the page it points to.
 *
 * This routine assumes that the caller has pinned and locked the buffer.
 * Also, the given itemnos *must* appear in increasing order in the array.
 *
 * We record VACUUMs and b-tree deletes differently in WAL.  InHotStandby
 * we need to be able to pin all of the blocks in the btree in physical
 * order when replaying the effects of a VACUUM, just as we do for the
 * original VACUUM itself.  lastBlockVacuumed allows us to tell which
 * intermediate ranges of blocks VACUUM made no changes to; such blocks
 * must nevertheless be scanned during replay.  We always write a WAL record
 * for the last block in the index, whether or not it contained any items
 * to be removed.  This allows us to scan right up to the end of the index
 * to ensure correct locking.
 */
void
_bt_delitems_vacuum(Relation rel, Buffer buf,
					OffsetNumber *itemnos, int nitems,
					BlockNumber lastBlockVacuumed)
{
	Page		page = BufferGetPage(buf);
	BTPageOpaque opaque;

	/* No ereport(ERROR) until changes are logged */
	START_CRIT_SECTION();

	/* Fix the page */
	if (nitems > 0)
		PageIndexMultiDelete(page, itemnos, nitems);

	/*
	 * We can clear the vacuum cycle ID since this page has certainly been
	 * processed by the current vacuum scan.
	 */
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	opaque->btpo_cycleid = 0;

	/*
	 * Mark the page as not containing any LP_DEAD items.  This is not
	 * necessarily true (there might be some that have recently been marked,
	 * but weren't included in our target-item list), but it will almost
	 * always be true and it doesn't seem worth an additional page scan to
	 * check it.  Remember that BTP_HAS_GARBAGE is only a hint anyway.
	 */
	opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

	MarkBufferDirty(buf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;
		xl_btree_vacuum xlrec_vacuum;

		xlrec_vacuum.lastBlockVacuumed = lastBlockVacuumed;

		XLogBeginInsert();
		XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
		XLogRegisterData((char *) &xlrec_vacuum, SizeOfBtreeVacuum);

		/*
		 * The target-offsets array is not in the buffer, but pretend that it
		 * is.  When XLogInsert stores the whole buffer, the offsets array
		 * need not be stored too.
		 */
		if (nitems > 0)
			XLogRegisterBufData(0, (char *) itemnos, nitems * sizeof(OffsetNumber));

		recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_VACUUM);

		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();
}

/*
 * Delete item(s) from a btree page during single-page cleanup.
 *
 * As above, must only be used on leaf pages.
 *
 * This routine assumes that the caller has pinned and locked the buffer.
 * Also, the given itemnos *must* appear in increasing order in the array.
 *
 * This is nearly the same as _bt_delitems_vacuum as far as what it does to
 * the page, but the WAL logging considerations are quite different.  See
 * comments for _bt_delitems_vacuum.
 */
void
_bt_delitems_delete(Relation rel, Buffer buf,
					OffsetNumber *itemnos, int nitems,
					Relation heapRel)
{
	Page		page = BufferGetPage(buf);
	BTPageOpaque opaque;

	/* Shouldn't be called unless there's something to do */
	Assert(nitems > 0);

	/* No ereport(ERROR) until changes are logged */
	START_CRIT_SECTION();

	/* Fix the page */
	PageIndexMultiDelete(page, itemnos, nitems);

	/*
	 * Unlike _bt_delitems_vacuum, we *must not* clear the vacuum cycle ID,
	 * because this is not called by VACUUM.
	 */

	/*
	 * Mark the page as not containing any LP_DEAD items.  This is not
	 * necessarily true (there might be some that have recently been marked,
	 * but weren't included in our target-item list), but it will almost
	 * always be true and it doesn't seem worth an additional page scan to
	 * check it.  Remember that BTP_HAS_GARBAGE is only a hint anyway.
	 */
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

	MarkBufferDirty(buf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		XLogRecPtr	recptr;
		xl_btree_delete xlrec_delete;

		xlrec_delete.hnode = heapRel->rd_node;
		xlrec_delete.nitems = nitems;

		XLogBeginInsert();
		XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
		XLogRegisterData((char *) &xlrec_delete, SizeOfBtreeDelete);

		/*
		 * We need the target-offsets array whether or not we store the whole
		 * buffer, to allow us to find the latestRemovedXid on a standby
		 * server.
		 */
		XLogRegisterData((char *) itemnos, nitems * sizeof(OffsetNumber));

		recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_DELETE);

		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();
}

/*
 * Returns true if the given block has the half-dead flag set.
 */
static bool
_bt_is_page_halfdead(Relation rel, BlockNumber blk)
{
	Buffer		buf;
	Page		page;
	BTPageOpaque opaque;
	bool		result;

	buf = _bt_getbuf(rel, blk, BT_READ);
	page = BufferGetPage(buf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	result = P_ISHALFDEAD(opaque);
	_bt_relbuf(rel, buf);

	return result;
}

/*
 * Subroutine to find the parent of the branch we're deleting.  This climbs
 * up the tree until it finds a page with more than one child, i.e. a page
 * that will not be totally emptied by the deletion.  The chain of pages below
 * it, with one downlink each, will form the branch that we need to delete.
 *
 * If we cannot remove the downlink from the parent because it's the
 * rightmost entry, returns false.  On success, *topparent and *topoff are set
 * to the buffer holding the parent, and the offset of the downlink in it.
 * *topparent is write-locked, the caller is responsible for releasing it when
 * done.  *target is set to the topmost page in the branch to-be-deleted, i.e.
 * the page whose downlink *topparent / *topoff point to, and *rightsib to its
 * right sibling.
 *
 * "child" is the leaf page we wish to delete, and "stack" is a search stack
 * leading to it (approximately).  Note that we will update the stack
 * entry(s) to reflect current downlink positions --- this is harmless and
 * indeed saves later search effort in _bt_pagedel.  The caller should
 * initialize *target and *rightsib to the leaf page and its right sibling.
 *
 * Note: it's OK to release page locks on any internal pages between the leaf
 * and *topparent, because a safe deletion can't become unsafe due to
 * concurrent activity.  An internal page can only acquire an entry if the
 * child is split, but that cannot happen as long as we hold a lock on the
 * leaf.
 */
static bool
_bt_lock_branch_parent(Relation rel, BlockNumber child, BTStack stack,
					   Buffer *topparent, OffsetNumber *topoff,
					   BlockNumber *target, BlockNumber *rightsib)
{
	BlockNumber parent;
	OffsetNumber poffset,
				maxoff;
	Buffer		pbuf;
	Page		page;
	BTPageOpaque opaque;
	BlockNumber leftsib;

	/*
	 * Locate the downlink of "child" in the parent (updating the stack entry
	 * if needed)
	 */
	ItemPointerSet(&(stack->bts_btentry.t_tid), child, P_HIKEY);
	pbuf = _bt_getstackbuf(rel, stack, BT_WRITE);
	if (pbuf == InvalidBuffer)
		elog(ERROR, "failed to re-find parent key in index \"%s\" for deletion target page %u",
			 RelationGetRelationName(rel), child);
	parent = stack->bts_blkno;
	poffset = stack->bts_offset;

	page = BufferGetPage(pbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	maxoff = PageGetMaxOffsetNumber(page);

	/*
	 * If the target is the rightmost child of its parent, then we can't
	 * delete, unless it's also the only child.
	 */
	if (poffset >= maxoff)
	{
		/* It's rightmost child... */
		if (poffset == P_FIRSTDATAKEY(opaque))
		{
			/*
			 * It's only child, so safe if parent would itself be removable.
			 * We have to check the parent itself, and then recurse to test
			 * the conditions at the parent's parent.
			 */
			if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) ||
				P_INCOMPLETE_SPLIT(opaque))
			{
				_bt_relbuf(rel, pbuf);
				return false;
			}

			*target = parent;
			*rightsib = opaque->btpo_next;
			leftsib = opaque->btpo_prev;

			_bt_relbuf(rel, pbuf);

			/*
			 * Like in _bt_pagedel, check that the left sibling is not marked
			 * with INCOMPLETE_SPLIT flag.  That would mean that there is no
			 * downlink to the page to be deleted, and the page deletion
			 * algorithm isn't prepared to handle that.
			 */
			if (leftsib != P_NONE)
			{
				Buffer		lbuf;
				Page		lpage;
				BTPageOpaque lopaque;

				lbuf = _bt_getbuf(rel, leftsib, BT_READ);
				lpage = BufferGetPage(lbuf);
				lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);

				/*
				 * If the left sibling was concurrently split, so that its
				 * next-pointer doesn't point to the current page anymore, the
				 * split that created the current page must be completed. (We
				 * don't allow splitting an incompletely split page again
				 * until the previous split has been completed)
				 */
				if (lopaque->btpo_next == parent &&
					P_INCOMPLETE_SPLIT(lopaque))
				{
					_bt_relbuf(rel, lbuf);
					return false;
				}
				_bt_relbuf(rel, lbuf);
			}

			/*
			 * Perform the same check on this internal level that
			 * _bt_mark_page_halfdead performed on the leaf level.
			 */
			if (_bt_is_page_halfdead(rel, *rightsib))
			{
				elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
					 parent, *rightsib);
				return false;
			}

			return _bt_lock_branch_parent(rel, parent, stack->bts_parent,
										  topparent, topoff, target, rightsib);
		}
		else
		{
			/* Unsafe to delete */
			_bt_relbuf(rel, pbuf);
			return false;
		}
	}
	else
	{
		/* Not rightmost child, so safe to delete */
		*topparent = pbuf;
		*topoff = poffset;
		return true;
	}
}
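
/*
 * Shape of the to-be-deleted "branch", schematically (illustrative only):
 *
 *	*topparent -- has more than one child; its downlink at offset *topoff
 *	     |        points to *target
 *	 *target  -- topmost page of the branch; *rightsib is its right sibling
 *	     |
 *	    ...   -- zero or more internal pages, one downlink each
 *	     |
 *	   child  -- the leaf page we started from
 */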

/*
 * _bt_pagedel() -- Delete a page from the b-tree, if legal to do so.
 *
 * This action unlinks the page from the b-tree structure, removing all
 * pointers leading to it --- but not touching its own left and right links.
 * The page cannot be physically reclaimed right away, since other processes
 * may currently be trying to follow links leading to the page; they have to
 * be allowed to use its right-link to recover.  See nbtree/README.
 *
 * On entry, the target buffer must be pinned and locked (either read or write
 * lock is OK).  This lock and pin will be dropped before exiting.
 *
 * Returns the number of pages successfully deleted (zero if page cannot
 * be deleted now; could be more than one if parent or sibling pages were
 * deleted too).
 *
 * NOTE: this leaks memory.  Rather than trying to clean up everything
 * carefully, it's better to run it in a temp context that can be reset
 * frequently.
 */
int
_bt_pagedel(Relation rel, Buffer buf)
{
	int			ndeleted = 0;
	BlockNumber rightsib;
	bool		rightsib_empty;
	Page		page;
	BTPageOpaque opaque;

	/*
	 * "stack" is a search stack leading (approximately) to the target page.
	 * It is initially NULL, but when iterating, we keep it to avoid
	 * duplicated search effort.
	 *
	 * Also, when "stack" is not NULL, we have already checked that the
	 * current page is not the right half of an incomplete split, i.e. the
	 * left sibling does not have its INCOMPLETE_SPLIT flag set.
	 */
	BTStack		stack = NULL;

	for (;;)
	{
		page = BufferGetPage(buf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);

		/*
		 * Internal pages are never deleted directly, only as part of deleting
		 * the whole branch all the way down to leaf level.
		 */
		if (!P_ISLEAF(opaque))
		{
			/*
			 * Pre-9.4 page deletion only marked internal pages as half-dead,
			 * but now we only use that flag on leaf pages. The old algorithm
			 * was never supposed to leave half-dead pages in the tree, it was
			 * just a transient state, but it was nevertheless possible in
			 * error scenarios. We don't know how to deal with them here. They
			 * are harmless as far as searches are concerned, but inserts
			 * into the deleted keyspace could add out-of-order downlinks in
			 * the upper levels. Log a notice, hopefully the admin will notice
			 * and reindex.
			 */
			if (P_ISHALFDEAD(opaque))
				ereport(LOG,
						(errcode(ERRCODE_INDEX_CORRUPTED),
						 errmsg("index \"%s\" contains a half-dead internal page",
								RelationGetRelationName(rel)),
						 errhint("This can be caused by an interrupted VACUUM in version 9.3 or older, before upgrade. Please REINDEX it.")));
			_bt_relbuf(rel, buf);
			return ndeleted;
		}

		/*
		 * We can never delete rightmost pages nor root pages.  While at it,
		 * check that page is not already deleted and is empty.
		 *
		 * To keep the algorithm simple, we also never delete an incompletely
		 * split page (they should be rare enough that this doesn't make any
		 * meaningful difference to disk usage):
		 *
		 * The INCOMPLETE_SPLIT flag on the page tells us if the page is the
		 * left half of an incomplete split, but ensuring that it's not the
		 * right half is more complicated.  For that, we have to check that
		 * the left sibling doesn't have its INCOMPLETE_SPLIT flag set.  On
		 * the first iteration, we temporarily release the lock on the current
		 * page, and check the left sibling and also construct a search stack
		 * to the page.  On subsequent iterations, we know we stepped right
		 * from a page that passed these tests, so it's OK.
		 */
		if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque) ||
			P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
			P_INCOMPLETE_SPLIT(opaque))
		{
			/* Should never fail to delete a half-dead page */
			Assert(!P_ISHALFDEAD(opaque));

			_bt_relbuf(rel, buf);
			return ndeleted;
		}

		/*
		 * First, remove downlink pointing to the page (or a parent of the
		 * page, if we are going to delete a taller branch), and mark the page
		 * as half-dead.
		 */
		if (!P_ISHALFDEAD(opaque))
		{
			/*
			 * We need an approximate pointer to the page's parent page.  We
			 * use the standard search mechanism to search for the page's high
			 * key; this will give us a link to either the current parent or
			 * someplace to its left (if there are multiple equal high keys).
			 *
			 * Also check if this is the right-half of an incomplete split
			 * (see comment above).
			 */
			if (!stack)
			{
				ScanKey		itup_scankey;
				ItemId		itemid;
				IndexTuple	targetkey;
				Buffer		lbuf;
				BlockNumber leftsib;

				itemid = PageGetItemId(page, P_HIKEY);
				targetkey = CopyIndexTuple((IndexTuple) PageGetItem(page, itemid));

				leftsib = opaque->btpo_prev;

				/*
				 * To avoid deadlocks, we'd better drop the leaf page lock
				 * before going further.
				 */
				LockBuffer(buf, BUFFER_LOCK_UNLOCK);

				/*
				 * Fetch the left sibling, to check that it's not marked with
				 * INCOMPLETE_SPLIT flag.  That would mean that the page
				 * to-be-deleted doesn't have a downlink, and the page
				 * deletion algorithm isn't prepared to handle that.
				 */
				if (!P_LEFTMOST(opaque))
				{
					BTPageOpaque lopaque;
					Page		lpage;

					lbuf = _bt_getbuf(rel, leftsib, BT_READ);
					lpage = BufferGetPage(lbuf);
					lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);

					/*
					 * If the left sibling is split again by another backend,
					 * after we released the lock, we know that the first
					 * split must have finished, because we don't allow an
					 * incompletely-split page to be split again.  So we don't
					 * need to walk right here.
					 */
					if (lopaque->btpo_next == BufferGetBlockNumber(buf) &&
						P_INCOMPLETE_SPLIT(lopaque))
					{
						ReleaseBuffer(buf);
						_bt_relbuf(rel, lbuf);
						return ndeleted;
					}
					_bt_relbuf(rel, lbuf);
				}

				/* we need an insertion scan key for the search, so build one */
				itup_scankey = _bt_mkscankey(rel, targetkey);
				/* find the leftmost leaf page containing this key */
				stack = _bt_search(rel, rel->rd_rel->relnatts, itup_scankey,
								   false, &lbuf, BT_READ, NULL);
				/* don't need a pin on the page */
				_bt_relbuf(rel, lbuf);

				/*
				 * Re-lock the leaf page, and start over, to re-check that the
				 * page can still be deleted.
				 */
				LockBuffer(buf, BT_WRITE);
				continue;
			}

			if (!_bt_mark_page_halfdead(rel, buf, stack))
			{
				_bt_relbuf(rel, buf);
				return ndeleted;
			}
		}

		/*
		 * Then unlink it from its siblings.  Each call to
		 * _bt_unlink_halfdead_page unlinks the topmost page from the branch,
		 * making it shallower.  Iterate until the leaf page is gone.
		 */
		rightsib_empty = false;
		while (P_ISHALFDEAD(opaque))
		{
			/* will check for interrupts, once lock is released */
			if (!_bt_unlink_halfdead_page(rel, buf, &rightsib_empty))
			{
				/* _bt_unlink_halfdead_page already released buffer */
				return ndeleted;
			}
			ndeleted++;
		}

		rightsib = opaque->btpo_next;

		_bt_relbuf(rel, buf);

		/*
		 * Check here, as calling loops will have locks held, preventing
		 * interrupts from being processed.
		 */
		CHECK_FOR_INTERRUPTS();

		/*
		 * The page has now been deleted. If its right sibling is completely
		 * empty, it's possible that the reason we haven't deleted it earlier
		 * is that it was the rightmost child of the parent. Now that we
		 * removed the downlink for this page, the right sibling might now be
		 * the only child of the parent, and could be removed. It would be
		 * picked up by the next vacuum anyway, but might as well try to
		 * remove it now, so loop back to process the right sibling.
		 */
		if (!rightsib_empty)
			break;

		buf = _bt_getbuf(rel, rightsib, BT_WRITE);
	}

	return ndeleted;
}

/*
 * First stage of page deletion.  Remove the downlink to the top of the
 * branch being deleted, and mark the leaf page as half-dead.
 */
static bool
_bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
{
	BlockNumber leafblkno;
	BlockNumber leafrightsib;
	BlockNumber target;
	BlockNumber rightsib;
	ItemId		itemid;
	Page		page;
	BTPageOpaque opaque;
	Buffer		topparent;
	OffsetNumber topoff;
	OffsetNumber nextoffset;
	IndexTuple	itup;
	IndexTupleData trunctuple;

	page = BufferGetPage(leafbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	Assert(!P_RIGHTMOST(opaque) && !P_ISROOT(opaque) && !P_ISDELETED(opaque) &&
		   !P_ISHALFDEAD(opaque) && P_ISLEAF(opaque) &&
		   P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));

	/*
	 * Save info about the leaf page.
	 */
	leafblkno = BufferGetBlockNumber(leafbuf);
	leafrightsib = opaque->btpo_next;

	/*
	 * Before attempting to lock the parent page, check that the right sibling
	 * is not in half-dead state.  A half-dead right sibling would have no
	 * downlink in the parent, which would be highly confusing later when we
	 * delete the downlink that follows the current page's downlink. (I
	 * believe the deletion would work correctly, but it would fail the
	 * cross-check we make that the following downlink points to the right
	 * sibling of the page being deleted.)
	 */
	if (_bt_is_page_halfdead(rel, leafrightsib))
	{
		elog(DEBUG1, "could not delete page %u because its right sibling %u is half-dead",
			 leafblkno, leafrightsib);
		return false;
	}

	/*
	 * We cannot delete a page that is the rightmost child of its immediate
	 * parent, unless it is the only child --- in which case the parent has to
	 * be deleted too, and the same condition applies recursively to it. We
	 * have to check this condition all the way up before trying to delete,
	 * and lock the final parent of the to-be-deleted branch.
	 */
	rightsib = leafrightsib;
	target = leafblkno;
	if (!_bt_lock_branch_parent(rel, leafblkno, stack,
								&topparent, &topoff, &target, &rightsib))
		return false;

	/*
	 * Check that the parent-page index items we're about to delete/overwrite
	 * contain what we expect.  This can fail if the index has become corrupt
	 * for some reason.  We want to throw any error before entering the
	 * critical section --- otherwise it'd be a PANIC.
	 *
	 * The test on the target item is just an Assert because
	 * _bt_lock_branch_parent should have guaranteed it has the expected
	 * contents.  The test on the next-child downlink is known to sometimes
	 * fail in the field, though.
	 */
	page = BufferGetPage(topparent);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

#ifdef USE_ASSERT_CHECKING
	itemid = PageGetItemId(page, topoff);
	itup = (IndexTuple) PageGetItem(page, itemid);
	Assert(ItemPointerGetBlockNumber(&(itup->t_tid)) == target);
#endif

	nextoffset = OffsetNumberNext(topoff);
	itemid = PageGetItemId(page, nextoffset);
	itup = (IndexTuple) PageGetItem(page, itemid);
	if (ItemPointerGetBlockNumber(&(itup->t_tid)) != rightsib)
		elog(ERROR, "right sibling %u of block %u is not next child %u of block %u in index \"%s\"",
			 rightsib, target, ItemPointerGetBlockNumber(&(itup->t_tid)),
			 BufferGetBlockNumber(topparent), RelationGetRelationName(rel));

	/*
	 * Any insert which would have gone on the leaf block will now go to its
	 * right sibling.
	 */
	PredicateLockPageCombine(rel, leafblkno, leafrightsib);

	/* No ereport(ERROR) until changes are logged */
	START_CRIT_SECTION();

	/*
	 * Update parent.  The normal case is a tad tricky because we want to
	 * delete the target's downlink and the *following* key.  Easiest way is
	 * to copy the right sibling's downlink over the target downlink, and then
	 * delete the following item.
	 */
	page = BufferGetPage(topparent);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	itemid = PageGetItemId(page, topoff);
	itup = (IndexTuple) PageGetItem(page, itemid);
	ItemPointerSet(&(itup->t_tid), rightsib, P_HIKEY);

	nextoffset = OffsetNumberNext(topoff);
	PageIndexTupleDelete(page, nextoffset);

	/*
	 * Mark the leaf page as half-dead, and stamp it with a pointer to the
	 * highest internal page in the branch we're deleting.  We use the tid of
	 * the high key to store it.
	 */
	page = BufferGetPage(leafbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	opaque->btpo_flags |= BTP_HALF_DEAD;

	PageIndexTupleDelete(page, P_HIKEY);
	Assert(PageGetMaxOffsetNumber(page) == 0);
	MemSet(&trunctuple, 0, sizeof(IndexTupleData));
	trunctuple.t_info = sizeof(IndexTupleData);
	if (target != leafblkno)
		ItemPointerSet(&trunctuple.t_tid, target, P_HIKEY);
	else
		ItemPointerSetInvalid(&trunctuple.t_tid);
	if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY,
					false, false) == InvalidOffsetNumber)
		elog(ERROR, "could not add dummy high key to half-dead page");

	/* Must mark buffers dirty before XLogInsert */
	MarkBufferDirty(topparent);
	MarkBufferDirty(leafbuf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_btree_mark_page_halfdead xlrec;
		XLogRecPtr	recptr;

		xlrec.poffset = topoff;
		xlrec.leafblk = leafblkno;
		if (target != leafblkno)
			xlrec.topparent = target;
		else
			xlrec.topparent = InvalidBlockNumber;

		XLogBeginInsert();
		XLogRegisterBuffer(0, leafbuf, REGBUF_WILL_INIT);
		XLogRegisterBuffer(1, topparent, REGBUF_STANDARD);

		page = BufferGetPage(leafbuf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		xlrec.leftblk = opaque->btpo_prev;
		xlrec.rightblk = opaque->btpo_next;

		XLogRegisterData((char *) &xlrec, SizeOfBtreeMarkPageHalfDead);

		recptr = XLogInsert(RM_BTREE_ID, XLOG_BTREE_MARK_PAGE_HALFDEAD);

		page = BufferGetPage(topparent);
		PageSetLSN(page, recptr);
		page = BufferGetPage(leafbuf);
		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();

	_bt_relbuf(rel, topparent);
	return true;
}
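
/*
 * The parent update above, schematically (illustrative only).  The downlink
 * to the target and the downlink to its right sibling are adjacent items;
 * we make the target's key point at the right sibling and delete the item
 * that follows:
 *
 *	before:  ... [key_i -> target] [key_j -> rightsib] ...
 *	after:   ... [key_i -> rightsib] ...
 */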
1508 
1509 /*
1510  * Unlink a page in a branch of half-dead pages from its siblings.
1511  *
1512  * If the leaf page still has a downlink pointing to it, unlinks the highest
1513  * parent in the to-be-deleted branch instead of the leaf page.  To get rid
1514  * of the whole branch, including the leaf page itself, iterate until the
1515  * leaf page is deleted.
1516  *
1517  * Returns 'false' if the page could not be unlinked (shouldn't happen).
1518  * If the (new) right sibling of the page is empty, *rightsib_empty is set
1519  * to true.
1520  *
1521  * Must hold pin and lock on leafbuf at entry (read or write doesn't matter).
1522  * On success exit, we'll be holding pin and write lock.  On failure exit,
1523  * we'll release both pin and lock before returning (we define it that way
1524  * to avoid having to reacquire a lock we already released).
1525  */
1526 static bool
_bt_unlink_halfdead_page(Relation rel,Buffer leafbuf,bool * rightsib_empty)1527 _bt_unlink_halfdead_page(Relation rel, Buffer leafbuf, bool *rightsib_empty)
{
	BlockNumber leafblkno = BufferGetBlockNumber(leafbuf);
	BlockNumber leafleftsib;
	BlockNumber leafrightsib;
	BlockNumber target;
	BlockNumber leftsib;
	BlockNumber rightsib;
	Buffer		lbuf = InvalidBuffer;
	Buffer		buf;
	Buffer		rbuf;
	Buffer		metabuf = InvalidBuffer;
	Page		metapg = NULL;
	BTMetaPageData *metad = NULL;
	ItemId		itemid;
	Page		page;
	BTPageOpaque opaque;
	bool		rightsib_is_rightmost;
	int			targetlevel;
	ItemPointer leafhikey;
	BlockNumber nextchild;

	page = BufferGetPage(leafbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	Assert(P_ISLEAF(opaque) && P_ISHALFDEAD(opaque));

	/*
	 * Remember some information about the leaf page.
	 */
	itemid = PageGetItemId(page, P_HIKEY);
	leafhikey = &((IndexTuple) PageGetItem(page, itemid))->t_tid;
	leafleftsib = opaque->btpo_prev;
	leafrightsib = opaque->btpo_next;

	LockBuffer(leafbuf, BUFFER_LOCK_UNLOCK);

	/*
	 * Check for interrupts here, since our calling loops will have locks
	 * held, preventing interrupts from being processed.
	 */
	CHECK_FOR_INTERRUPTS();

	/*
	 * If the leaf page still has a parent pointing to it (or a chain of
	 * parents), we don't unlink the leaf page yet; instead we unlink the
	 * topmost remaining parent in the branch.  Set 'target' and 'buf' to
	 * reference the page actually being unlinked.
	 */
	if (ItemPointerIsValid(leafhikey))
	{
		target = ItemPointerGetBlockNumber(leafhikey);
		Assert(target != leafblkno);

		/* fetch the block number of the topmost parent's left sibling */
		buf = _bt_getbuf(rel, target, BT_READ);
		page = BufferGetPage(buf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		leftsib = opaque->btpo_prev;
		targetlevel = opaque->btpo.level;

		/*
		 * To avoid deadlocks, we'd better drop the target page lock before
		 * going further.
		 */
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	}
	else
	{
		target = leafblkno;

		buf = leafbuf;
		leftsib = leafleftsib;
		targetlevel = 0;
	}
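
	/*
	 * Note that targetlevel is the btree level of the page being unlinked:
	 * 0 when the target is the leaf itself, >= 1 when it is an internal
	 * parent.  The fast-root check and the metapage update below rely on
	 * this value.
	 */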

	/*
	 * We have to lock the pages we need to modify in the standard order:
	 * moving right, then up.  Else we will deadlock against other writers.
	 *
	 * So, first lock the leaf page, if it's not the target.  Then find and
	 * write-lock the current left sibling of the target page.  The sibling
	 * that was current a moment ago could have split, so we may have to move
	 * right.  This search could fail if either the sibling or the target page
	 * was deleted by someone else meanwhile; if so, give up.  (Right now,
	 * that should never happen, since page deletion is only done in VACUUM
	 * and there shouldn't be multiple VACUUMs concurrently on the same
	 * table.)
	 */
	if (target != leafblkno)
		LockBuffer(leafbuf, BT_WRITE);
	if (leftsib != P_NONE)
	{
		lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
		page = BufferGetPage(lbuf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		while (P_ISDELETED(opaque) || opaque->btpo_next != target)
		{
			/* step right one page */
			leftsib = opaque->btpo_next;
			_bt_relbuf(rel, lbuf);

			/*
			 * It'd be good to check for interrupts here, but it's not easy to
			 * do so because a lock is always held. This block isn't
			 * frequently reached, so hopefully the consequences of not
			 * checking interrupts aren't too bad.
			 */

			if (leftsib == P_NONE)
			{
				elog(LOG, "no left sibling (concurrent deletion?) of block %u in \"%s\"",
					 target,
					 RelationGetRelationName(rel));
				if (target != leafblkno)
				{
					/* we have only a pin on target, but pin+lock on leafbuf */
					ReleaseBuffer(buf);
					_bt_relbuf(rel, leafbuf);
				}
				else
				{
					/* we have only a pin on leafbuf */
					ReleaseBuffer(leafbuf);
				}
				return false;
			}
			lbuf = _bt_getbuf(rel, leftsib, BT_WRITE);
			page = BufferGetPage(lbuf);
			opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		}
	}
	else
		lbuf = InvalidBuffer;

	/*
	 * Next write-lock the target page itself.  It should be okay to take
	 * just a write lock, not a superexclusive lock, since no scans would
	 * stop on an empty page.
	 */
	LockBuffer(buf, BT_WRITE);
	page = BufferGetPage(buf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);

	/*
	 * Check page is still empty etc, else abandon deletion.  This is just for
	 * paranoia's sake; a half-dead page cannot resurrect because there can be
	 * only one vacuum process running at a time.
	 */
	if (P_RIGHTMOST(opaque) || P_ISROOT(opaque) || P_ISDELETED(opaque))
	{
		elog(ERROR, "half-dead page changed status unexpectedly in block %u of index \"%s\"",
			 target, RelationGetRelationName(rel));
	}
	if (opaque->btpo_prev != leftsib)
		elog(ERROR, "left link changed unexpectedly in block %u of index \"%s\"",
			 target, RelationGetRelationName(rel));

	if (target == leafblkno)
	{
		if (P_FIRSTDATAKEY(opaque) <= PageGetMaxOffsetNumber(page) ||
			!P_ISLEAF(opaque) || !P_ISHALFDEAD(opaque))
			elog(ERROR, "half-dead page changed status unexpectedly in block %u of index \"%s\"",
				 target, RelationGetRelationName(rel));
		nextchild = InvalidBlockNumber;
	}
	else
	{
		if (P_FIRSTDATAKEY(opaque) != PageGetMaxOffsetNumber(page) ||
			P_ISLEAF(opaque))
			elog(ERROR, "half-dead page changed status unexpectedly in block %u of index \"%s\"",
				 target, RelationGetRelationName(rel));

		/* remember the next non-leaf child down in the branch. */
		itemid = PageGetItemId(page, P_FIRSTDATAKEY(opaque));
		nextchild = ItemPointerGetBlockNumber(&((IndexTuple) PageGetItem(page, itemid))->t_tid);
		if (nextchild == leafblkno)
			nextchild = InvalidBlockNumber;
	}
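
	/*
	 * In the internal-page case the only data item left on the half-dead
	 * parent is the downlink to its sole remaining child (that is what the
	 * P_FIRSTDATAKEY == PageGetMaxOffsetNumber check above verified), so
	 * nextchild now names the next page down in the branch, or is invalid
	 * when that child is the leaf itself.
	 */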

	/*
	 * And next write-lock the (current) right sibling.
	 */
	rightsib = opaque->btpo_next;
	rbuf = _bt_getbuf(rel, rightsib, BT_WRITE);
	page = BufferGetPage(rbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	if (opaque->btpo_prev != target)
		elog(ERROR, "right sibling's left-link doesn't match: "
			 "block %u links to %u instead of expected %u in index \"%s\"",
			 rightsib, opaque->btpo_prev, target,
			 RelationGetRelationName(rel));
	rightsib_is_rightmost = P_RIGHTMOST(opaque);
	*rightsib_empty = (P_FIRSTDATAKEY(opaque) > PageGetMaxOffsetNumber(page));
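
	/*
	 * This emptiness test works because P_FIRSTDATAKEY is the offset just
	 * past the high key, if any: a non-rightmost page holding only its high
	 * key has max offset 1 while its first data key would be at offset 2,
	 * so no data items remain.
	 */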

	/*
	 * If we are deleting the next-to-last page on the target's level, then
	 * the rightsib is a candidate to become the new fast root. (In theory, it
	 * might be possible to push the fast root even further down, but the odds
	 * of doing so are slim, and the locking considerations daunting.)
	 *
	 * We don't handle the case where the parent is becoming half-dead, even
	 * though it theoretically could occur.
	 *
	 * We can safely acquire a lock on the metapage here --- see comments for
	 * _bt_newroot().
	 */
	if (leftsib == P_NONE && rightsib_is_rightmost)
	{
		page = BufferGetPage(rbuf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		if (P_RIGHTMOST(opaque))
		{
			/* rightsib will be the only one left on the level */
			metabuf = _bt_getbuf(rel, BTREE_METAPAGE, BT_WRITE);
			metapg = BufferGetPage(metabuf);
			metad = BTPageGetMeta(metapg);

			/*
			 * The expected case here is btm_fastlevel == targetlevel+1; if
			 * the fastlevel is <= targetlevel, something is wrong, and we
			 * choose to overwrite it to fix it.
			 */
			if (metad->btm_fastlevel > targetlevel + 1)
			{
				/* no update wanted */
				_bt_relbuf(rel, metabuf);
				metabuf = InvalidBuffer;
			}
		}
	}
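
	/*
	 * Worked example of the level check above: if we are unlinking the last
	 * leaf other than rightsib (targetlevel 0) and btm_fastlevel is 1, then
	 * btm_fastlevel == targetlevel + 1 and we keep metabuf so the fast root
	 * can be pointed at rightsib below.  If btm_fastlevel were, say, 3, the
	 * fast root already sits above this level and no update is wanted.
	 */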

	/*
	 * Here we begin doing the deletion.
	 */

	/* No ereport(ERROR) until changes are logged */
	START_CRIT_SECTION();

	/*
	 * Update siblings' side-links.  Note the target page's side-links will
	 * continue to point to the siblings.  Asserts here are just rechecking
	 * things we already verified above.
	 */
	if (BufferIsValid(lbuf))
	{
		page = BufferGetPage(lbuf);
		opaque = (BTPageOpaque) PageGetSpecialPointer(page);
		Assert(opaque->btpo_next == target);
		opaque->btpo_next = rightsib;
	}
	page = BufferGetPage(rbuf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	Assert(opaque->btpo_prev == target);
	opaque->btpo_prev = leftsib;

	/*
	 * If we deleted a parent of the targeted leaf page, instead of the leaf
	 * itself, update the leaf to point to the next remaining child in the
	 * branch.
	 */
	if (target != leafblkno)
	{
		if (nextchild == InvalidBlockNumber)
			ItemPointerSetInvalid(leafhikey);
		else
			ItemPointerSet(leafhikey, nextchild, P_HIKEY);
	}

	/*
	 * Mark the page itself deleted.  It can be recycled when all current
	 * transactions are gone.  Storing GetTopTransactionId() would work, but
	 * we're in VACUUM and would not otherwise have an XID.  Having already
	 * updated links to the target, ReadNewTransactionId() suffices as an
	 * upper bound.  Any scan having retained a now-stale link is advertising
	 * in its PGXACT an xmin less than or equal to the value we read here.  It
	 * will continue to do so, holding back RecentGlobalXmin, for the duration
	 * of that scan.
	 */
	page = BufferGetPage(buf);
	opaque = (BTPageOpaque) PageGetSpecialPointer(page);
	opaque->btpo_flags &= ~BTP_HALF_DEAD;
	opaque->btpo_flags |= BTP_DELETED;
	opaque->btpo.xact = ReadNewTransactionId();

	/* And update the metapage, if needed */
	if (BufferIsValid(metabuf))
	{
		metad->btm_fastroot = rightsib;
		metad->btm_fastlevel = targetlevel;
		MarkBufferDirty(metabuf);
	}

	/* Must mark buffers dirty before XLogInsert */
	MarkBufferDirty(rbuf);
	MarkBufferDirty(buf);
	if (BufferIsValid(lbuf))
		MarkBufferDirty(lbuf);
	if (target != leafblkno)
		MarkBufferDirty(leafbuf);

	/* XLOG stuff */
	if (RelationNeedsWAL(rel))
	{
		xl_btree_unlink_page xlrec;
		xl_btree_metadata xlmeta;
		uint8		xlinfo;
		XLogRecPtr	recptr;

		XLogBeginInsert();

		XLogRegisterBuffer(0, buf, REGBUF_WILL_INIT);
		if (BufferIsValid(lbuf))
			XLogRegisterBuffer(1, lbuf, REGBUF_STANDARD);
		XLogRegisterBuffer(2, rbuf, REGBUF_STANDARD);
		if (target != leafblkno)
			XLogRegisterBuffer(3, leafbuf, REGBUF_WILL_INIT);

		/* information on the unlinked block */
		xlrec.leftsib = leftsib;
		xlrec.rightsib = rightsib;
		xlrec.btpo_xact = opaque->btpo.xact;

		/* information needed to recreate the leaf block (if not the target) */
		xlrec.leafleftsib = leafleftsib;
		xlrec.leafrightsib = leafrightsib;
		xlrec.topparent = nextchild;

		XLogRegisterData((char *) &xlrec, SizeOfBtreeUnlinkPage);

		if (BufferIsValid(metabuf))
		{
			XLogRegisterBuffer(4, metabuf, REGBUF_WILL_INIT);

			xlmeta.root = metad->btm_root;
			xlmeta.level = metad->btm_level;
			xlmeta.fastroot = metad->btm_fastroot;
			xlmeta.fastlevel = metad->btm_fastlevel;

			XLogRegisterBufData(4, (char *) &xlmeta, sizeof(xl_btree_metadata));
			xlinfo = XLOG_BTREE_UNLINK_PAGE_META;
		}
		else
			xlinfo = XLOG_BTREE_UNLINK_PAGE;

		recptr = XLogInsert(RM_BTREE_ID, xlinfo);

		if (BufferIsValid(metabuf))
		{
			PageSetLSN(metapg, recptr);
		}
		page = BufferGetPage(rbuf);
		PageSetLSN(page, recptr);
		page = BufferGetPage(buf);
		PageSetLSN(page, recptr);
		if (BufferIsValid(lbuf))
		{
			page = BufferGetPage(lbuf);
			PageSetLSN(page, recptr);
		}
		if (target != leafblkno)
		{
			page = BufferGetPage(leafbuf);
			PageSetLSN(page, recptr);
		}
	}

	END_CRIT_SECTION();

	/* release metapage */
	if (BufferIsValid(metabuf))
		_bt_relbuf(rel, metabuf);

	/* release siblings */
	if (BufferIsValid(lbuf))
		_bt_relbuf(rel, lbuf);
	_bt_relbuf(rel, rbuf);

	/*
	 * Release the target, if it was not the leaf block.  The leaf is always
	 * kept locked.
	 */
	if (target != leafblkno)
		_bt_relbuf(rel, buf);

	return true;
}