/*
 * brin_revmap.c
 *		Range map for BRIN indexes
 *
 * The range map (revmap) is a translation structure for BRIN indexes: for each
 * page range there is one summary tuple, and its location is tracked by the
 * revmap.  Whenever a new tuple is inserted into a table that violates the
 * previously recorded summary values, a new tuple is inserted into the index
 * and the revmap is updated to point to it.
 *
 * The revmap is stored in the first pages of the index, immediately following
 * the metapage.  When the revmap needs to be expanded, all tuples on the
 * regular BRIN page at that block (if any) are moved out of the way.
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/brin/brin_revmap.c
 */
#include "postgres.h"

#include "access/brin_page.h"
#include "access/brin_pageops.h"
#include "access/brin_revmap.h"
#include "access/brin_tuple.h"
#include "access/brin_xlog.h"
#include "access/rmgr.h"
#include "access/xloginsert.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/rel.h"


/*
 * In revmap pages, each item stores an ItemPointerData.  These defines let one
 * find the logical revmap page number and index number of the revmap item for
 * the given heap block number.
 */
#define HEAPBLK_TO_REVMAP_BLK(pagesPerRange, heapBlk) \
	((heapBlk / pagesPerRange) / REVMAP_PAGE_MAXITEMS)
#define HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk) \
	((heapBlk / pagesPerRange) % REVMAP_PAGE_MAXITEMS)
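
/*
 * Worked example (illustrative; the exact REVMAP_PAGE_MAXITEMS value depends
 * on BLCKSZ and page overhead -- for the default 8kB block size it works out
 * to 1360 TIDs per revmap page).  With the default pagesPerRange of 128,
 * heap block 200000 belongs to range number 200000 / 128 = 1562, so
 *
 *	HEAPBLK_TO_REVMAP_BLK(128, 200000)   == 1562 / 1360 == 1
 *	HEAPBLK_TO_REVMAP_INDEX(128, 200000) == 1562 % 1360 == 202
 *
 * i.e. its TID lives at index 202 of logical revmap page 1, which is
 * physical block 2 once the metapage is skipped (see revmap_get_blkno).
 */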


struct BrinRevmap
{
	Relation	rm_irel;
	BlockNumber rm_pagesPerRange;
	BlockNumber rm_lastRevmapPage;	/* cached from the metapage */
	Buffer		rm_metaBuf;
	Buffer		rm_currBuf;
};

/* typedef appears in brin_revmap.h */


static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
									BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
											   BlockNumber heapBlk);
static void revmap_physical_extend(BrinRevmap *revmap);

/*
 * Initialize an access object for a range map.  This must be freed by
 * brinRevmapTerminate when caller is done with it.
 */
BrinRevmap *
brinRevmapInitialize(Relation idxrel, BlockNumber *pagesPerRange,
					 Snapshot snapshot)
{
	BrinRevmap *revmap;
	Buffer		meta;
	BrinMetaPageData *metadata;
	Page		page;

	meta = ReadBuffer(idxrel, BRIN_METAPAGE_BLKNO);
	LockBuffer(meta, BUFFER_LOCK_SHARE);
	page = BufferGetPage(meta);
	TestForOldSnapshot(snapshot, idxrel, page);
	metadata = (BrinMetaPageData *) PageGetContents(page);

	revmap = palloc(sizeof(BrinRevmap));
	revmap->rm_irel = idxrel;
	revmap->rm_pagesPerRange = metadata->pagesPerRange;
	revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
	revmap->rm_metaBuf = meta;
	revmap->rm_currBuf = InvalidBuffer;

	*pagesPerRange = metadata->pagesPerRange;

	LockBuffer(meta, BUFFER_LOCK_UNLOCK);

	return revmap;
}

/*
 * Release resources associated with a revmap access object.
 */
void
brinRevmapTerminate(BrinRevmap *revmap)
{
	ReleaseBuffer(revmap->rm_metaBuf);
	if (revmap->rm_currBuf != InvalidBuffer)
		ReleaseBuffer(revmap->rm_currBuf);
	pfree(revmap);
}
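
/*
 * A minimal lifecycle sketch (compiled out; assumes the caller already holds
 * an appropriate lock on the index relation).  Real callers perform lookups
 * or updates between the two calls, as the functions below show.
 */
#ifdef NOT_USED
static void
revmap_lifecycle_sketch(Relation idxrel, Snapshot snapshot)
{
	BrinRevmap *revmap;
	BlockNumber pagesPerRange;

	revmap = brinRevmapInitialize(idxrel, &pagesPerRange, snapshot);
	/* ... brinGetTupleForHeapBlock, brinRevmapExtend, etc. ... */
	brinRevmapTerminate(revmap);
}
#endif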

/*
 * Extend the revmap to cover the given heap block number.
 */
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
	BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;

	mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);

	/* Ensure the buffer we got is in the expected range */
	Assert(mapBlk != InvalidBlockNumber &&
		   mapBlk != BRIN_METAPAGE_BLKNO &&
		   mapBlk <= revmap->rm_lastRevmapPage);
}

/*
 * Prepare to insert an entry into the revmap; the revmap buffer in which the
 * entry is to reside is locked and returned.  Most callers should call
 * brinRevmapExtend beforehand, as this routine does not extend the revmap if
 * it's not long enough.
 *
 * The returned buffer is also recorded in the revmap struct; finishing that
 * releases the buffer, therefore the caller needn't do it explicitly.
 */
Buffer
brinLockRevmapPageForUpdate(BrinRevmap *revmap, BlockNumber heapBlk)
{
	Buffer		rmBuf;

	rmBuf = revmap_get_buffer(revmap, heapBlk);
	LockBuffer(rmBuf, BUFFER_LOCK_EXCLUSIVE);

	return rmBuf;
}

/*
 * In the given revmap buffer (locked appropriately by caller), which is used
 * in a BRIN index of pagesPerRange pages per range, set the element
 * corresponding to heap block number heapBlk to the given TID.
 *
 * Once the operation is complete, the caller must update the LSN on the
 * returned buffer.
 *
 * This is used both in regular operation and during WAL replay.
 */
void
brinSetHeapBlockItemptr(Buffer buf, BlockNumber pagesPerRange,
						BlockNumber heapBlk, ItemPointerData tid)
{
	RevmapContents *contents;
	ItemPointerData *iptr;
	Page		page;

	/* The correct page should already be pinned and locked */
	page = BufferGetPage(buf);
	contents = (RevmapContents *) PageGetContents(page);
	iptr = (ItemPointerData *) contents->rm_tids;
	iptr += HEAPBLK_TO_REVMAP_INDEX(pagesPerRange, heapBlk);

	if (ItemPointerIsValid(&tid))
		ItemPointerSet(iptr,
					   ItemPointerGetBlockNumber(&tid),
					   ItemPointerGetOffsetNumber(&tid));
	else
		ItemPointerSetInvalid(iptr);
}

/*
 * Fetch the BrinTuple for a given heap block.
 *
 * The buffer containing the tuple is locked, and returned in *buf.  The
 * returned tuple points to the shared buffer and must not be freed; if caller
 * wants to use it after releasing the buffer lock, it must create its own
 * palloc'ed copy.  As an optimization, the caller can pass a pinned buffer
 * *buf on entry, which will avoid a pin-unpin cycle when the next tuple is on
 * the same page as a previous one.
 *
 * If no tuple is found for the given heap range, returns NULL. In that case,
 * *buf might still be updated (and pin must be released by caller), but it's
 * not locked.
 *
 * The output tuple offset within the buffer is returned in *off, and its size
 * is returned in *size.
 */
BrinTuple *
brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
						 Buffer *buf, OffsetNumber *off, Size *size, int mode,
						 Snapshot snapshot)
{
	Relation	idxRel = revmap->rm_irel;
	BlockNumber mapBlk;
	RevmapContents *contents;
	ItemPointerData *iptr;
	BlockNumber blk;
	Page		page;
	ItemId		lp;
	BrinTuple  *tup;
	ItemPointerData previptr;

	/* normalize the heap block number to be the first page in the range */
	heapBlk = (heapBlk / revmap->rm_pagesPerRange) * revmap->rm_pagesPerRange;

	/*
	 * Compute the revmap page number we need.  If Invalid is returned (i.e.,
	 * the revmap page hasn't been created yet), the requested page range is
	 * not summarized.
	 */
	mapBlk = revmap_get_blkno(revmap, heapBlk);
	if (mapBlk == InvalidBlockNumber)
	{
		*off = InvalidOffsetNumber;
		return NULL;
	}

	ItemPointerSetInvalid(&previptr);
	for (;;)
	{
		CHECK_FOR_INTERRUPTS();

		if (revmap->rm_currBuf == InvalidBuffer ||
			BufferGetBlockNumber(revmap->rm_currBuf) != mapBlk)
		{
			if (revmap->rm_currBuf != InvalidBuffer)
				ReleaseBuffer(revmap->rm_currBuf);

			Assert(mapBlk != InvalidBlockNumber);
			revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
		}

		LockBuffer(revmap->rm_currBuf, BUFFER_LOCK_SHARE);

		contents = (RevmapContents *)
			PageGetContents(BufferGetPage(revmap->rm_currBuf));
		iptr = contents->rm_tids;
		iptr += HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);

		if (!ItemPointerIsValid(iptr))
		{
			LockBuffer(revmap->rm_currBuf, BUFFER_LOCK_UNLOCK);
			return NULL;
		}

		/*
		 * Check the TID we got in a previous iteration, if any, and save the
		 * current TID we got from the revmap; if we loop, we can sanity-check
		 * that the next one we get is different.  Otherwise we might be stuck
		 * looping forever if the revmap is somehow badly broken.
		 */
		if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
			ereport(ERROR,
					(errcode(ERRCODE_INDEX_CORRUPTED),
					 errmsg_internal("corrupted BRIN index: inconsistent range map")));
		previptr = *iptr;

		blk = ItemPointerGetBlockNumber(iptr);
		*off = ItemPointerGetOffsetNumber(iptr);

		LockBuffer(revmap->rm_currBuf, BUFFER_LOCK_UNLOCK);

		/* Ok, got a pointer to where the BrinTuple should be. Fetch it. */
		if (!BufferIsValid(*buf) || BufferGetBlockNumber(*buf) != blk)
		{
			if (BufferIsValid(*buf))
				ReleaseBuffer(*buf);
			*buf = ReadBuffer(idxRel, blk);
		}
		LockBuffer(*buf, mode);
		page = BufferGetPage(*buf);
		TestForOldSnapshot(snapshot, idxRel, page);

		/* If we land on a revmap page, start over */
		if (BRIN_IS_REGULAR_PAGE(page))
		{
			/*
			 * If the offset number is greater than what's in the page, it's
			 * possible that the range was desummarized concurrently. Just
			 * return NULL to handle that case.
			 */
			if (*off > PageGetMaxOffsetNumber(page))
			{
				LockBuffer(*buf, BUFFER_LOCK_UNLOCK);
				return NULL;
			}

			lp = PageGetItemId(page, *off);
			if (ItemIdIsUsed(lp))
			{
				tup = (BrinTuple *) PageGetItem(page, lp);

				if (tup->bt_blkno == heapBlk)
				{
					if (size)
						*size = ItemIdGetLength(lp);
					/* found it! */
					return tup;
				}
			}
		}

		/*
		 * No luck. Assume that the revmap was updated concurrently.
		 */
		LockBuffer(*buf, BUFFER_LOCK_UNLOCK);
	}
	/* not reached, but keep compiler quiet */
	return NULL;
}
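
/*
 * Illustrative sketch (compiled out): a typical consumer of
 * brinGetTupleForHeapBlock copies the tuple out while still holding the
 * buffer lock, then drops the lock and pin.  This is a simplified rendition
 * of the pattern the scan and insert paths follow, not code taken verbatim
 * from them.
 */
#ifdef NOT_USED
static BrinTuple *
fetch_range_tuple_sketch(BrinRevmap *revmap, BlockNumber heapBlk,
						 Snapshot snapshot)
{
	Buffer		buf = InvalidBuffer;
	OffsetNumber off;
	Size		size;
	BrinTuple  *tup;
	BrinTuple  *copy = NULL;

	tup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, &size,
								   BUFFER_LOCK_SHARE, snapshot);
	if (tup != NULL)
	{
		/* copy while the buffer lock still protects the tuple */
		copy = brin_copy_tuple(tup, size, NULL, NULL);
		LockBuffer(buf, BUFFER_LOCK_UNLOCK);
	}

	/* on a NULL result the buffer may still be pinned; release either way */
	if (BufferIsValid(buf))
		ReleaseBuffer(buf);

	return copy;
}
#endif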

/*
 * Delete an index tuple, marking a page range as unsummarized.
 *
 * Index must be locked in ShareUpdateExclusiveLock mode.
 *
 * Return false if caller should retry.
 */
bool
brinRevmapDesummarizeRange(Relation idxrel, BlockNumber heapBlk)
{
	BrinRevmap *revmap;
	BlockNumber pagesPerRange;
	RevmapContents *contents;
	ItemPointerData *iptr;
	ItemPointerData invalidIptr;
	BlockNumber revmapBlk;
	Buffer		revmapBuf;
	Buffer		regBuf;
	Page		revmapPg;
	Page		regPg;
	OffsetNumber revmapOffset;
	OffsetNumber regOffset;
	ItemId		lp;

	revmap = brinRevmapInitialize(idxrel, &pagesPerRange, NULL);

	revmapBlk = revmap_get_blkno(revmap, heapBlk);
	if (!BlockNumberIsValid(revmapBlk))
	{
		/* revmap page doesn't exist: range not summarized, we're done */
		brinRevmapTerminate(revmap);
		return true;
	}

	/* Lock the revmap page, obtain the index tuple pointer from it */
	revmapBuf = brinLockRevmapPageForUpdate(revmap, heapBlk);
	revmapPg = BufferGetPage(revmapBuf);
	revmapOffset = HEAPBLK_TO_REVMAP_INDEX(revmap->rm_pagesPerRange, heapBlk);

	contents = (RevmapContents *) PageGetContents(revmapPg);
	iptr = contents->rm_tids;
	iptr += revmapOffset;

	if (!ItemPointerIsValid(iptr))
	{
		/* no index tuple: range not summarized, we're done */
		LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
		brinRevmapTerminate(revmap);
		return true;
	}

	regBuf = ReadBuffer(idxrel, ItemPointerGetBlockNumber(iptr));
	LockBuffer(regBuf, BUFFER_LOCK_EXCLUSIVE);
	regPg = BufferGetPage(regBuf);

	/*
	 * We're only removing data, not reading it, so there's no need to
	 * TestForOldSnapshot here.
	 */

	/* if this is no longer a regular page, tell caller to start over */
	if (!BRIN_IS_REGULAR_PAGE(regPg))
	{
		LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
		LockBuffer(regBuf, BUFFER_LOCK_UNLOCK);
		brinRevmapTerminate(revmap);
		return false;
	}

	regOffset = ItemPointerGetOffsetNumber(iptr);
	if (regOffset > PageGetMaxOffsetNumber(regPg))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("corrupted BRIN index: inconsistent range map")));

	lp = PageGetItemId(regPg, regOffset);
	if (!ItemIdIsUsed(lp))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("corrupted BRIN index: inconsistent range map")));

	/*
	 * Placeholder tuples only appear during unfinished summarization, and we
	 * hold ShareUpdateExclusiveLock, so this function cannot run concurrently
	 * with that.  So any placeholder tuples that exist are leftovers from a
	 * crashed or aborted summarization; remove them silently.
	 */

	START_CRIT_SECTION();

	ItemPointerSetInvalid(&invalidIptr);
	brinSetHeapBlockItemptr(revmapBuf, revmap->rm_pagesPerRange, heapBlk,
							invalidIptr);
	PageIndexTupleDeleteNoCompact(regPg, regOffset);
	/* XXX record free space in FSM? */

	MarkBufferDirty(regBuf);
	MarkBufferDirty(revmapBuf);

	if (RelationNeedsWAL(idxrel))
	{
		xl_brin_desummarize xlrec;
		XLogRecPtr	recptr;

		xlrec.pagesPerRange = revmap->rm_pagesPerRange;
		xlrec.heapBlk = heapBlk;
		xlrec.regOffset = regOffset;

		XLogBeginInsert();
		XLogRegisterData((char *) &xlrec, SizeOfBrinDesummarize);
		XLogRegisterBuffer(0, revmapBuf, 0);
		XLogRegisterBuffer(1, regBuf, REGBUF_STANDARD);
		recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_DESUMMARIZE);
		PageSetLSN(revmapPg, recptr);
		PageSetLSN(regPg, recptr);
	}

	END_CRIT_SECTION();

	UnlockReleaseBuffer(regBuf);
	LockBuffer(revmapBuf, BUFFER_LOCK_UNLOCK);
	brinRevmapTerminate(revmap);

	return true;
}
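
/*
 * Illustrative sketch (compiled out): callers are expected to loop until
 * desummarization succeeds, much as brin_desummarize_range in brin.c does.
 * A sketch of the expected caller pattern, not verbatim caller code.
 */
#ifdef NOT_USED
static void
desummarize_until_done_sketch(Relation idxrel, BlockNumber heapBlk)
{
	bool		done = false;

	while (!done)
	{
		CHECK_FOR_INTERRUPTS();
		done = brinRevmapDesummarizeRange(idxrel, heapBlk);
	}
}
#endif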

/*
 * Given a heap block number, find the corresponding physical revmap block
 * number and return it.  If the revmap page hasn't been allocated yet, return
 * InvalidBlockNumber.
 */
static BlockNumber
revmap_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
	BlockNumber targetblk;

	/* obtain revmap block number, skip 1 for metapage block */
	targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;

	/* Normal case: the revmap page is already allocated */
	if (targetblk <= revmap->rm_lastRevmapPage)
		return targetblk;

	return InvalidBlockNumber;
}
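
/*
 * To illustrate the "+1" above: with lastRevmapPage = 3, the index layout is
 *
 *	block 0:	metapage
 *	blocks 1-3:	revmap pages (logical revmap pages 0-2)
 *	blocks 4+:	regular pages holding the summary tuples
 *
 * When the revmap needs to grow, any regular page at block
 * lastRevmapPage + 1 is evacuated first; see revmap_physical_extend.
 */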

/*
 * Obtain and return a buffer containing the revmap page for the given heap
 * page.  The revmap must have been previously extended to cover that page.
 * The returned buffer is also recorded in the revmap struct; finishing that
 * releases the buffer, therefore the caller needn't do it explicitly.
 */
static Buffer
revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
{
	BlockNumber mapBlk;

	/* Translate the heap block number to physical index location. */
	mapBlk = revmap_get_blkno(revmap, heapBlk);

	if (mapBlk == InvalidBlockNumber)
		elog(ERROR, "revmap does not cover heap block %u", heapBlk);

	/* Ensure the buffer we got is in the expected range */
	Assert(mapBlk != BRIN_METAPAGE_BLKNO &&
		   mapBlk <= revmap->rm_lastRevmapPage);

	/*
	 * Obtain the buffer from which we need to read.  If we already have the
	 * correct buffer in our access struct, use that; otherwise, release it
	 * (if valid) and read the one we need.
	 */
	if (revmap->rm_currBuf == InvalidBuffer ||
		mapBlk != BufferGetBlockNumber(revmap->rm_currBuf))
	{
		if (revmap->rm_currBuf != InvalidBuffer)
			ReleaseBuffer(revmap->rm_currBuf);

		revmap->rm_currBuf = ReadBuffer(revmap->rm_irel, mapBlk);
	}

	return revmap->rm_currBuf;
}

/*
 * Given a heap block number, find the corresponding physical revmap block
 * number and return it. If the revmap page hasn't been allocated yet, extend
 * the revmap until it is.
 */
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
	BlockNumber targetblk;

	/* obtain revmap block number, skip 1 for metapage block */
	targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;

	/* Extend the revmap, if necessary */
	while (targetblk > revmap->rm_lastRevmapPage)
	{
		CHECK_FOR_INTERRUPTS();
		revmap_physical_extend(revmap);
	}

	return targetblk;
}

/*
 * Try to extend the revmap by one page.  This might not happen for a number of
 * reasons; caller is expected to retry until the expected outcome is obtained.
 */
static void
revmap_physical_extend(BrinRevmap *revmap)
{
	Buffer		buf;
	Page		page;
	Page		metapage;
	BrinMetaPageData *metadata;
	BlockNumber mapBlk;
	BlockNumber nblocks;
	Relation	irel = revmap->rm_irel;
	bool		needLock = !RELATION_IS_LOCAL(irel);

	/*
	 * Lock the metapage. This locks out concurrent extensions of the revmap,
	 * but note that we still need to grab the relation extension lock because
	 * another backend can extend the index with regular BRIN pages.
	 */
	LockBuffer(revmap->rm_metaBuf, BUFFER_LOCK_EXCLUSIVE);
	metapage = BufferGetPage(revmap->rm_metaBuf);
	metadata = (BrinMetaPageData *) PageGetContents(metapage);

	/*
	 * Check that our cached lastRevmapPage value was up-to-date; if it
	 * wasn't, update the cached copy and have caller start over.
	 */
	if (metadata->lastRevmapPage != revmap->rm_lastRevmapPage)
	{
		revmap->rm_lastRevmapPage = metadata->lastRevmapPage;
		LockBuffer(revmap->rm_metaBuf, BUFFER_LOCK_UNLOCK);
		return;
	}
	mapBlk = metadata->lastRevmapPage + 1;

	nblocks = RelationGetNumberOfBlocks(irel);
	if (mapBlk < nblocks)
	{
		buf = ReadBuffer(irel, mapBlk);
		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		page = BufferGetPage(buf);
	}
	else
	{
		if (needLock)
			LockRelationForExtension(irel, ExclusiveLock);

		buf = ReadBuffer(irel, P_NEW);
		if (BufferGetBlockNumber(buf) != mapBlk)
		{
			/*
			 * Very rare corner case: somebody extended the relation
			 * concurrently after we read its length.  If this happens, give
			 * up and have caller start over.  We will have to evacuate that
			 * page from under whoever is using it.
			 */
			if (needLock)
				UnlockRelationForExtension(irel, ExclusiveLock);
			LockBuffer(revmap->rm_metaBuf, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buf);
			return;
		}
		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		page = BufferGetPage(buf);

		if (needLock)
			UnlockRelationForExtension(irel, ExclusiveLock);
	}

	/* Check that it's a regular block (or an empty page) */
	if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
						BrinPageType(page),
						RelationGetRelationName(irel),
						BufferGetBlockNumber(buf))));

	/* If the page is in use, evacuate it and restart */
	if (brin_start_evacuating_page(irel, buf))
	{
		LockBuffer(revmap->rm_metaBuf, BUFFER_LOCK_UNLOCK);
		brin_evacuate_page(irel, revmap->rm_pagesPerRange, revmap, buf);

		/* have caller start over */
		return;
	}

	/*
	 * Ok, we have now locked the metapage and the target block. Re-initialize
	 * the target block as a revmap page, and update the metapage.
	 */
	START_CRIT_SECTION();

	/* the rm_tids array is initialized to all invalid by PageInit */
	brin_page_init(page, BRIN_PAGETYPE_REVMAP);
	MarkBufferDirty(buf);

	metadata->lastRevmapPage = mapBlk;

	/*
	 * Set pd_lower just past the end of the metadata.  This is essential,
	 * because without doing so, metadata will be lost if xlog.c compresses
	 * the page.  (We must do this here because pre-v11 versions of PG did not
	 * set the metapage's pd_lower correctly, so a pg_upgraded index might
	 * contain the wrong value.)
	 */
	((PageHeader) metapage)->pd_lower =
		((char *) metadata + sizeof(BrinMetaPageData)) - (char *) metapage;

	MarkBufferDirty(revmap->rm_metaBuf);

	if (RelationNeedsWAL(revmap->rm_irel))
	{
		xl_brin_revmap_extend xlrec;
		XLogRecPtr	recptr;

		xlrec.targetBlk = mapBlk;

		XLogBeginInsert();
		XLogRegisterData((char *) &xlrec, SizeOfBrinRevmapExtend);
		XLogRegisterBuffer(0, revmap->rm_metaBuf, REGBUF_STANDARD);

		XLogRegisterBuffer(1, buf, REGBUF_WILL_INIT);

		recptr = XLogInsert(RM_BRIN_ID, XLOG_BRIN_REVMAP_EXTEND);
		PageSetLSN(metapage, recptr);
		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();

	LockBuffer(revmap->rm_metaBuf, BUFFER_LOCK_UNLOCK);

	UnlockReleaseBuffer(buf);
}