/*-------------------------------------------------------------------------
 *
 * hio.c
 *	  POSTGRES heap access method input/output code.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/hio.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/htup_details.h"
#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"


/*
 * RelationPutHeapTuple - place tuple at specified page
 *
 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
 *
 * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
 */
void
RelationPutHeapTuple(Relation relation,
					 Buffer buffer,
					 HeapTuple tuple,
					 bool token)
{
	Page		pageHeader;
	OffsetNumber offnum;

	/*
	 * A tuple that's being inserted speculatively should already have its
	 * token set.
	 */
	Assert(!token || HeapTupleHeaderIsSpeculative(tuple->t_data));

	/* Add the tuple to the page */
	pageHeader = BufferGetPage(buffer);

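	/*
	 * Passing InvalidOffsetNumber lets PageAddItem choose the slot itself;
	 * the two trailing booleans request no overwriting of an existing line
	 * pointer and heap-specific sanity checks on the resulting offset.
	 */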
	offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
						 tuple->t_len, InvalidOffsetNumber, false, true);

	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "failed to add tuple to page");

	/* Update tuple->t_self to the actual position where it was stored */
	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);

	/*
	 * Insert the correct position into CTID of the stored tuple, too (unless
	 * this is a speculative insertion, in which case the token is held in
	 * CTID field instead)
	 */
	if (!token)
	{
		ItemId		itemId = PageGetItemId(pageHeader, offnum);
		Item		item = PageGetItem(pageHeader, itemId);

		((HeapTupleHeader) item)->t_ctid = tuple->t_self;
	}
}

/*
 * Read in a buffer, using bulk-insert strategy if bistate isn't NULL.
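 *
 * With a bulk-insert state, the pin on the chosen buffer is also kept in
 * bistate->current_buf, so consecutive inserts into the same page avoid
 * repeated pin/unpin cycles.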
 */
static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
			 BulkInsertState bistate)
{
	Buffer		buffer;

	/* If not bulk-insert, exactly like ReadBuffer */
	if (!bistate)
		return ReadBuffer(relation, targetBlock);

	/* If we have the desired block already pinned, re-pin and return it */
	if (bistate->current_buf != InvalidBuffer)
	{
		if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
		{
			IncrBufferRefCount(bistate->current_buf);
			return bistate->current_buf;
		}
		/* ... else drop the old buffer */
		ReleaseBuffer(bistate->current_buf);
		bistate->current_buf = InvalidBuffer;
	}

	/* Perform a read using the buffer strategy */
	buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
								RBM_NORMAL, bistate->strategy);

	/* Save the selected block as target for future inserts */
	IncrBufferRefCount(buffer);
	bistate->current_buf = buffer;

	return buffer;
}

/*
 * For each heap page which is all-visible, acquire a pin on the appropriate
 * visibility map page, if we haven't already got one.
 *
 * buffer2 may be InvalidBuffer, if only one buffer is involved.  buffer1
 * must not be InvalidBuffer.  If both buffers are specified, block1 must
 * be less than or equal to block2.
 */
static void
GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
					 BlockNumber block1, BlockNumber block2,
					 Buffer *vmbuffer1, Buffer *vmbuffer2)
{
	bool		need_to_pin_buffer1;
	bool		need_to_pin_buffer2;

	Assert(BufferIsValid(buffer1));
	Assert(buffer2 == InvalidBuffer || block1 <= block2);

	while (1)
	{
		/* Figure out which pins we need but don't have. */
		need_to_pin_buffer1 = PageIsAllVisible(BufferGetPage(buffer1))
			&& !visibilitymap_pin_ok(block1, *vmbuffer1);
		need_to_pin_buffer2 = buffer2 != InvalidBuffer
			&& PageIsAllVisible(BufferGetPage(buffer2))
			&& !visibilitymap_pin_ok(block2, *vmbuffer2);
		if (!need_to_pin_buffer1 && !need_to_pin_buffer2)
			return;

		/* We must unlock both buffers before doing any I/O. */
		LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_UNLOCK);

		/* Get pins. */
		if (need_to_pin_buffer1)
			visibilitymap_pin(relation, block1, vmbuffer1);
		if (need_to_pin_buffer2)
			visibilitymap_pin(relation, block2, vmbuffer2);

		/* Relock buffers. */
		LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_EXCLUSIVE);

		/*
		 * If there are two buffers involved and we pinned just one of them,
		 * it's possible that the second one became all-visible while we were
		 * busy pinning the first one.  If it looks like that's a possible
		 * scenario, we'll need to make a second pass through this loop.
		 */
		if (buffer2 == InvalidBuffer || buffer1 == buffer2
			|| (need_to_pin_buffer1 && need_to_pin_buffer2))
			break;
	}
}

/*
 * Extend a relation by multiple blocks to avoid future contention on the
 * relation extension lock.  Our goal is to pre-extend the relation by an
 * amount which ramps up as the degree of contention ramps up, but limiting
 * the result to some sane overall value.
 */
static void
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
	Page		page;
	BlockNumber blockNum = InvalidBlockNumber,
				firstBlock = InvalidBlockNumber;
	int			extraBlocks = 0;
	int			lockWaiters = 0;
	Size		freespace = 0;
	Buffer		buffer;

	/* Use the length of the lock wait queue to judge how much to extend. */
	lockWaiters = RelationExtensionLockWaiterCount(relation);
	if (lockWaiters <= 0)
		return;

	/*
	 * It might seem like multiplying the number of lock waiters by as much as
	 * 20 is too aggressive, but benchmarking revealed that smaller numbers
	 * were insufficient.  512 is just an arbitrary cap to prevent
	 * pathological results.
	 */
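	/* For example, ten lock waiters yield Min(512, 10 * 20) = 200 extra blocks. */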
	extraBlocks = Min(512, lockWaiters * 20);

	while (extraBlocks-- >= 0)
	{
		/* Ouch - an unnecessary lseek() each time through the loop! */
		buffer = ReadBufferBI(relation, P_NEW, bistate);

		/* Extend by one page. */
		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		page = BufferGetPage(buffer);
		PageInit(page, BufferGetPageSize(buffer), 0);
		MarkBufferDirty(buffer);
		blockNum = BufferGetBlockNumber(buffer);
		freespace = PageGetHeapFreeSpace(page);
		UnlockReleaseBuffer(buffer);

		/* Remember first block number thus added. */
		if (firstBlock == InvalidBlockNumber)
			firstBlock = blockNum;

		/*
		 * Immediately update the bottom level of the FSM.  This has a good
		 * chance of making this page visible to other concurrently inserting
		 * backends, and we want that to happen without delay.
		 */
		RecordPageWithFreeSpace(relation, blockNum, freespace);
	}

	/*
	 * Updating the upper levels of the free space map is too expensive to do
	 * for every block, but it's worth doing once at the end to make sure that
	 * subsequent insertion activity sees all of those nifty free pages we
	 * just inserted.
	 *
	 * Note that we're using the freespace value that was reported for the
	 * last block we added as if it were the freespace value for every block
	 * we added.  That's actually true, because they're all equally empty.
	 */
	UpdateFreeSpaceMap(relation, firstBlock, blockNum, freespace);
}

/*
 * RelationGetBufferForTuple
 *
 *	Returns pinned and exclusive-locked buffer of a page in given relation
 *	with free space >= given len.
 *
 *	If otherBuffer is not InvalidBuffer, then it references a previously
 *	pinned buffer of another page in the same relation; on return, this
 *	buffer will also be exclusive-locked.  (This case is used by heap_update;
 *	the otherBuffer contains the tuple being updated.)
 *
 *	The reason for passing otherBuffer is that if two backends are doing
 *	concurrent heap_update operations, a deadlock could occur if they try
 *	to lock the same two buffers in opposite orders.  To ensure that this
 *	can't happen, we impose the rule that buffers of a relation must be
 *	locked in increasing page number order.  This is most conveniently done
 *	by having RelationGetBufferForTuple lock them both, with suitable care
 *	for ordering.
 *
 *	NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 *	same buffer we select for insertion of the new tuple (this could only
 *	happen if space is freed in that page after heap_update finds there's not
 *	enough there).  In that case, the page will be pinned and locked only once.
 *
 *	For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
 *	locking them only after locking the corresponding heap page, and taking
 *	no further lwlocks while they are locked.
 *
 *	We normally use FSM to help us find free space.  However,
 *	if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 *	the end of the relation if the tuple won't fit on the current target page.
 *	This can save some cycles when we know the relation is new and doesn't
 *	contain useful amounts of free space.
 *
 *	HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 *	relation, if the caller holds exclusive lock and is careful to invalidate
 *	relation's smgr_targblock before the first insertion --- that ensures that
 *	all insertions will occur into newly added pages and not be intermixed
 *	with tuples from other transactions.  That way, a crash can't risk losing
 *	any committed data of other transactions.  (See heap_insert's comments
 *	for additional constraints needed for safe usage of this behavior.)
 *
 *	The caller can also provide a BulkInsertState object to optimize many
 *	insertions into the same relation.  This keeps a pin on the current
 *	insertion target page (to save pin/unpin cycles) and also passes a
 *	BULKWRITE buffer selection strategy object to the buffer manager.
 *	Passing NULL for bistate selects the default behavior.
 *
 *	We always try to avoid filling existing pages further than the fillfactor.
 *	This is OK since this routine is not consulted when updating a tuple and
 *	keeping it on the same page, which is the scenario fillfactor is meant
 *	to reserve space for.
 *
 *	ereport(ERROR) is allowed here, so this routine *must* be called
 *	before any (unlogged) changes are made in buffer pool.
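 *
 *	Typical usage (a sketch modeled on heap_insert; WAL logging, critical
 *	sections, and error handling elided):
 *
 *		buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
 *										   InvalidBuffer, options, bistate,
 *										   &vmbuffer, NULL);
 *		RelationPutHeapTuple(relation, buffer, heaptup, false);
 *		MarkBufferDirty(buffer);
 *		UnlockReleaseBuffer(buffer);
 *		if (vmbuffer != InvalidBuffer)
 *			ReleaseBuffer(vmbuffer);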
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
						  Buffer otherBuffer, int options,
						  BulkInsertState bistate,
						  Buffer *vmbuffer, Buffer *vmbuffer_other)
{
	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
	Buffer		buffer = InvalidBuffer;
	Page		page;
	Size		pageFreeSpace = 0,
				saveFreeSpace = 0;
	BlockNumber targetBlock,
				otherBlock;
	bool		needLock;

	len = MAXALIGN(len);		/* be conservative */

	/* Bulk insert is not supported for updates, only inserts. */
	Assert(otherBuffer == InvalidBuffer || !bistate);

	/*
	 * If we're gonna fail for oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %zu, maximum size %zu",
						len, MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
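	/*
	 * For example, with the default 8 kB block size, a table with fillfactor
	 * set to 70 reserves 8192 * (100 - 70) / 100 = 2457 bytes per page.
	 */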
	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
												   HEAP_DEFAULT_FILLFACTOR);

	if (otherBuffer != InvalidBuffer)
		otherBlock = BufferGetBlockNumber(otherBuffer);
	else
		otherBlock = InvalidBlockNumber;		/* just to keep compiler quiet */

	/*
	 * We first try to put the tuple on the same page we last inserted a tuple
	 * on, as cached in the BulkInsertState or relcache entry.  If that
	 * doesn't work, we ask the Free Space Map to locate a suitable page.
	 * Since the FSM's info might be out of date, we have to be prepared to
	 * loop around and retry multiple times. (To ensure this isn't an infinite
	 * loop, we must update the FSM with the correct amount of free space on
	 * each page that proves not to be suitable.)  If the FSM has no record of
	 * a page with enough free space, we give up and extend the relation.
	 *
	 * When use_fsm is false, we either put the tuple onto the existing target
	 * page or extend the relation.
	 */
	if (len + saveFreeSpace > MaxHeapTupleSize)
	{
		/* can't fit, don't bother asking FSM */
		targetBlock = InvalidBlockNumber;
		use_fsm = false;
	}
	else if (bistate && bistate->current_buf != InvalidBuffer)
		targetBlock = BufferGetBlockNumber(bistate->current_buf);
	else
		targetBlock = RelationGetTargetBlock(relation);

	if (targetBlock == InvalidBlockNumber && use_fsm)
	{
		/*
		 * We have no cached target page, so ask the FSM for an initial
		 * target.
		 */
		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

		/*
		 * If the FSM knows nothing of the rel, try the last page before we
		 * give up and extend.  This avoids one-tuple-per-page syndrome during
		 * bootstrapping or in a recently-started system.
		 */
		if (targetBlock == InvalidBlockNumber)
		{
			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

			if (nblocks > 0)
				targetBlock = nblocks - 1;
		}
	}

loop:
	while (targetBlock != InvalidBlockNumber)
	{
		/*
		 * Read and exclusive-lock the target block, as well as the other
		 * block if one was given, taking suitable care with lock ordering and
		 * the possibility they are the same block.
		 *
		 * If the page-level all-visible flag is set, caller will need to
		 * clear both that and the corresponding visibility map bit.  However,
		 * by the time we return, we'll have x-locked the buffer, and we don't
		 * want to do any I/O while in that state.  So we check the bit here
		 * before taking the lock, and pin the page if it appears necessary.
		 * Checking without the lock creates a risk of getting the wrong
		 * answer, so we'll have to recheck after acquiring the lock.
		 */
		if (otherBuffer == InvalidBuffer)
		{
			/* easy case */
			buffer = ReadBufferBI(relation, targetBlock, bistate);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock == targetBlock)
		{
			/* also easy case */
			buffer = otherBuffer;
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock < targetBlock)
		{
			/* lock other buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else
		{
			/* lock target buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		}

		/*
		 * We now have the target page (and the other buffer, if any) pinned
		 * and locked.  However, since our initial PageIsAllVisible checks
		 * were performed before acquiring the lock, the results might now be
		 * out of date, either for the selected victim buffer, or for the
		 * other buffer passed by the caller.  In that case, we'll need to
		 * give up our locks, go get the pin(s) we failed to get earlier, and
		 * re-lock.  That's pretty painful, but hopefully shouldn't happen
		 * often.
		 *
		 * Note that there's a small possibility that we didn't pin the page
		 * above but still have the correct page pinned anyway, either because
		 * we've already made a previous pass through this loop, or because
		 * caller passed us the right page anyway.
		 *
		 * Note also that it's possible that by the time we get the pin and
		 * retake the buffer locks, the visibility map bit will have been
		 * cleared by some other backend anyway.  In that case, we'll have
		 * done a bit of extra work for no gain, but there's no real harm
		 * done.
		 */
		if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock)
			GetVisibilityMapPins(relation, buffer, otherBuffer,
								 targetBlock, otherBlock, vmbuffer,
								 vmbuffer_other);
		else
			GetVisibilityMapPins(relation, otherBuffer, buffer,
								 otherBlock, targetBlock, vmbuffer_other,
								 vmbuffer);

		/*
		 * Now we can check to see if there's enough free space here. If so,
		 * we're done.
		 */
		page = BufferGetPage(buffer);
		pageFreeSpace = PageGetHeapFreeSpace(page);
		if (len + saveFreeSpace <= pageFreeSpace)
		{
			/* use this page as future insert target, too */
			RelationSetTargetBlock(relation, targetBlock);
			return buffer;
		}

		/*
		 * Not enough space, so we must give up our page locks and pin (if
		 * any) and prepare to look elsewhere.  We don't care which order we
		 * unlock the two buffers in, so this can be slightly simpler than the
		 * code above.
		 */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (otherBuffer == InvalidBuffer)
			ReleaseBuffer(buffer);
		else if (otherBlock != targetBlock)
		{
			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
		}

		/* Without FSM, always fall out of the loop and extend */
		if (!use_fsm)
			break;

		/*
		 * Update FSM as to condition of this page, and ask for another page
		 * to try.
		 */
		targetBlock = RecordAndGetPageWithFreeSpace(relation,
													targetBlock,
													pageFreeSpace,
													len + saveFreeSpace);
	}

	/*
	 * Have to extend the relation.
	 *
	 * We have to use a lock to ensure no one else is extending the rel at the
	 * same time, else we will both try to initialize the same new page.  We
	 * can skip locking for new or temp relations, however, since no one else
	 * could be accessing them.
	 */
	needLock = !RELATION_IS_LOCAL(relation);

	/*
	 * If we need the lock but are not able to acquire it immediately, we'll
	 * consider extending the relation by multiple blocks at a time to manage
	 * contention on the relation extension lock.  However, this only makes
	 * sense if we're using the FSM; otherwise, there's no point.
	 */
	if (needLock)
	{
		if (!use_fsm)
			LockRelationForExtension(relation, ExclusiveLock);
		else if (!ConditionalLockRelationForExtension(relation, ExclusiveLock))
		{
			/* Couldn't get the lock immediately; wait for it. */
			LockRelationForExtension(relation, ExclusiveLock);

			/*
			 * Check if some other backend has extended a block for us while
			 * we were waiting on the lock.
			 */
			targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

			/*
			 * If some other waiter has already extended the relation, we
			 * don't need to do so; just use the existing freespace.
			 */
			if (targetBlock != InvalidBlockNumber)
			{
				UnlockRelationForExtension(relation, ExclusiveLock);
				goto loop;
			}

			/* Time to bulk-extend. */
			RelationAddExtraBlocks(relation, bistate);
		}
	}

	/*
	 * In addition to whatever extension we performed above, we always add at
	 * least one block to satisfy our own request.
	 *
	 * XXX This does an lseek - rather expensive - but at the moment it is the
	 * only way to accurately determine how many blocks are in a relation.  Is
	 * it worth keeping an accurate file length in shared memory someplace,
	 * rather than relying on the kernel to do it for us?
	 */
	buffer = ReadBufferBI(relation, P_NEW, bistate);

	/*
	 * We can be certain that locking the otherBuffer first is OK, since it
	 * must have a lower page number.
	 */
	if (otherBuffer != InvalidBuffer)
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Now acquire lock on the new page.
	 */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Release the file-extension lock; it's now OK for someone else to extend
	 * the relation some more.  Note that we cannot release this lock before
	 * we have buffer lock on the new page, or we risk a race condition
	 * against vacuumlazy.c --- see comments therein.
	 */
	if (needLock)
		UnlockRelationForExtension(relation, ExclusiveLock);

	/*
	 * We need to initialize the empty new page.  Double-check that it really
	 * is empty (this should never happen, but if it does we don't want to
	 * risk wiping out valid data).
	 */
	page = BufferGetPage(buffer);

	if (!PageIsNew(page))
		elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
			 BufferGetBlockNumber(buffer),
			 RelationGetRelationName(relation));

	PageInit(page, BufferGetPageSize(buffer), 0);

	if (len > PageGetHeapFreeSpace(page))
	{
		/* We should not get here given the test at the top */
		elog(PANIC, "tuple is too big: size %zu", len);
	}

	/*
	 * Remember the new page as our target for future insertions.
	 *
	 * XXX should we enter the new page into the free space map immediately,
	 * or just keep it for this backend's exclusive use in the short run
	 * (until VACUUM sees it)?	Seems to depend on whether you expect the
	 * current backend to make more insertions or not, which is probably a
	 * good bet most of the time.  So for now, don't add it to FSM yet.
	 */
	RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));

	return buffer;
}