/*-------------------------------------------------------------------------
 *
 * hio.c
 *	  POSTGRES heap access method input/output code.
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/hio.c
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/heapam.h"
#include "access/hio.h"
#include "access/htup_details.h"
#include "access/visibilitymap.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/smgr.h"


/*
 * RelationPutHeapTuple - place tuple at specified page
 *
 * !!! EREPORT(ERROR) IS DISALLOWED HERE !!!  Must PANIC on failure!!!
 *
 * Note - caller must hold BUFFER_LOCK_EXCLUSIVE on the buffer.
 */
void
RelationPutHeapTuple(Relation relation,
					 Buffer buffer,
					 HeapTuple tuple,
					 bool token)
{
	Page		pageHeader;
	OffsetNumber offnum;

	/*
	 * A tuple that's being inserted speculatively should already have its
	 * token set.
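	 * (Speculative insertions are used by INSERT ... ON CONFLICT; until the
	 * insertion is confirmed, the token is stored in the tuple's CTID
	 * field.)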
	 */
	Assert(!token || HeapTupleHeaderIsSpeculative(tuple->t_data));

	/* Add the tuple to the page */
	pageHeader = BufferGetPage(buffer);

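	/*
	 * Passing InvalidOffsetNumber lets PageAddItem choose the slot itself;
	 * the boolean arguments are overwrite = false and is_heap = true.
	 */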
	offnum = PageAddItem(pageHeader, (Item) tuple->t_data,
						 tuple->t_len, InvalidOffsetNumber, false, true);

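	/*
	 * The caller is expected to have verified that the tuple fits (and may
	 * already be inside a critical section), so failure here indicates
	 * something badly wrong; hence PANIC rather than ERROR.
	 */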
	if (offnum == InvalidOffsetNumber)
		elog(PANIC, "failed to add tuple to page");

	/* Update tuple->t_self to the actual position where it was stored */
	ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);

	/*
	 * Insert the correct position into CTID of the stored tuple, too (unless
	 * this is a speculative insertion, in which case the token is held in
	 * the CTID field instead)
	 */
	if (!token)
	{
		ItemId		itemId = PageGetItemId(pageHeader, offnum);
		HeapTupleHeader item = (HeapTupleHeader) PageGetItem(pageHeader, itemId);

		item->t_ctid = tuple->t_self;
	}
}

/*
 * Read in a buffer, using bulk-insert strategy if bistate isn't NULL.
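 *
 * targetBlock can be P_NEW, in which case the relation is extended by one
 * block.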
 */
static Buffer
ReadBufferBI(Relation relation, BlockNumber targetBlock,
			 BulkInsertState bistate)
{
	Buffer		buffer;

	/* If not bulk-insert, exactly like ReadBuffer */
	if (!bistate)
		return ReadBuffer(relation, targetBlock);

	/* If we have the desired block already pinned, re-pin and return it */
	if (bistate->current_buf != InvalidBuffer)
	{
		if (BufferGetBlockNumber(bistate->current_buf) == targetBlock)
		{
			IncrBufferRefCount(bistate->current_buf);
			return bistate->current_buf;
		}
		/* ... else drop the old buffer */
		ReleaseBuffer(bistate->current_buf);
		bistate->current_buf = InvalidBuffer;
	}

	/* Perform a read using the buffer strategy */
	buffer = ReadBufferExtended(relation, MAIN_FORKNUM, targetBlock,
								RBM_NORMAL, bistate->strategy);

	/* Save the selected block as target for future inserts */
	IncrBufferRefCount(buffer);
	bistate->current_buf = buffer;

	return buffer;
}

/*
 * For each heap page which is all-visible, acquire a pin on the appropriate
 * visibility map page, if we haven't already got one.
 *
 * buffer2 may be InvalidBuffer, if only one buffer is involved.  buffer1
 * must not be InvalidBuffer.  If both buffers are specified, block1 must
 * not be greater than block2.
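 *
 * visibilitymap_pin can do I/O, so the locks on the heap buffers are
 * released while the pins are acquired, then retaken.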
 */
static void
GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
					 BlockNumber block1, BlockNumber block2,
					 Buffer *vmbuffer1, Buffer *vmbuffer2)
{
	bool		need_to_pin_buffer1;
	bool		need_to_pin_buffer2;

	Assert(BufferIsValid(buffer1));
	Assert(buffer2 == InvalidBuffer || block1 <= block2);

	while (1)
	{
		/* Figure out which pins we need but don't have. */
		need_to_pin_buffer1 = PageIsAllVisible(BufferGetPage(buffer1))
			&& !visibilitymap_pin_ok(block1, *vmbuffer1);
		need_to_pin_buffer2 = buffer2 != InvalidBuffer
			&& PageIsAllVisible(BufferGetPage(buffer2))
			&& !visibilitymap_pin_ok(block2, *vmbuffer2);
		if (!need_to_pin_buffer1 && !need_to_pin_buffer2)
			return;

		/* We must unlock both buffers before doing any I/O. */
		LockBuffer(buffer1, BUFFER_LOCK_UNLOCK);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_UNLOCK);

		/* Get pins. */
		if (need_to_pin_buffer1)
			visibilitymap_pin(relation, block1, vmbuffer1);
		if (need_to_pin_buffer2)
			visibilitymap_pin(relation, block2, vmbuffer2);

		/* Relock buffers. */
		LockBuffer(buffer1, BUFFER_LOCK_EXCLUSIVE);
		if (buffer2 != InvalidBuffer && buffer2 != buffer1)
			LockBuffer(buffer2, BUFFER_LOCK_EXCLUSIVE);

		/*
		 * If there are two buffers involved and we pinned just one of them,
		 * it's possible that the second one became all-visible while we were
		 * busy pinning the first one.  If it looks like that's a possible
		 * scenario, we'll need to make a second pass through this loop.
		 */
		if (buffer2 == InvalidBuffer || buffer1 == buffer2
			|| (need_to_pin_buffer1 && need_to_pin_buffer2))
			break;
	}
}

/*
 * Extend a relation by multiple blocks to avoid future contention on the
 * relation extension lock.  Our goal is to pre-extend the relation by an
 * amount which ramps up as the degree of contention ramps up, but limiting
 * the result to some sane overall value.
 */
static void
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
	BlockNumber blockNum,
				firstBlock = InvalidBlockNumber;
	int			extraBlocks;
	int			lockWaiters;

	/* Use the length of the lock wait queue to judge how much to extend. */
	lockWaiters = RelationExtensionLockWaiterCount(relation);
	if (lockWaiters <= 0)
		return;

	/*
	 * It might seem like multiplying the number of lock waiters by as much as
	 * 20 is too aggressive, but benchmarking revealed that smaller numbers
	 * were insufficient.  512 is just an arbitrary cap to prevent
	 * pathological results.
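	 * (For example, 5 waiters extend the relation by 100 blocks, while 26
	 * or more waiters hit the 512-block cap.)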
	 */
	extraBlocks = Min(512, lockWaiters * 20);

	do
	{
		Buffer		buffer;
		Page		page;
		Size		freespace;

		/*
		 * Extend by one page.  This should generally match the main-line
		 * extension code in RelationGetBufferForTuple, except that we hold
		 * the relation extension lock throughout.
		 */
		buffer = ReadBufferBI(relation, P_NEW, bistate);

		LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		page = BufferGetPage(buffer);

		if (!PageIsNew(page))
			elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
				 BufferGetBlockNumber(buffer),
				 RelationGetRelationName(relation));

		PageInit(page, BufferGetPageSize(buffer), 0);

		/*
		 * We mark all the new buffers dirty, but do nothing to write them
		 * out; they'll probably get used soon, and even if they are not, a
		 * crash will leave an okay all-zeroes page on disk.
		 */
		MarkBufferDirty(buffer);

		/* we'll need this info below */
		blockNum = BufferGetBlockNumber(buffer);
		freespace = PageGetHeapFreeSpace(page);

		UnlockReleaseBuffer(buffer);

		/* Remember first block number thus added. */
		if (firstBlock == InvalidBlockNumber)
			firstBlock = blockNum;

		/*
		 * Immediately update the bottom level of the FSM.  This has a good
		 * chance of making this page visible to other concurrently inserting
		 * backends, and we want that to happen without delay.
		 */
		RecordPageWithFreeSpace(relation, blockNum, freespace);
	}
	while (--extraBlocks > 0);

	/*
	 * Updating the upper levels of the free space map is too expensive to do
	 * for every block, but it's worth doing once at the end to make sure that
	 * subsequent insertion activity sees all of those nifty free pages we
	 * just inserted.
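	 * (The end bound passed to FreeSpaceMapVacuumRange is exclusive, hence
	 * blockNum + 1.)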
	 */
	FreeSpaceMapVacuumRange(relation, firstBlock, blockNum + 1);
}

/*
 * RelationGetBufferForTuple
 *
 * Returns pinned and exclusive-locked buffer of a page in given relation
 * with free space >= given len.
 *
 * If otherBuffer is not InvalidBuffer, then it references a previously
 * pinned buffer of another page in the same relation; on return, this
 * buffer will also be exclusive-locked.  (This case is used by heap_update;
 * the otherBuffer contains the tuple being updated.)
 *
 * The reason for passing otherBuffer is that if two backends are doing
 * concurrent heap_update operations, a deadlock could occur if they try
 * to lock the same two buffers in opposite orders.  To ensure that this
 * can't happen, we impose the rule that buffers of a relation must be
 * locked in increasing page number order.  This is most conveniently done
 * by having RelationGetBufferForTuple lock them both, with suitable care
 * for ordering.
 *
 * NOTE: it is unlikely, but not quite impossible, for otherBuffer to be the
 * same buffer we select for insertion of the new tuple (this could only
 * happen if space is freed in that page after heap_update finds there's not
 * enough there).  In that case, the page will be pinned and locked only once.
 *
 * For the vmbuffer and vmbuffer_other arguments, we avoid deadlock by
 * locking them only after locking the corresponding heap page, and taking
 * no further lwlocks while they are locked.
 *
 * We normally use FSM to help us find free space.  However,
 * if HEAP_INSERT_SKIP_FSM is specified, we just append a new empty page to
 * the end of the relation if the tuple won't fit on the current target page.
 * This can save some cycles when we know the relation is new and doesn't
 * contain useful amounts of free space.
 *
 * HEAP_INSERT_SKIP_FSM is also useful for non-WAL-logged additions to a
 * relation, if the caller holds exclusive lock and is careful to invalidate
 * relation's smgr_targblock before the first insertion --- that ensures that
 * all insertions will occur into newly added pages and not be intermixed
 * with tuples from other transactions.  That way, a crash can't risk losing
 * any committed data of other transactions.  (See heap_insert's comments
 * for additional constraints needed for safe usage of this behavior.)
 *
 * The caller can also provide a BulkInsertState object to optimize many
 * insertions into the same relation.  This keeps a pin on the current
 * insertion target page (to save pin/unpin cycles) and also passes a
 * BULKWRITE buffer selection strategy object to the buffer manager.
 * Passing NULL for bistate selects the default behavior.
 *
 * We always try to avoid filling existing pages further than the fillfactor.
 * This is OK since this routine is not consulted when updating a tuple and
 * keeping it on the same page, which is the scenario fillfactor is meant
 * to reserve space for.
 *
 * ereport(ERROR) is allowed here, so this routine *must* be called
 * before any (unlogged) changes are made in buffer pool.
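 *
 * A typical insertion caller (heap_insert, roughly sketched) does:
 *
 *		buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
 *										   InvalidBuffer, options, bistate,
 *										   &vmbuffer, NULL);
 *		... critical section: RelationPutHeapTuple(), MarkBufferDirty(),
 *		... then WAL-log the insert ...
 *		UnlockReleaseBuffer(buffer);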
 */
Buffer
RelationGetBufferForTuple(Relation relation, Size len,
						  Buffer otherBuffer, int options,
						  BulkInsertState bistate,
						  Buffer *vmbuffer, Buffer *vmbuffer_other)
{
	bool		use_fsm = !(options & HEAP_INSERT_SKIP_FSM);
	Buffer		buffer = InvalidBuffer;
	Page		page;
	Size		pageFreeSpace = 0,
				saveFreeSpace = 0;
	BlockNumber targetBlock,
				otherBlock;
	bool		needLock;

	len = MAXALIGN(len);		/* be conservative */

	/* Bulk insert is not supported for updates, only inserts. */
	Assert(otherBuffer == InvalidBuffer || !bistate);

	/*
	 * If we're gonna fail for oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %zu, maximum size %zu",
						len, MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(relation,
												   HEAP_DEFAULT_FILLFACTOR);

	if (otherBuffer != InvalidBuffer)
		otherBlock = BufferGetBlockNumber(otherBuffer);
	else
		otherBlock = InvalidBlockNumber;	/* just to keep compiler quiet */

	/*
	 * We first try to put the tuple on the same page we last inserted a tuple
	 * on, as cached in the BulkInsertState or relcache entry.  If that
	 * doesn't work, we ask the Free Space Map to locate a suitable page.
	 * Since the FSM's info might be out of date, we have to be prepared to
	 * loop around and retry multiple times.  (To ensure this isn't an
	 * infinite loop, we must update the FSM with the correct amount of free
	 * space on each page that proves not to be suitable.)  If the FSM has no
	 * record of a page with enough free space, we give up and extend the
	 * relation.
	 *
	 * When use_fsm is false, we either put the tuple onto the existing target
	 * page or extend the relation.
	 */
	if (len + saveFreeSpace > MaxHeapTupleSize)
	{
		/* can't fit, don't bother asking FSM */
		targetBlock = InvalidBlockNumber;
		use_fsm = false;
	}
	else if (bistate && bistate->current_buf != InvalidBuffer)
		targetBlock = BufferGetBlockNumber(bistate->current_buf);
	else
		targetBlock = RelationGetTargetBlock(relation);

	if (targetBlock == InvalidBlockNumber && use_fsm)
	{
		/*
		 * We have no cached target page, so ask the FSM for an initial
		 * target.
		 */
		targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

		/*
		 * If the FSM knows nothing of the rel, try the last page before we
		 * give up and extend.  This avoids one-tuple-per-page syndrome during
		 * bootstrapping or in a recently-started system.
		 */
		if (targetBlock == InvalidBlockNumber)
		{
			BlockNumber nblocks = RelationGetNumberOfBlocks(relation);

			if (nblocks > 0)
				targetBlock = nblocks - 1;
		}
	}

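	/*
	 * The "loop" label is the retry point used when some other backend
	 * extends the relation while we were waiting for the extension lock.
	 */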
loop:
	while (targetBlock != InvalidBlockNumber)
	{
		/*
		 * Read and exclusive-lock the target block, as well as the other
		 * block if one was given, taking suitable care with lock ordering and
		 * the possibility they are the same block.
		 *
		 * If the page-level all-visible flag is set, caller will need to
		 * clear both that and the corresponding visibility map bit.  However,
		 * by the time we return, we'll have x-locked the buffer, and we don't
		 * want to do any I/O while in that state.  So we check the bit here
		 * before taking the lock, and pin the page if it appears necessary.
		 * Checking without the lock creates a risk of getting the wrong
		 * answer, so we'll have to recheck after acquiring the lock.
		 */
		if (otherBuffer == InvalidBuffer)
		{
			/* easy case */
			buffer = ReadBufferBI(relation, targetBlock, bistate);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock == targetBlock)
		{
			/* also easy case */
			buffer = otherBuffer;
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else if (otherBlock < targetBlock)
		{
			/* lock other buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
		}
		else
		{
			/* lock target buffer first */
			buffer = ReadBuffer(relation, targetBlock);
			if (PageIsAllVisible(BufferGetPage(buffer)))
				visibilitymap_pin(relation, targetBlock, vmbuffer);
			LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
			LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);
		}

		/*
		 * We now have the target page (and the other buffer, if any) pinned
		 * and locked.  However, since our initial PageIsAllVisible checks
		 * were performed before acquiring the lock, the results might now be
		 * out of date, either for the selected victim buffer, or for the
		 * other buffer passed by the caller.  In that case, we'll need to
		 * give up our locks, go get the pin(s) we failed to get earlier, and
		 * re-lock.  That's pretty painful, but hopefully shouldn't happen
		 * often.
		 *
		 * Note that there's a small possibility that we didn't pin the page
		 * above but still have the correct page pinned anyway, either because
		 * we've already made a previous pass through this loop, or because
		 * caller passed us the right page anyway.
		 *
		 * Note also that it's possible that by the time we get the pin and
		 * retake the buffer locks, the visibility map bit will have been
		 * cleared by some other backend anyway.  In that case, we'll have
		 * done a bit of extra work for no gain, but there's no real harm
		 * done.
		 */
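		/*
		 * GetVisibilityMapPins requires its first block argument to be the
		 * lower-numbered one, so order the arguments accordingly.
		 */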
		if (otherBuffer == InvalidBuffer || targetBlock <= otherBlock)
			GetVisibilityMapPins(relation, buffer, otherBuffer,
								 targetBlock, otherBlock, vmbuffer,
								 vmbuffer_other);
		else
			GetVisibilityMapPins(relation, otherBuffer, buffer,
								 otherBlock, targetBlock, vmbuffer_other,
								 vmbuffer);

		/*
		 * Now we can check to see if there's enough free space here.  If so,
		 * we're done.
		 */
		page = BufferGetPage(buffer);
		pageFreeSpace = PageGetHeapFreeSpace(page);
		if (len + saveFreeSpace <= pageFreeSpace)
		{
			/* use this page as future insert target, too */
			RelationSetTargetBlock(relation, targetBlock);
			return buffer;
		}

		/*
		 * Not enough space, so we must give up our page locks and pin (if
		 * any) and prepare to look elsewhere.  We don't care which order we
		 * unlock the two buffers in, so this can be slightly simpler than the
		 * code above.
		 */
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		if (otherBuffer == InvalidBuffer)
			ReleaseBuffer(buffer);
		else if (otherBlock != targetBlock)
		{
			LockBuffer(otherBuffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
		}

		/* Without FSM, always fall out of the loop and extend */
		if (!use_fsm)
			break;

		/*
		 * Update FSM as to condition of this page, and ask for another page
		 * to try.
		 */
		targetBlock = RecordAndGetPageWithFreeSpace(relation,
													targetBlock,
													pageFreeSpace,
													len + saveFreeSpace);
	}

	/*
	 * Have to extend the relation.
	 *
	 * We have to use a lock to ensure no one else is extending the rel at the
	 * same time, else we will both try to initialize the same new page.  We
	 * can skip locking for new or temp relations, however, since no one else
	 * could be accessing them.
	 */
	needLock = !RELATION_IS_LOCAL(relation);

	/*
	 * If we need the lock but are not able to acquire it immediately, we'll
	 * consider extending the relation by multiple blocks at a time to manage
	 * contention on the relation extension lock.  However, this only makes
	 * sense if we're using the FSM; otherwise, there's no point.
	 */
	if (needLock)
	{
		if (!use_fsm)
			LockRelationForExtension(relation, ExclusiveLock);
		else if (!ConditionalLockRelationForExtension(relation, ExclusiveLock))
		{
			/* Couldn't get the lock immediately; wait for it. */
			LockRelationForExtension(relation, ExclusiveLock);

			/*
			 * Check if some other backend has extended a block for us while
			 * we were waiting on the lock.
			 */
			targetBlock = GetPageWithFreeSpace(relation, len + saveFreeSpace);

			/*
			 * If some other waiter has already extended the relation, we
			 * don't need to do so; just use the existing freespace.
			 */
			if (targetBlock != InvalidBlockNumber)
			{
				UnlockRelationForExtension(relation, ExclusiveLock);
				goto loop;
			}

			/* Time to bulk-extend. */
			RelationAddExtraBlocks(relation, bistate);
		}
	}

	/*
	 * In addition to whatever extension we performed above, we always add at
	 * least one block to satisfy our own request.
	 *
	 * XXX This does an lseek - rather expensive - but at the moment it is the
	 * only way to accurately determine how many blocks are in a relation.  Is
	 * it worth keeping an accurate file length in shared memory someplace,
	 * rather than relying on the kernel to do it for us?
	 */
	buffer = ReadBufferBI(relation, P_NEW, bistate);

	/*
	 * We can be certain that locking the otherBuffer first is OK, since it
	 * must have a lower page number.
	 */
	if (otherBuffer != InvalidBuffer)
		LockBuffer(otherBuffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Now acquire lock on the new page.
	 */
	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Release the file-extension lock; it's now OK for someone else to extend
	 * the relation some more.  Note that we cannot release this lock before
	 * we have buffer lock on the new page, or we risk a race condition
	 * against vacuumlazy.c --- see comments therein.
	 */
	if (needLock)
		UnlockRelationForExtension(relation, ExclusiveLock);

	/*
	 * We need to initialize the empty new page.  Double-check that it really
	 * is empty (this should never happen, but if it does we don't want to
	 * risk wiping out valid data).
	 */
	page = BufferGetPage(buffer);

	if (!PageIsNew(page))
		elog(ERROR, "page %u of relation \"%s\" should be empty but is not",
			 BufferGetBlockNumber(buffer),
			 RelationGetRelationName(relation));

	PageInit(page, BufferGetPageSize(buffer), 0);

	if (len > PageGetHeapFreeSpace(page))
	{
		/* We should not get here given the test at the top */
		elog(PANIC, "tuple is too big: size %zu", len);
	}

	/*
	 * Remember the new page as our target for future insertions.
	 *
	 * XXX should we enter the new page into the free space map immediately,
	 * or just keep it for this backend's exclusive use in the short run
	 * (until VACUUM sees it)?  Seems to depend on whether you expect the
	 * current backend to make more insertions or not, which is probably a
	 * good bet most of the time.  So for now, don't add it to FSM yet.
	 */
	RelationSetTargetBlock(relation, BufferGetBlockNumber(buffer));

	return buffer;
}