/*-------------------------------------------------------------------------
 *
 * rewriteheap.c
 *    Support functions to rewrite tables.
 *
 * These functions provide a facility to completely rewrite a heap, while
 * preserving visibility information and update chains.
 *
 * INTERFACE
 *
 * The caller is responsible for creating the new heap, all catalog
 * changes, supplying the tuples to be written to the new heap, and
 * rebuilding indexes.  The caller must hold AccessExclusiveLock on the
 * target table, because we assume no one else is writing into it.
 *
 * To use the facility:
 *
 * begin_heap_rewrite
 * while (fetch next tuple)
 * {
 *     if (tuple is dead)
 *         rewrite_heap_dead_tuple
 *     else
 *     {
 *         // do any transformations here if required
 *         rewrite_heap_tuple
 *     }
 * }
 * end_heap_rewrite
 *
 * The contents of the new relation shouldn't be relied on until after
 * end_heap_rewrite is called.
 *
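 * As a more concrete sketch of a caller (illustrative only: the scan
 * helpers my_fetch_next_tuple() and my_tuple_is_dead() are placeholders,
 * not part of this module's API):
 *
 *     RewriteState rwstate;
 *
 *     rwstate = begin_heap_rewrite(old_heap, new_heap, oldest_xmin,
 *                                  freeze_xid, cutoff_multi);
 *     while ((tuple = my_fetch_next_tuple(scan)) != NULL)
 *     {
 *         if (my_tuple_is_dead(tuple, oldest_xmin))
 *             rewrite_heap_dead_tuple(rwstate, tuple);
 *         else
 *         {
 *             /* rewrite_heap_tuple scribbles on its new_tuple argument,
 *              * so pass temp storage, never the original tuple */
 *             HeapTuple copied = heap_copytuple(tuple);
 *
 *             rewrite_heap_tuple(rwstate, tuple, copied);
 *             heap_freetuple(copied);
 *         }
 *     }
 *     end_heap_rewrite(rwstate);
 *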
 *
 * IMPLEMENTATION
 *
 * This would be a fairly trivial affair, except that we need to maintain
 * the ctid chains that link versions of an updated tuple together.
 * Since the newly stored tuples will have tids different from the original
 * ones, if we just copied t_ctid fields to the new table the links would
 * be wrong.  When we are required to copy a (presumably recently-dead or
 * delete-in-progress) tuple whose ctid doesn't point to itself, we have
 * to substitute the correct ctid instead.
 *
 * For each ctid reference from A -> B, we might encounter either A first
 * or B first.  (Note that a tuple in the middle of a chain is both A and B
 * of different pairs.)
 *
 * If we encounter A first, we'll store the tuple in the unresolved_tups
 * hash table.  When we later encounter B, we remove A from the hash table,
 * fix the ctid to point to the new location of B, and insert both A and B
 * to the new heap.
 *
 * If we encounter B first, we can insert B to the new heap right away.
 * We then add an entry to the old_new_tid_map hash table showing B's
 * original tid (in the old heap) and new tid (in the new heap).
 * When we later encounter A, we get the new location of B from the table,
 * and can write A immediately with the correct ctid.
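 *
 * As a worked example: suppose A at old tid (10,1) was updated to B at
 * old tid (10,2), and the scan delivers B first.  B is inserted right
 * away, landing at, say, (3,7) in the new heap; old_new_tid_map then
 * records (10,2) -> (3,7).  When A arrives later, the lookup resolves
 * its ctid to (3,7) and A can be written immediately.  (The block and
 * offset numbers here are made up for illustration.)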
 *
 * Entries in the hash tables can be removed as soon as the later tuple
 * is encountered.  That helps to keep the memory usage down.  At the end,
 * both tables are usually empty; we should have encountered both A and B
 * of each pair.  However, it's possible for A to be RECENTLY_DEAD and B
 * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
 * for deadness using OldestXmin is not exact.  In such a case we might
 * encounter B first, and skip it, and find A later.  Then A would be added
 * to unresolved_tups, and stay there until end of the rewrite.  Since
 * this case is very unusual, we don't worry about the memory usage.
 *
 * Using in-memory hash tables means that we use some memory for each live
 * update chain in the table, from the time we find one end of the
 * reference until we find the other end.  That shouldn't be a problem in
 * practice, but if you do something like an UPDATE without a where-clause
 * on a large table, and then run CLUSTER in the same transaction, you
 * could run out of memory.  It doesn't seem worthwhile to add support for
 * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
 * table under normal circumstances.  Furthermore, in the typical scenario
 * of CLUSTERing on an unchanging key column, we'll see all the versions
 * of a given tuple together anyway, and so the peak memory usage is only
 * proportional to the number of RECENTLY_DEAD versions of a single row,
 * not to the number in the whole table.  Note that if we do fail halfway
 * through a CLUSTER, the old table is still valid, so failure is not
 * catastrophic.
 *
 * We can't use the normal heap_insert function to insert into the new
 * heap, because heap_insert overwrites the visibility information.
 * We use a special-purpose raw_heap_insert function instead, which
 * is optimized for bulk inserting a lot of tuples, knowing that we have
 * exclusive access to the heap.  raw_heap_insert builds new pages in
 * local storage.  When a page is full, or at the end of the process,
 * we insert it to WAL as a single record and then write it to disk
 * directly through smgr.  Note, however, that any data sent to the new
 * heap's TOAST table will go through the normal bufmgr.
 *
 *
 * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/heap/rewriteheap.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <sys/stat.h>
#include <unistd.h>

#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/xact.h"
#include "access/xloginsert.h"
#include "catalog/catalog.h"
#include "lib/ilist.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "replication/logical.h"
#include "replication/slot.h"
#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"

/*
 * State associated with a rewrite operation.  This is opaque to the user
 * of the rewrite facility.
 */
typedef struct RewriteStateData
{
    Relation    rs_old_rel;     /* source heap */
    Relation    rs_new_rel;     /* destination heap */
    Page        rs_buffer;      /* page currently being built */
    BlockNumber rs_blockno;     /* block where page will go */
    bool        rs_buffer_valid;    /* T if any tuples in buffer */
    bool        rs_logical_rewrite; /* do we need to do logical rewriting */
    TransactionId rs_oldest_xmin;   /* oldest xmin used by caller to determine
                                     * tuple visibility */
    TransactionId rs_freeze_xid;    /* Xid that will be used as freeze cutoff
                                     * point */
    TransactionId rs_logical_xmin;  /* Xid that will be used as cutoff point
                                     * for logical rewrites */
    MultiXactId rs_cutoff_multi;    /* MultiXactId that will be used as cutoff
                                     * point for multixacts */
    MemoryContext rs_cxt;       /* for hash tables and entries and tuples in
                                 * them */
    XLogRecPtr  rs_begin_lsn;   /* XLogInsertLsn when starting the rewrite */
    HTAB       *rs_unresolved_tups; /* unmatched A tuples */
    HTAB       *rs_old_new_tid_map; /* unmatched B tuples */
    HTAB       *rs_logical_mappings;    /* logical remapping files */
    uint32      rs_num_rewrite_mappings;    /* # in memory mappings */
} RewriteStateData;

/*
 * The lookup keys for the hash tables are tuple TID and xmin (we must check
 * both to avoid false matches from dead tuples).  Beware that there is
 * probably some padding space in this struct; it must be zeroed out for
 * correct hashtable operation.
 */
typedef struct
{
    TransactionId xmin;         /* tuple xmin */
    ItemPointerData tid;        /* tuple location in old heap */
} TidHashKey;
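
/*
 * Because of that padding, keys are always built by zeroing the whole
 * struct first, as rewrite_heap_tuple() and rewrite_heap_dead_tuple()
 * do below:
 *
 *     TidHashKey  hashkey;
 *
 *     memset(&hashkey, 0, sizeof(hashkey));
 *     hashkey.xmin = HeapTupleHeaderGetXmin(tup->t_data);
 *     hashkey.tid = tup->t_self;
 */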

/*
 * Entry structures for the hash tables
 */
typedef struct
{
    TidHashKey  key;            /* expected xmin/old location of B tuple */
    ItemPointerData old_tid;    /* A's location in the old heap */
    HeapTuple   tuple;          /* A's tuple contents */
} UnresolvedTupData;

typedef UnresolvedTupData *UnresolvedTup;

typedef struct
{
    TidHashKey  key;            /* actual xmin/old location of B tuple */
    ItemPointerData new_tid;    /* where we put it in the new heap */
} OldToNewMappingData;

typedef OldToNewMappingData *OldToNewMapping;

/*
 * In-memory data for an xid that might need logical remapping entries
 * to be logged.
 */
typedef struct RewriteMappingFile
{
    TransactionId xid;          /* xid that might need to see the row */
    int         vfd;            /* fd of mappings file */
    off_t       off;            /* how far we have written so far */
    uint32      num_mappings;   /* number of in-memory mappings */
    dlist_head  mappings;       /* list of in-memory mappings */
    char        path[MAXPGPATH];    /* path, for error messages */
} RewriteMappingFile;

/*
 * A single in-memory logical rewrite mapping, hanging off
 * RewriteMappingFile->mappings.
 */
typedef struct RewriteMappingDataEntry
{
    LogicalRewriteMappingData map;  /* map between old and new location of
                                     * the tuple */
    dlist_node  node;
} RewriteMappingDataEntry;


/* prototypes for internal functions */
static void raw_heap_insert(RewriteState state, HeapTuple tup);

/* internal logical remapping prototypes */
static void logical_begin_heap_rewrite(RewriteState state);
static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
static void logical_end_heap_rewrite(RewriteState state);


/*
 * Begin a rewrite of a table
 *
 * old_heap        old, locked heap relation tuples will be read from
 * new_heap        new, locked heap relation to insert tuples to
 * oldest_xmin     xid used by the caller to determine which tuples are dead
 * freeze_xid      xid before which tuples will be frozen
 * cutoff_multi    multixact before which multis will be removed
 *
 * Returns an opaque RewriteState, allocated in current memory context,
 * to be used in subsequent calls to the other functions.
 */
RewriteState
begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
                   TransactionId freeze_xid, MultiXactId cutoff_multi)
{
    RewriteState state;
    MemoryContext rw_cxt;
    MemoryContext old_cxt;
    HASHCTL     hash_ctl;

    /*
     * To ease cleanup, make a separate context that will contain the
     * RewriteState struct itself plus all subsidiary data.
     */
    rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
                                   "Table rewrite",
                                   ALLOCSET_DEFAULT_SIZES);
    old_cxt = MemoryContextSwitchTo(rw_cxt);

    /* Create and fill in the state struct */
    state = palloc0(sizeof(RewriteStateData));

    state->rs_old_rel = old_heap;
    state->rs_new_rel = new_heap;
    state->rs_buffer = (Page) palloc(BLCKSZ);
    /* new_heap needn't be empty, just locked */
    state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
    state->rs_buffer_valid = false;
    state->rs_oldest_xmin = oldest_xmin;
    state->rs_freeze_xid = freeze_xid;
    state->rs_cutoff_multi = cutoff_multi;
    state->rs_cxt = rw_cxt;

    /* Initialize hash tables used to track update chains */
    hash_ctl.keysize = sizeof(TidHashKey);
    hash_ctl.entrysize = sizeof(UnresolvedTupData);
    hash_ctl.hcxt = state->rs_cxt;

    state->rs_unresolved_tups =
        hash_create("Rewrite / Unresolved ctids",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    hash_ctl.entrysize = sizeof(OldToNewMappingData);

    state->rs_old_new_tid_map =
        hash_create("Rewrite / Old to new tid map",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    MemoryContextSwitchTo(old_cxt);

    logical_begin_heap_rewrite(state);

    return state;
}

/*
 * End a rewrite.
 *
 * state and any other resources are freed.
 */
void
end_heap_rewrite(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    UnresolvedTup unresolved;

    /*
     * Write any remaining tuples in the UnresolvedTups table.  If we have
     * any left, they should in fact be dead, but let's err on the safe side.
     */
    hash_seq_init(&seq_status, state->rs_unresolved_tups);

    while ((unresolved = hash_seq_search(&seq_status)) != NULL)
    {
        ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
        raw_heap_insert(state, unresolved->tuple);
    }

    /* Write the last page, if any */
    if (state->rs_buffer_valid)
    {
        if (RelationNeedsWAL(state->rs_new_rel))
            log_newpage(&state->rs_new_rel->rd_node,
                        MAIN_FORKNUM,
                        state->rs_blockno,
                        state->rs_buffer,
                        true);

        PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);

        RelationOpenSmgr(state->rs_new_rel);
        smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
                   (char *) state->rs_buffer, true);
    }

    /*
     * When we WAL-logged rel pages, we must nonetheless fsync them.  The
     * reason is the same as in storage.c's RelationCopyStorage(): we're
     * writing data that's not in shared buffers, and so a CHECKPOINT
     * occurring during the rewriteheap operation won't have fsync'd data we
     * wrote before the checkpoint.
     */
    if (RelationNeedsWAL(state->rs_new_rel))
    {
        /* for an empty table, this could be first smgr access */
        RelationOpenSmgr(state->rs_new_rel);
        smgrimmedsync(state->rs_new_rel->rd_smgr, MAIN_FORKNUM);
    }

    logical_end_heap_rewrite(state);

    /* Deleting the context frees everything */
    MemoryContextDelete(state->rs_cxt);
}

/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state        opaque state as returned by begin_heap_rewrite
 * old_tuple    original tuple in the old heap
 * new_tuple    new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
                   HeapTuple old_tuple, HeapTuple new_tuple)
{
    MemoryContext old_cxt;
    ItemPointerData old_tid;
    TidHashKey  hashkey;
    bool        found;
    bool        free_new;

    old_cxt = MemoryContextSwitchTo(state->rs_cxt);

    /*
     * Copy the original tuple's visibility information into new_tuple.
     *
     * XXX we might later need to copy some t_infomask2 bits, too? Right now,
     * we intentionally clear the HOT status bits.
     */
    memcpy(&new_tuple->t_data->t_choice.t_heap,
           &old_tuple->t_data->t_choice.t_heap,
           sizeof(HeapTupleFields));

    new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
    new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
    new_tuple->t_data->t_infomask |=
        old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

    /*
     * While we have our hands on the tuple, we may as well freeze any
     * eligible xmin or xmax, so that future VACUUM effort can be saved.
     */
    heap_freeze_tuple(new_tuple->t_data,
                      state->rs_old_rel->rd_rel->relfrozenxid,
                      state->rs_old_rel->rd_rel->relminmxid,
                      state->rs_freeze_xid,
                      state->rs_cutoff_multi);

    /*
     * Invalid ctid means that ctid should point to the tuple itself.  We'll
     * override it later if the tuple is part of an update chain.
     */
    ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

    /*
     * If the tuple has been updated, check the old-to-new mapping hash
     * table.
     */
    if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
          HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
        !HeapTupleHeaderIndicatesMovedPartitions(old_tuple->t_data) &&
        !(ItemPointerEquals(&(old_tuple->t_self),
                            &(old_tuple->t_data->t_ctid))))
    {
        OldToNewMapping mapping;

        memset(&hashkey, 0, sizeof(hashkey));
        hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
        hashkey.tid = old_tuple->t_data->t_ctid;

        mapping = (OldToNewMapping)
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_FIND, NULL);

        if (mapping != NULL)
        {
            /*
             * We've already copied the tuple that t_ctid points to, so we
             * can set the ctid of this tuple to point to the new location,
             * and insert it right away.
             */
            new_tuple->t_data->t_ctid = mapping->new_tid;

            /* We don't need the mapping entry anymore */
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_REMOVE, &found);
            Assert(found);
        }
        else
        {
            /*
             * We haven't seen the tuple t_ctid points to yet.  Stash this
             * tuple into unresolved_tups to be written later.
             */
            UnresolvedTup unresolved;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_ENTER, &found);
            Assert(!found);

            unresolved->old_tid = old_tuple->t_self;
            unresolved->tuple = heap_copytuple(new_tuple);

            /*
             * We can't do anything more now, since we don't know where the
             * tuple will be written.
             */
            MemoryContextSwitchTo(old_cxt);
            return;
        }
    }

    /*
     * Now we will write the tuple, and then check to see if it is the B
     * tuple in any new or known pair.  When we resolve a known pair, we will
     * be able to write that pair's A tuple, and then we have to check if it
     * resolves some other pair.  Hence, we need a loop here.
     */
    old_tid = old_tuple->t_self;
    free_new = false;

    for (;;)
    {
        ItemPointerData new_tid;

        /* Insert the tuple and find out where it's put in new_heap */
        raw_heap_insert(state, new_tuple);
        new_tid = new_tuple->t_self;

        logical_rewrite_heap_tuple(state, old_tid, new_tuple);

        /*
         * If the tuple is the updated version of a row, and the prior
         * version wouldn't be DEAD yet, then we need to either resolve the
         * prior version (if it's waiting in rs_unresolved_tups), or make an
         * entry in rs_old_new_tid_map (so we can resolve it when we do see
         * it).  The previous tuple's xmax would equal this one's xmin, so
         * it's RECENTLY_DEAD if and only if the xmin is not before
         * OldestXmin.
         */
        if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
            !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
                                   state->rs_oldest_xmin))
        {
            /*
             * Okay, this is B in an update pair.  See if we've seen A.
             */
            UnresolvedTup unresolved;

            memset(&hashkey, 0, sizeof(hashkey));
            hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
            hashkey.tid = old_tid;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_FIND, NULL);

            if (unresolved != NULL)
            {
                /*
                 * We have seen and memorized the previous tuple already.
                 * Now that we know where we inserted the tuple its t_ctid
                 * points to, fix its t_ctid and insert it to the new heap.
                 */
                if (free_new)
                    heap_freetuple(new_tuple);
                new_tuple = unresolved->tuple;
                free_new = true;
                old_tid = unresolved->old_tid;
                new_tuple->t_data->t_ctid = new_tid;

                /*
                 * We don't need the hash entry anymore, but don't free its
                 * tuple just yet.
                 */
                hash_search(state->rs_unresolved_tups, &hashkey,
                            HASH_REMOVE, &found);
                Assert(found);

                /* loop back to insert the previous tuple in the chain */
                continue;
            }
            else
            {
                /*
                 * Remember the new tid of this tuple.  We'll use it to set
                 * the ctid when we find the previous tuple in the chain.
                 */
                OldToNewMapping mapping;

                mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
                                      HASH_ENTER, &found);
                Assert(!found);

                mapping->new_tid = new_tid;
            }
        }

        /* Done with this (chain of) tuples, for now */
        if (free_new)
            heap_freetuple(new_tuple);
        break;
    }

    MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite.  Dead tuples are not
 * copied to the new table, but we still make note of them so that we
 * can release some resources earlier.
 *
 * Returns true if a tuple was removed from the unresolved_tups table.
 * This indicates that the tuple, previously thought to be "recently dead",
 * is now known really dead and won't be written to the output.
 */
bool
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
    /*
     * If we have already seen an earlier tuple in the update chain that
     * points to this tuple, let's forget about that earlier tuple.  It's in
     * fact dead as well, our simple xmax < OldestXmin test in
     * HeapTupleSatisfiesVacuum just wasn't enough to detect it.  It happens
     * when xmin of a tuple is greater than xmax, which sounds
     * counter-intuitive but is perfectly valid.
     *
     * We don't bother to try to detect the situation the other way round,
     * when we encounter the dead tuple first and then the recently dead one
     * that points to it.  If that happens, we'll have some unmatched entries
     * in the UnresolvedTups hash table at the end.  That can happen anyway,
     * because a vacuum might have removed the dead tuple in the chain before
     * us.
     */
    UnresolvedTup unresolved;
    TidHashKey  hashkey;
    bool        found;

    memset(&hashkey, 0, sizeof(hashkey));
    hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
    hashkey.tid = old_tuple->t_self;

    unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                             HASH_FIND, NULL);

    if (unresolved != NULL)
    {
        /* Need to free the contained tuple as well as the hashtable entry */
        heap_freetuple(unresolved->tuple);
        hash_search(state->rs_unresolved_tups, &hashkey,
                    HASH_REMOVE, &found);
        Assert(found);
        return true;
    }

    return false;
}

/*
 * Insert a tuple to the new relation.  This has to track heap_insert
 * and its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple.  If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
    Page        page = state->rs_buffer;
    Size        pageFreeSpace,
                saveFreeSpace;
    Size        len;
    OffsetNumber newoff;
    HeapTuple   heaptup;

    /*
     * If the new tuple is too big for storage or contains already toasted
     * out-of-line attributes from some other relation, invoke the toaster.
     *
     * Note: below this point, heaptup is the data we actually intend to
     * store into the relation; tup is the caller's original untoasted data.
     */
    if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(tup));
        heaptup = tup;
    }
    else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    {
        int         options = HEAP_INSERT_SKIP_FSM;

        /*
         * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
         * for the TOAST table are not logically decoded.  The main heap is
         * WAL-logged as XLOG FPI records, which are not logically decoded.
         */
        options |= HEAP_INSERT_NO_LOGICAL;

        heaptup = heap_toast_insert_or_update(state->rs_new_rel, tup, NULL,
                                              options);
    }
    else
        heaptup = tup;

    len = MAXALIGN(heaptup->t_len); /* be conservative */

    /*
     * If we're going to fail for an oversized tuple, do it right away
     */
    if (len > MaxHeapTupleSize)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("row is too big: size %zu, maximum size %zu",
                        len, MaxHeapTupleSize)));

    /* Compute desired extra freespace due to fillfactor option */
    saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
                                                   HEAP_DEFAULT_FILLFACTOR);

    /* Now we can check to see if there's enough free space already. */
    if (state->rs_buffer_valid)
    {
        pageFreeSpace = PageGetHeapFreeSpace(page);

        if (len + saveFreeSpace > pageFreeSpace)
        {
            /*
             * Doesn't fit, so write out the existing page.  It always
             * contains a tuple.  Hence, unlike RelationGetBufferForTuple(),
             * enforce saveFreeSpace unconditionally.
             */

            /* XLOG stuff */
            if (RelationNeedsWAL(state->rs_new_rel))
                log_newpage(&state->rs_new_rel->rd_node,
                            MAIN_FORKNUM,
                            state->rs_blockno,
                            page,
                            true);

            /*
             * Now write the page.  We say skipFsync = true because there's
             * no need for smgr to schedule an fsync for this write; we'll do
             * it ourselves in end_heap_rewrite.
             */
            RelationOpenSmgr(state->rs_new_rel);

            PageSetChecksumInplace(page, state->rs_blockno);

            smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
                       state->rs_blockno, (char *) page, true);

            state->rs_blockno++;
            state->rs_buffer_valid = false;
        }
    }

    if (!state->rs_buffer_valid)
    {
        /* Initialize a new empty page */
        PageInit(page, BLCKSZ, 0);
        state->rs_buffer_valid = true;
    }

    /* And now we can insert the tuple into the page */
    newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
                         InvalidOffsetNumber, false, true);
    if (newoff == InvalidOffsetNumber)
        elog(ERROR, "failed to add tuple");

    /* Update caller's t_self to the actual position where it was stored */
    ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

    /*
     * Insert the correct position into CTID of the stored tuple, too, if the
     * caller didn't supply a valid CTID.
     */
    if (!ItemPointerIsValid(&tup->t_data->t_ctid))
    {
        ItemId      newitemid;
        HeapTupleHeader onpage_tup;

        newitemid = PageGetItemId(page, newoff);
        onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

        onpage_tup->t_ctid = tup->t_self;
    }

    /* If heaptup is a private copy, release it. */
    if (heaptup != tup)
        heap_freetuple(heaptup);
}

/* ------------------------------------------------------------------------
 * Logical rewrite support
 *
 * When doing logical decoding - which relies on using cmin/cmax of catalog
 * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
 * information to allow the decoding backend to update its internal mapping
 * of (relfilenode,ctid) => (cmin, cmax) to be correct for the rewritten heap.
 *
 * For that, every time we find a tuple that's been modified in a catalog
 * relation within the xmin horizon of any decoding slot, we log a mapping
 * from the old to the new location.
 *
 * To deal with rewrites that abort, the filename of a mapping file contains
 * the xid of the transaction performing the rewrite, which then can be
 * checked before being read in.
 *
 * For efficiency we don't immediately spill every single mapping for a
 * row to disk but only do so in batches when we've collected several of them
 * in memory or when end_heap_rewrite() has been called.
 *
 * Crash-Safety: This module diverts from the usual patterns of doing WAL
 * since it cannot rely on checkpoint flushing out all buffers and thus
 * waiting for exclusive locks on buffers.  Usually the XLogInsert() covering
 * buffer modifications is performed while the buffer(s) that are being
 * modified are exclusively locked, guaranteeing that both the WAL record and
 * the modified heap end up on the same side of the checkpoint.  But since the
 * mapping files we log aren't in shared_buffers that interlock doesn't work.
 *
 * Instead we simply write the mapping files out to disk, *before* the
 * XLogInsert() is performed.  That guarantees that either the XLogInsert() is
 * inserted after the checkpoint's redo pointer or that the checkpoint (via
 * CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping file to
 * disk.  That leaves the tail end that has not yet been flushed open to
 * corruption, which is solved by including the current offset in the
 * xl_heap_rewrite_mapping records and truncating the mapping file to it
 * during replay.  Every time a rewrite is finished all generated mapping
 * files are synced to disk.
 *
 * Note that if we were only concerned about crash safety we wouldn't have to
 * deal with WAL logging at all - an fsync() at the end of a rewrite would be
 * sufficient for crash safety.  Any mapping that hasn't been safely flushed
 * to disk must belong to a transaction that aborted (explicitly or via a
 * crash) and is ignored by virtue of the xid in its name being subject to a
 * TransactionIdDidCommit() check.  But we want to support having standbys
 * via physical replication, both for availability and to do logical decoding
 * there.
 * ------------------------------------------------------------------------
 */
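
/*
 * Concretely, the mapping files created below all live under
 * pg_logical/mappings/ and are named per LOGICAL_REWRITE_FORMAT (defined
 * in rewriteheap.h), which encodes, in order: the database oid, the relid
 * of the mapped relation, the LSN at which the rewrite began, the xid the
 * mapping is for, and the xid of the rewriting transaction itself.  See
 * logical_rewrite_log_mapping() and heap_xlog_logical_rewrite() below for
 * the writer and replay sides respectively.
 */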

/*
 * Do preparations for logging logical mappings during a rewrite if
 * necessary.  If we detect that we don't need to log anything we'll prevent
 * any further action by the various logical rewrite functions.
 */
static void
logical_begin_heap_rewrite(RewriteState state)
{
    HASHCTL     hash_ctl;
    TransactionId logical_xmin;

    /*
     * We only need to persist these mappings if the rewritten table can be
     * accessed during logical decoding; if not, we can skip doing any
     * additional work.
     */
    state->rs_logical_rewrite =
        RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);

    if (!state->rs_logical_rewrite)
        return;

    ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);

    /*
     * If there are no logical slots in progress we don't need to do
     * anything, there cannot be any remappings for relevant rows yet.  The
     * relation's lock protects us against races.
     */
    if (logical_xmin == InvalidTransactionId)
    {
        state->rs_logical_rewrite = false;
        return;
    }

    state->rs_logical_xmin = logical_xmin;
    state->rs_begin_lsn = GetXLogInsertRecPtr();
    state->rs_num_rewrite_mappings = 0;

    hash_ctl.keysize = sizeof(TransactionId);
    hash_ctl.entrysize = sizeof(RewriteMappingFile);
    hash_ctl.hcxt = state->rs_cxt;

    state->rs_logical_mappings =
        hash_create("Logical rewrite mapping",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

/*
 * Flush all logical in-memory mappings to disk, but don't fsync them yet.
 */
static void
logical_heap_rewrite_flush_mappings(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    RewriteMappingFile *src;
    dlist_mutable_iter iter;

    Assert(state->rs_logical_rewrite);

    /* no logical rewrite in progress, no need to iterate over mappings */
    if (state->rs_num_rewrite_mappings == 0)
        return;

    elog(DEBUG1, "flushing %u logical rewrite mapping entries",
         state->rs_num_rewrite_mappings);

    hash_seq_init(&seq_status, state->rs_logical_mappings);
    while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
    {
        char       *waldata;
        char       *waldata_start;
        xl_heap_rewrite_mapping xlrec;
        Oid         dboid;
        uint32      len;
        int         written;

        /* this file hasn't got any new mappings */
        if (src->num_mappings == 0)
            continue;

        if (state->rs_old_rel->rd_rel->relisshared)
            dboid = InvalidOid;
        else
            dboid = MyDatabaseId;

        xlrec.num_mappings = src->num_mappings;
        xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
        xlrec.mapped_xid = src->xid;
        xlrec.mapped_db = dboid;
        xlrec.offset = src->off;
        xlrec.start_lsn = state->rs_begin_lsn;

        /* write all mappings consecutively */
        len = src->num_mappings * sizeof(LogicalRewriteMappingData);
        waldata_start = waldata = palloc(len);

        /*
         * collect data we need to write out, but don't modify ondisk data
         * yet
         */
        dlist_foreach_modify(iter, &src->mappings)
        {
            RewriteMappingDataEntry *pmap;

            pmap = dlist_container(RewriteMappingDataEntry, node, iter.cur);

            memcpy(waldata, &pmap->map, sizeof(pmap->map));
            waldata += sizeof(pmap->map);

            /* remove from the list and free */
            dlist_delete(&pmap->node);
            pfree(pmap);

            /* update bookkeeping */
            state->rs_num_rewrite_mappings--;
            src->num_mappings--;
        }

        Assert(src->num_mappings == 0);
        Assert(waldata == waldata_start + len);

        /*
         * Note that we deviate from the usual WAL coding practices here,
         * check the above "Logical rewrite support" comment for reasoning.
         */
        written = FileWrite(src->vfd, waldata_start, len, src->off,
                            WAIT_EVENT_LOGICAL_REWRITE_WRITE);
        if (written != len)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write to file \"%s\", wrote %d of %d: %m",
                            src->path, written, len)));
        src->off += len;

        XLogBeginInsert();
        XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
        XLogRegisterData(waldata_start, len);

        /* write xlog record */
        XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);

        pfree(waldata_start);
    }
    Assert(state->rs_num_rewrite_mappings == 0);
}

/*
 * Logical remapping part of end_heap_rewrite().
 */
static void
logical_end_heap_rewrite(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    RewriteMappingFile *src;

    /* done, no logical rewrite in progress */
    if (!state->rs_logical_rewrite)
        return;

    /* writeout remaining in-memory entries */
    if (state->rs_num_rewrite_mappings > 0)
        logical_heap_rewrite_flush_mappings(state);

    /* Iterate over all mappings we have written and fsync the files. */
    hash_seq_init(&seq_status, state->rs_logical_mappings);
    while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
    {
        if (FileSync(src->vfd, WAIT_EVENT_LOGICAL_REWRITE_SYNC) != 0)
            ereport(data_sync_elevel(ERROR),
                    (errcode_for_file_access(),
                     errmsg("could not fsync file \"%s\": %m", src->path)));
        FileClose(src->vfd);
    }
    /* memory context cleanup will deal with the rest */
}

/*
 * Log a single (old->new) mapping for 'xid'.
 */
static void
logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
                            LogicalRewriteMappingData *map)
{
    RewriteMappingFile *src;
    RewriteMappingDataEntry *pmap;
    Oid         relid;
    bool        found;

    relid = RelationGetRelid(state->rs_old_rel);

    /* look for existing mappings for this 'mapped' xid */
    src = hash_search(state->rs_logical_mappings, &xid,
                      HASH_ENTER, &found);

    /*
     * We haven't yet had the need to map anything for this xid; create the
     * per-xid data structures.
     */
    if (!found)
    {
        char        path[MAXPGPATH];
        Oid         dboid;

        if (state->rs_old_rel->rd_rel->relisshared)
            dboid = InvalidOid;
        else
            dboid = MyDatabaseId;

        snprintf(path, MAXPGPATH,
                 "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
                 dboid, relid,
                 LSN_FORMAT_ARGS(state->rs_begin_lsn),
                 xid, GetCurrentTransactionId());

        dlist_init(&src->mappings);
        src->num_mappings = 0;
        src->off = 0;
        memcpy(src->path, path, sizeof(path));
        src->vfd = PathNameOpenFile(path,
                                    O_CREAT | O_EXCL | O_WRONLY | PG_BINARY);
        if (src->vfd < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not create file \"%s\": %m", path)));
    }

    pmap = MemoryContextAlloc(state->rs_cxt,
                              sizeof(RewriteMappingDataEntry));
    memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
    dlist_push_tail(&src->mappings, &pmap->node);
    src->num_mappings++;
    state->rs_num_rewrite_mappings++;

    /*
     * Write out the buffer whenever we have too many in-memory entries
     * across all mapping files.
     */
    if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
        logical_heap_rewrite_flush_mappings(state);
}

/*
 * Perform logical remapping for a tuple that's mapped from old_tid to
 * new_tuple->t_self by rewrite_heap_tuple(), if the tuple needs it.
 */
static void
logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
                           HeapTuple new_tuple)
{
    ItemPointerData new_tid = new_tuple->t_self;
    TransactionId cutoff = state->rs_logical_xmin;
    TransactionId xmin;
    TransactionId xmax;
    bool        do_log_xmin = false;
    bool        do_log_xmax = false;
    LogicalRewriteMappingData map;

    /* no logical rewrite in progress, we don't need to log anything */
    if (!state->rs_logical_rewrite)
        return;

    xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
    /* use *GetUpdateXid to correctly deal with multixacts */
    xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);

    /*
     * Log the mapping iff the tuple has been created recently.
     */
    if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
        do_log_xmin = true;

    if (!TransactionIdIsNormal(xmax))
    {
        /*
         * no xmax is set, can't have any permanent ones, so this check is
         * sufficient
         */
    }
    else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
    {
        /* only locked, we don't care */
    }
    else if (!TransactionIdPrecedes(xmax, cutoff))
    {
        /* tuple has been deleted recently, log */
        do_log_xmax = true;
    }

    /* if neither needs to be logged, we're done */
    if (!do_log_xmin && !do_log_xmax)
        return;

    /* fill out mapping information */
    map.old_node = state->rs_old_rel->rd_node;
    map.old_tid = old_tid;
    map.new_node = state->rs_new_rel->rd_node;
    map.new_tid = new_tid;

    /* ---
     * Now persist the mapping for the individual xids that are affected.  We
     * need to log for both xmin and xmax if they aren't the same transaction
     * since the mapping files are per "affected" xid.
     * We don't muster all that much effort detecting whether xmin and xmax
     * are actually the same transaction, we just check whether the xid is
     * the same disregarding subtransactions.  Logging too much is relatively
     * harmless and we could never do the check fully since subtransaction
     * data is thrown away during restarts.
     * ---
     */
    if (do_log_xmin)
        logical_rewrite_log_mapping(state, xmin, &map);
    /* separately log mapping for xmax unless it'd be redundant */
    if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
        logical_rewrite_log_mapping(state, xmax, &map);
}

/*
 * Replay XLOG_HEAP2_REWRITE records
 */
void
heap_xlog_logical_rewrite(XLogReaderState *r)
{
    char        path[MAXPGPATH];
    int         fd;
    xl_heap_rewrite_mapping *xlrec;
    uint32      len;
    char       *data;

    xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);

    snprintf(path, MAXPGPATH,
             "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
             xlrec->mapped_db, xlrec->mapped_rel,
             LSN_FORMAT_ARGS(xlrec->start_lsn),
             xlrec->mapped_xid, XLogRecGetXid(r));

    fd = OpenTransientFile(path,
                           O_CREAT | O_WRONLY | PG_BINARY);
    if (fd < 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not create file \"%s\": %m", path)));

    /*
     * Truncate all data that's not guaranteed to have been safely fsynced
     * (by previous record or by the last checkpoint).
     */
    pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_TRUNCATE);
    if (ftruncate(fd, xlrec->offset) != 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not truncate file \"%s\" to %u: %m",
                        path, (uint32) xlrec->offset)));
    pgstat_report_wait_end();

    data = XLogRecGetData(r) + sizeof(*xlrec);

    len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);

    /* write out tail end of mapping file (again) */
    errno = 0;
    pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_WRITE);
    if (pg_pwrite(fd, data, len, xlrec->offset) != len)
    {
        /* if write didn't set errno, assume problem is no disk space */
        if (errno == 0)
            errno = ENOSPC;
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m", path)));
    }
    pgstat_report_wait_end();

    /*
     * Now fsync all previously written data.  We could improve things and
     * only do this for the last write to a file, but the required
     * bookkeeping doesn't seem worth the trouble.
     */
    pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_MAPPING_SYNC);
    if (pg_fsync(fd) != 0)
        ereport(data_sync_elevel(ERROR),
                (errcode_for_file_access(),
                 errmsg("could not fsync file \"%s\": %m", path)));
    pgstat_report_wait_end();

    if (CloseTransientFile(fd) != 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not close file \"%s\": %m", path)));
}

/* ---
 * Perform a checkpoint for logical rewrite mappings
 *
 * This serves two tasks:
 * 1) Remove all mappings not needed anymore based on the logical restart LSN
 * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
 *    only has to deal with the parts of a mapping that have been written out
 *    after the checkpoint started.
 * ---
 */
void
CheckPointLogicalRewriteHeap(void)
{
    XLogRecPtr  cutoff;
    XLogRecPtr  redo;
    DIR        *mappings_dir;
    struct dirent *mapping_de;
    char        path[MAXPGPATH + 20];

    /*
     * We start off with a minimum of the last redo pointer.  No new decoding
     * slot will start before that, so that's a safe upper bound for removal.
     */
    redo = GetRedoRecPtr();

    /* now check for the restart ptrs from existing slots */
    cutoff = ReplicationSlotsComputeLogicalRestartLSN();

    /* don't start earlier than the restart lsn */
    if (cutoff != InvalidXLogRecPtr && redo < cutoff)
        cutoff = redo;

    mappings_dir = AllocateDir("pg_logical/mappings");
    while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
    {
        struct stat statbuf;
        Oid         dboid;
        Oid         relid;
        XLogRecPtr  lsn;
        TransactionId rewrite_xid;
        TransactionId create_xid;
        uint32      hi,
                    lo;

        if (strcmp(mapping_de->d_name, ".") == 0 ||
            strcmp(mapping_de->d_name, "..") == 0)
            continue;

        snprintf(path, sizeof(path), "pg_logical/mappings/%s", mapping_de->d_name);
        if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
            continue;

        /* Skip over files that cannot be ours. */
        if (strncmp(mapping_de->d_name, "map-", 4) != 0)
            continue;

        if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
                   &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
            elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);

        lsn = ((uint64) hi) << 32 | lo;

        if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
        {
            elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
            if (unlink(path) < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not remove file \"%s\": %m", path)));
        }
        else
        {
            /* on some operating systems fsyncing a file requires O_RDWR */
            int         fd = OpenTransientFile(path, O_RDWR | PG_BINARY);

            /*
             * The file cannot vanish due to concurrency since this function
             * is the only one removing logical mappings and only one
             * checkpoint can be in progress at a time.
             */
            if (fd < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not open file \"%s\": %m", path)));

            /*
             * We could try to avoid fsyncing files that either haven't
             * changed or have only been created since the checkpoint's
             * start, but it's currently not deemed worth the effort.
             */
            pgstat_report_wait_start(WAIT_EVENT_LOGICAL_REWRITE_CHECKPOINT_SYNC);
            if (pg_fsync(fd) != 0)
                ereport(data_sync_elevel(ERROR),
                        (errcode_for_file_access(),
                         errmsg("could not fsync file \"%s\": %m", path)));
            pgstat_report_wait_end();

            if (CloseTransientFile(fd) != 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not close file \"%s\": %m", path)));
        }
    }
    FreeDir(mappings_dir);
}