/*-------------------------------------------------------------------------
 *
 * rewriteheap.c
 *    Support functions to rewrite tables.
 *
 * These functions provide a facility to completely rewrite a heap, while
 * preserving visibility information and update chains.
 *
 * INTERFACE
 *
 * The caller is responsible for creating the new heap, all catalog
 * changes, supplying the tuples to be written to the new heap, and
 * rebuilding indexes.  The caller must hold AccessExclusiveLock on the
 * target table, because we assume no one else is writing into it.
 *
 * To use the facility:
 *
 * begin_heap_rewrite
 * while (fetch next tuple)
 * {
 *     if (tuple is dead)
 *         rewrite_heap_dead_tuple
 *     else
 *     {
 *         // do any transformations here if required
 *         rewrite_heap_tuple
 *     }
 * }
 * end_heap_rewrite
 *
 * The contents of the new relation shouldn't be relied on until after
 * end_heap_rewrite is called.
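 *
 * As a concrete sketch (a rough illustration only; the real callers in
 * cluster.c additionally manage the heap scan, buffer pins, and tuple
 * reconstruction, and the variable names here are made up):
 *
 *     rws = begin_heap_rewrite(old_heap, new_heap, OldestXmin,
 *                              FreezeXid, MultiXactCutoff, use_wal);
 *     while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *     {
 *         if (HeapTupleSatisfiesVacuum(tuple, OldestXmin, buf) ==
 *             HEAPTUPLE_DEAD)
 *             rewrite_heap_dead_tuple(rws, tuple);
 *         else
 *         {
 *             // rewrite_heap_tuple scribbles on its third argument, so
 *             // pass a modifiable copy; it may be freed again afterwards
 *             copy = heap_copytuple(tuple);
 *             rewrite_heap_tuple(rws, tuple, copy);
 *             heap_freetuple(copy);
 *         }
 *     }
 *     end_heap_rewrite(rws);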
 *
 *
 * IMPLEMENTATION
 *
 * This would be a fairly trivial affair, except that we need to maintain
 * the ctid chains that link versions of an updated tuple together.
 * Since the newly stored tuples will have tids different from the original
 * ones, if we just copied t_ctid fields to the new table the links would
 * be wrong.  When we are required to copy a (presumably recently-dead or
 * delete-in-progress) tuple whose ctid doesn't point to itself, we have
 * to substitute the correct ctid instead.
 *
 * For each ctid reference from A -> B, we might encounter either A first
 * or B first.  (Note that a tuple in the middle of a chain is both A and B
 * of different pairs.)
 *
 * If we encounter A first, we'll store the tuple in the unresolved_tups
 * hash table.  When we later encounter B, we remove A from the hash table,
 * fix the ctid to point to the new location of B, and insert both A and B
 * to the new heap.
 *
 * If we encounter B first, we can insert B to the new heap right away.
 * We then add an entry to the old_new_tid_map hash table showing B's
 * original tid (in the old heap) and new tid (in the new heap).
 * When we later encounter A, we get the new location of B from the table,
 * and can write A immediately with the correct ctid.
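 *
 * For example, if an update chain T1 -> T2 is scanned in the order T2, T1,
 * then T2 (the B tuple) is written immediately and its old and new tids go
 * into old_new_tid_map; when T1 (the A tuple) turns up later, the mapping
 * supplies the ctid to store in T1's copy and is then discarded.  Scanned
 * in the order T1, T2, it is instead T1 that waits in unresolved_tups
 * until T2 has been placed.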
 *
 * Entries in the hash tables can be removed as soon as the later tuple
 * is encountered.  That helps to keep the memory usage down.  At the end,
 * both tables are usually empty; we should have encountered both A and B
 * of each pair.  However, it's possible for A to be RECENTLY_DEAD and B
 * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
 * for deadness using OldestXmin is not exact.  In such a case we might
 * encounter B first, and skip it, and find A later.  Then A would be added
 * to unresolved_tups, and stay there until end of the rewrite.  Since
 * this case is very unusual, we don't worry about the memory usage.
 *
 * Using in-memory hash tables means that we use some memory for each live
 * update chain in the table, from the time we find one end of the
 * reference until we find the other end.  That shouldn't be a problem in
 * practice, but if you do something like an UPDATE without a where-clause
 * on a large table, and then run CLUSTER in the same transaction, you
 * could run out of memory.  It doesn't seem worthwhile to add support for
 * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
 * table under normal circumstances.  Furthermore, in the typical scenario
 * of CLUSTERing on an unchanging key column, we'll see all the versions
 * of a given tuple together anyway, and so the peak memory usage is only
 * proportional to the number of RECENTLY_DEAD versions of a single row,
 * not to the number in the whole table.  Note that if we do fail halfway
 * through a CLUSTER, the old table is still valid, so failure is not
 * catastrophic.
 *
 * We can't use the normal heap_insert function to insert into the new
 * heap, because heap_insert overwrites the visibility information.
 * We use a special-purpose raw_heap_insert function instead, which
 * is optimized for bulk inserting a lot of tuples, knowing that we have
 * exclusive access to the heap.  raw_heap_insert builds new pages in
 * local storage.  When a page is full, or at the end of the process,
 * we insert it to WAL as a single record and then write it to disk
 * directly through smgr.  Note, however, that any data sent to the new
 * heap's TOAST table will go through the normal bufmgr.
 *
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/heap/rewriteheap.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <sys/stat.h>
#include <unistd.h>

#include "miscadmin.h"

#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/xact.h"
#include "access/xloginsert.h"

#include "catalog/catalog.h"

#include "lib/ilist.h"

#include "replication/logical.h"
#include "replication/slot.h"

#include "storage/bufmgr.h"
#include "storage/fd.h"
#include "storage/procarray.h"
#include "storage/smgr.h"

#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tqual.h"

/*
 * State associated with a rewrite operation.  This is opaque to the user
 * of the rewrite facility.
 */
typedef struct RewriteStateData
{
    Relation    rs_old_rel;     /* source heap */
    Relation    rs_new_rel;     /* destination heap */
    Page        rs_buffer;      /* page currently being built */
    BlockNumber rs_blockno;     /* block where page will go */
    bool        rs_buffer_valid;    /* T if any tuples in buffer */
    bool        rs_use_wal;     /* must we WAL-log inserts? */
    bool        rs_logical_rewrite;     /* do we need to do logical rewriting */
    TransactionId rs_oldest_xmin;   /* oldest xmin used by caller to
                                     * determine tuple visibility */
    TransactionId rs_freeze_xid;    /* Xid that will be used as freeze cutoff
                                     * point */
    TransactionId rs_logical_xmin;  /* Xid that will be used as cutoff point
                                     * for logical rewrites */
    MultiXactId rs_cutoff_multi;    /* MultiXactId that will be used as
                                     * cutoff point for multixacts */
    MemoryContext rs_cxt;       /* for hash tables and entries and tuples in
                                 * them */
    XLogRecPtr  rs_begin_lsn;   /* XLogInsertLsn when starting the rewrite */
    HTAB       *rs_unresolved_tups;     /* unmatched A tuples */
    HTAB       *rs_old_new_tid_map;     /* unmatched B tuples */
    HTAB       *rs_logical_mappings;    /* logical remapping files */
    uint32      rs_num_rewrite_mappings;    /* # in memory mappings */
} RewriteStateData;

/*
 * The lookup keys for the hash tables are tuple TID and xmin (we must check
 * both to avoid false matches from dead tuples).  Beware that there is
 * probably some padding space in this struct; it must be zeroed out for
 * correct hashtable operation.
 */
typedef struct
{
    TransactionId xmin;         /* tuple xmin */
    ItemPointerData tid;        /* tuple location in old heap */
} TidHashKey;
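
/*
 * For illustration, keys are therefore always built by zeroing the whole
 * struct before filling in the fields, as done throughout this file:
 *
 *     memset(&hashkey, 0, sizeof(hashkey));
 *     hashkey.xmin = HeapTupleHeaderGetXmin(tuple->t_data);
 *     hashkey.tid = tuple->t_self;
 */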

/*
 * Entry structures for the hash tables
 */
typedef struct
{
    TidHashKey  key;            /* expected xmin/old location of B tuple */
    ItemPointerData old_tid;    /* A's location in the old heap */
    HeapTuple   tuple;          /* A's tuple contents */
} UnresolvedTupData;

typedef UnresolvedTupData *UnresolvedTup;

typedef struct
{
    TidHashKey  key;            /* actual xmin/old location of B tuple */
    ItemPointerData new_tid;    /* where we put it in the new heap */
} OldToNewMappingData;

typedef OldToNewMappingData *OldToNewMapping;

/*
 * In-memory data for an xid that might need logical remapping entries
 * to be logged.
 */
typedef struct RewriteMappingFile
{
    TransactionId xid;          /* xid that might need to see the row */
    int         vfd;            /* fd of mappings file */
    off_t       off;            /* how far have we written yet */
    uint32      num_mappings;   /* number of in-memory mappings */
    dlist_head  mappings;       /* list of in-memory mappings */
    char        path[MAXPGPATH];    /* path, for error messages */
} RewriteMappingFile;

/*
 * A single in-memory logical rewrite mapping, hanging off
 * RewriteMappingFile->mappings.
 */
typedef struct RewriteMappingDataEntry
{
    LogicalRewriteMappingData map;  /* map between old and new location of
                                     * the tuple */
    dlist_node  node;
} RewriteMappingDataEntry;


/* prototypes for internal functions */
static void raw_heap_insert(RewriteState state, HeapTuple tup);

/* internal logical remapping prototypes */
static void logical_begin_heap_rewrite(RewriteState state);
static void logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid, HeapTuple new_tuple);
static void logical_end_heap_rewrite(RewriteState state);


/*
 * Begin a rewrite of a table
 *
 * old_heap        old, locked heap relation tuples will be read from
 * new_heap        new, locked heap relation to insert tuples to
 * oldest_xmin     xid used by the caller to determine which tuples are dead
 * freeze_xid      xid before which tuples will be frozen
 * cutoff_multi    multixact before which multis will be removed
 * use_wal         should the inserts to the new heap be WAL-logged?
 *
 * Returns an opaque RewriteState, allocated in current memory context,
 * to be used in subsequent calls to the other functions.
 */
RewriteState
begin_heap_rewrite(Relation old_heap, Relation new_heap, TransactionId oldest_xmin,
                   TransactionId freeze_xid, MultiXactId cutoff_multi,
                   bool use_wal)
{
    RewriteState state;
    MemoryContext rw_cxt;
    MemoryContext old_cxt;
    HASHCTL     hash_ctl;

    /*
     * To ease cleanup, make a separate context that will contain the
     * RewriteState struct itself plus all subsidiary data.
     */
    rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
                                   "Table rewrite",
                                   ALLOCSET_DEFAULT_SIZES);
    old_cxt = MemoryContextSwitchTo(rw_cxt);

    /* Create and fill in the state struct */
    state = palloc0(sizeof(RewriteStateData));

    state->rs_old_rel = old_heap;
    state->rs_new_rel = new_heap;
    state->rs_buffer = (Page) palloc(BLCKSZ);
    /* new_heap needn't be empty, just locked */
    state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
    state->rs_buffer_valid = false;
    state->rs_use_wal = use_wal;
    state->rs_oldest_xmin = oldest_xmin;
    state->rs_freeze_xid = freeze_xid;
    state->rs_cutoff_multi = cutoff_multi;
    state->rs_cxt = rw_cxt;

    /* Initialize hash tables used to track update chains */
    memset(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(TidHashKey);
    hash_ctl.entrysize = sizeof(UnresolvedTupData);
    hash_ctl.hcxt = state->rs_cxt;

    state->rs_unresolved_tups =
        hash_create("Rewrite / Unresolved ctids",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    hash_ctl.entrysize = sizeof(OldToNewMappingData);

    state->rs_old_new_tid_map =
        hash_create("Rewrite / Old to new tid map",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

    MemoryContextSwitchTo(old_cxt);

    logical_begin_heap_rewrite(state);

    return state;
}

/*
 * End a rewrite.
 *
 * state and any other resources are freed.
 */
void
end_heap_rewrite(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    UnresolvedTup unresolved;

    /*
     * Write any remaining tuples in the UnresolvedTups table.  If we have
     * any left, they should in fact be dead, but let's err on the safe side.
     */
    hash_seq_init(&seq_status, state->rs_unresolved_tups);

    while ((unresolved = hash_seq_search(&seq_status)) != NULL)
    {
        ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
        raw_heap_insert(state, unresolved->tuple);
    }

    /* Write the last page, if any */
    if (state->rs_buffer_valid)
    {
        if (state->rs_use_wal)
            log_newpage(&state->rs_new_rel->rd_node,
                        MAIN_FORKNUM,
                        state->rs_blockno,
                        state->rs_buffer,
                        true);
        RelationOpenSmgr(state->rs_new_rel);

        PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);

        smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
                   (char *) state->rs_buffer, true);
    }

    /*
     * If the rel is WAL-logged, must fsync before commit.  We use heap_sync
     * to ensure that the toast table gets fsync'd too.
     *
     * It's obvious that we must do this when not WAL-logging.  It's less
     * obvious that we have to do it even if we did WAL-log the pages.  The
     * reason is the same as in tablecmds.c's copy_relation_data(): we're
     * writing data that's not in shared buffers, and so a CHECKPOINT
     * occurring during the rewriteheap operation won't have fsync'd data we
     * wrote before the checkpoint.
     */
    if (RelationNeedsWAL(state->rs_new_rel))
        heap_sync(state->rs_new_rel);

    logical_end_heap_rewrite(state);

    /* Deleting the context frees everything */
    MemoryContextDelete(state->rs_cxt);
}

/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state        opaque state as returned by begin_heap_rewrite
 * old_tuple    original tuple in the old heap
 * new_tuple    new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
                   HeapTuple old_tuple, HeapTuple new_tuple)
{
    MemoryContext old_cxt;
    ItemPointerData old_tid;
    TidHashKey  hashkey;
    bool        found;
    bool        free_new;

    old_cxt = MemoryContextSwitchTo(state->rs_cxt);

    /*
     * Copy the original tuple's visibility information into new_tuple.
     *
     * XXX we might later need to copy some t_infomask2 bits, too? Right now,
     * we intentionally clear the HOT status bits.
     */
    memcpy(&new_tuple->t_data->t_choice.t_heap,
           &old_tuple->t_data->t_choice.t_heap,
           sizeof(HeapTupleFields));

    new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
    new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
    new_tuple->t_data->t_infomask |=
        old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

    /*
     * While we have our hands on the tuple, we may as well freeze any
     * eligible xmin or xmax, so that future VACUUM effort can be saved.
     */
    heap_freeze_tuple(new_tuple->t_data,
                      state->rs_old_rel->rd_rel->relfrozenxid,
                      state->rs_old_rel->rd_rel->relminmxid,
                      state->rs_freeze_xid,
                      state->rs_cutoff_multi);

    /*
     * Invalid ctid means that ctid should point to the tuple itself.  We'll
     * override it later if the tuple is part of an update chain.
     */
    ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

    /*
     * If the tuple has been updated, check the old-to-new mapping hash table.
     */
    if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
          HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
        !(ItemPointerEquals(&(old_tuple->t_self),
                            &(old_tuple->t_data->t_ctid))))
    {
        OldToNewMapping mapping;

        memset(&hashkey, 0, sizeof(hashkey));
        hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
        hashkey.tid = old_tuple->t_data->t_ctid;

        mapping = (OldToNewMapping)
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_FIND, NULL);

        if (mapping != NULL)
        {
            /*
             * We've already copied the tuple that t_ctid points to, so we
             * can set the ctid of this tuple to point to the new location,
             * and insert it right away.
             */
            new_tuple->t_data->t_ctid = mapping->new_tid;

            /* We don't need the mapping entry anymore */
            hash_search(state->rs_old_new_tid_map, &hashkey,
                        HASH_REMOVE, &found);
            Assert(found);
        }
        else
        {
            /*
             * We haven't seen the tuple t_ctid points to yet.  Stash this
             * tuple into unresolved_tups to be written later.
             */
            UnresolvedTup unresolved;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_ENTER, &found);
            Assert(!found);

            unresolved->old_tid = old_tuple->t_self;
            unresolved->tuple = heap_copytuple(new_tuple);

            /*
             * We can't do anything more now, since we don't know where the
             * tuple will be written.
             */
            MemoryContextSwitchTo(old_cxt);
            return;
        }
    }

    /*
     * Now we will write the tuple, and then check to see if it is the B
     * tuple in any new or known pair.  When we resolve a known pair, we will
     * be able to write that pair's A tuple, and then we have to check if it
     * resolves some other pair.  Hence, we need a loop here.
     */
    old_tid = old_tuple->t_self;
    free_new = false;

    for (;;)
    {
        ItemPointerData new_tid;

        /* Insert the tuple and find out where it's put in new_heap */
        raw_heap_insert(state, new_tuple);
        new_tid = new_tuple->t_self;

        logical_rewrite_heap_tuple(state, old_tid, new_tuple);

        /*
         * If the tuple is the updated version of a row, and the prior
         * version wouldn't be DEAD yet, then we need to either resolve the
         * prior version (if it's waiting in rs_unresolved_tups), or make an
         * entry in rs_old_new_tid_map (so we can resolve it when we do see
         * it).  The previous tuple's xmax would equal this one's xmin, so
         * it's RECENTLY_DEAD if and only if the xmin is not before
         * OldestXmin.
         */
        if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
            !TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
                                   state->rs_oldest_xmin))
        {
            /*
             * Okay, this is B in an update pair.  See if we've seen A.
             */
            UnresolvedTup unresolved;

            memset(&hashkey, 0, sizeof(hashkey));
            hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
            hashkey.tid = old_tid;

            unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                                     HASH_FIND, NULL);

            if (unresolved != NULL)
            {
                /*
                 * We have seen and memorized the previous tuple already.
                 * Now that we know where we inserted the tuple its t_ctid
                 * points to, fix its t_ctid and insert it to the new heap.
                 */
                if (free_new)
                    heap_freetuple(new_tuple);
                new_tuple = unresolved->tuple;
                free_new = true;
                old_tid = unresolved->old_tid;
                new_tuple->t_data->t_ctid = new_tid;

                /*
                 * We don't need the hash entry anymore, but don't free its
                 * tuple just yet.
                 */
                hash_search(state->rs_unresolved_tups, &hashkey,
                            HASH_REMOVE, &found);
                Assert(found);

                /* loop back to insert the previous tuple in the chain */
                continue;
            }
            else
            {
                /*
                 * Remember the new tid of this tuple.  We'll use it to set
                 * the ctid when we find the previous tuple in the chain.
                 */
                OldToNewMapping mapping;

                mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
                                      HASH_ENTER, &found);
                Assert(!found);

                mapping->new_tid = new_tid;
            }
        }

        /* Done with this (chain of) tuples, for now */
        if (free_new)
            heap_freetuple(new_tuple);
        break;
    }

    MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite.  Dead tuples are not
 * copied to the new table, but we still make note of them so that we
 * can release some resources earlier.
 *
 * Returns true if a tuple was removed from the unresolved_tups table.
 * This indicates that that tuple, previously thought to be "recently dead",
 * is now known really dead and won't be written to the output.
 */
bool
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
    /*
     * If we have already seen an earlier tuple in the update chain that
     * points to this tuple, let's forget about that earlier tuple.  It's in
     * fact dead as well; our simple xmax < OldestXmin test in
     * HeapTupleSatisfiesVacuum just wasn't enough to detect it.  It happens
     * when xmin of a tuple is greater than xmax, which sounds
     * counter-intuitive but is perfectly valid.
     *
     * We don't bother to try to detect the situation the other way round,
     * when we encounter the dead tuple first and then the recently dead one
     * that points to it.  If that happens, we'll have some unmatched entries
     * in the UnresolvedTups hash table at the end.  That can happen anyway,
     * because a vacuum might have removed the dead tuple in the chain before
     * us.
     */
    UnresolvedTup unresolved;
    TidHashKey  hashkey;
    bool        found;

    memset(&hashkey, 0, sizeof(hashkey));
    hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
    hashkey.tid = old_tuple->t_self;

    unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
                             HASH_FIND, NULL);

    if (unresolved != NULL)
    {
        /* Need to free the contained tuple as well as the hashtable entry */
        heap_freetuple(unresolved->tuple);
        hash_search(state->rs_unresolved_tups, &hashkey,
                    HASH_REMOVE, &found);
        Assert(found);
        return true;
    }

    return false;
}

/*
 * Insert a tuple to the new relation.  This has to track heap_insert
 * and its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple.  If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
    Page        page = state->rs_buffer;
    Size        pageFreeSpace,
                saveFreeSpace;
    Size        len;
    OffsetNumber newoff;
    HeapTuple   heaptup;

    /*
     * If the new tuple is too big for storage or contains already toasted
     * out-of-line attributes from some other relation, invoke the toaster.
     *
     * Note: below this point, heaptup is the data we actually intend to
     * store into the relation; tup is the caller's original untoasted data.
     */
    if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
    {
        /* toast table entries should never be recursively toasted */
        Assert(!HeapTupleHasExternal(tup));
        heaptup = tup;
    }
    else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
    {
        int         options = HEAP_INSERT_SKIP_FSM;

        if (!state->rs_use_wal)
            options |= HEAP_INSERT_SKIP_WAL;

        /*
         * While rewriting the heap for VACUUM FULL / CLUSTER, make sure data
         * for the TOAST table are not logically decoded.  The main heap is
         * WAL-logged as XLOG FPI records, which are not logically decoded.
         */
        options |= HEAP_INSERT_NO_LOGICAL;

        heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
                                         options);
    }
    else
        heaptup = tup;

    len = MAXALIGN(heaptup->t_len);     /* be conservative */

    /*
     * If we're gonna fail for oversize tuple, do it right away
     */
    if (len > MaxHeapTupleSize)
        ereport(ERROR,
                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                 errmsg("row is too big: size %zu, maximum size %zu",
                        len, MaxHeapTupleSize)));

    /* Compute desired extra freespace due to fillfactor option */
    saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
                                                   HEAP_DEFAULT_FILLFACTOR);

    /* Now we can check to see if there's enough free space already. */
    if (state->rs_buffer_valid)
    {
        pageFreeSpace = PageGetHeapFreeSpace(page);

        if (len + saveFreeSpace > pageFreeSpace)
        {
            /* Doesn't fit, so write out the existing page */

            /* XLOG stuff */
            if (state->rs_use_wal)
                log_newpage(&state->rs_new_rel->rd_node,
                            MAIN_FORKNUM,
                            state->rs_blockno,
                            page,
                            true);

            /*
             * Now write the page.  We pass skipFsync = true even though this
             * isn't a temp table, because there's no need for smgr to
             * schedule an fsync for this write; we'll do it ourselves in
             * end_heap_rewrite.
             */
            RelationOpenSmgr(state->rs_new_rel);

            PageSetChecksumInplace(page, state->rs_blockno);

            smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
                       state->rs_blockno, (char *) page, true);

            state->rs_blockno++;
            state->rs_buffer_valid = false;
        }
    }

    if (!state->rs_buffer_valid)
    {
        /* Initialize a new empty page */
        PageInit(page, BLCKSZ, 0);
        state->rs_buffer_valid = true;
    }

    /* And now we can insert the tuple into the page */
    newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
                         InvalidOffsetNumber, false, true);
    if (newoff == InvalidOffsetNumber)
        elog(ERROR, "failed to add tuple");

    /* Update caller's t_self to the actual position where it was stored */
    ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

    /*
     * Insert the correct position into CTID of the stored tuple, too, if the
     * caller didn't supply a valid CTID.
     */
    if (!ItemPointerIsValid(&tup->t_data->t_ctid))
    {
        ItemId      newitemid;
        HeapTupleHeader onpage_tup;

        newitemid = PageGetItemId(page, newoff);
        onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

        onpage_tup->t_ctid = tup->t_self;
    }

    /* If heaptup is a private copy, release it. */
    if (heaptup != tup)
        heap_freetuple(heaptup);
}

/* ------------------------------------------------------------------------
 * Logical rewrite support
 *
 * When doing logical decoding - which relies on using cmin/cmax of catalog
 * tuples, via xl_heap_new_cid records - heap rewrites have to log enough
 * information to allow the decoding backend to update its internal mapping
 * of (relfilenode,ctid) => (cmin, cmax) so it remains correct for the
 * rewritten heap.
 *
 * For that, every time we find a tuple that's been modified in a catalog
 * relation within the xmin horizon of any decoding slot, we log a mapping
 * from the old to the new location.
 *
 * To deal with rewrites that abort, the filename of a mapping file contains
 * the xid of the transaction performing the rewrite, which then can be
 * checked before being read in.
 *
 * For efficiency we don't immediately spill every single mapping for a row
 * to disk but only do so in batches, when we've collected several of them
 * in memory or when end_heap_rewrite() has been called.
 *
 * Crash-Safety: This module deviates from the usual patterns of WAL logging
 * since it cannot rely on checkpoints flushing out all buffers and thus on
 * waiting for exclusive locks on buffers.  Usually the XLogInsert() covering
 * buffer modifications is performed while the buffer(s) that are being
 * modified are exclusively locked, guaranteeing that both the WAL record
 * and the modified heap end up on the same side of the checkpoint.  But
 * since the mapping files we log aren't in shared_buffers, that interlock
 * doesn't work.
 *
 * Instead we simply write the mapping files out to disk, *before* the
 * XLogInsert() is performed.  That guarantees that either the XLogInsert()
 * is inserted after the checkpoint's redo pointer or that the checkpoint
 * (via CheckPointLogicalRewriteHeap()) has flushed the (partial) mapping
 * file to disk.  That leaves the tail end that has not yet been flushed
 * open to corruption, which is solved by including the current offset in
 * the xl_heap_rewrite_mapping records and truncating the mapping file to it
 * during replay.  Every time a rewrite is finished, all generated mapping
 * files are synced to disk.
 *
 * Note that if we were only concerned about crash safety we wouldn't have
 * to deal with WAL logging at all - an fsync() at the end of a rewrite
 * would be sufficient for crash safety.  Any mapping that hasn't been
 * safely flushed to disk has to belong to a transaction that aborted
 * (explicitly or via a crash) and is ignored by virtue of the xid in its
 * name being subject to a TransactionIdDidCommit() check.  But we want to
 * support having standbys via physical replication, both for availability
 * and to do logical decoding there.
 * ------------------------------------------------------------------------
 */
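
/*
 * For illustration: LOGICAL_REWRITE_FORMAT (defined in rewriteheap.h as a
 * hex format along the lines of "map-%x-%x-%X_%X-%x-%x") yields mapping
 * file names such as
 *
 *     pg_logical/mappings/map-3039-4001-0_12ABCD28-2f-31
 *
 * i.e. dboid, relid, the rewrite's start LSN, the mapped xid, and the xid
 * of the rewriting transaction (the concrete values here are made up).
 * See logical_rewrite_log_mapping() for where these names are built.
 */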

/*
 * Do preparations for logging logical mappings during a rewrite if
 * necessary.  If we detect that we don't need to log anything we'll prevent
 * any further action by the various logical rewrite functions.
 */
static void
logical_begin_heap_rewrite(RewriteState state)
{
    HASHCTL     hash_ctl;
    TransactionId logical_xmin;

    /*
     * We only need to persist these mappings if the rewritten table can be
     * accessed during logical decoding; if not, we can skip doing any
     * additional work.
     */
    state->rs_logical_rewrite =
        RelationIsAccessibleInLogicalDecoding(state->rs_old_rel);

    if (!state->rs_logical_rewrite)
        return;

    ProcArrayGetReplicationSlotXmin(NULL, &logical_xmin);

    /*
     * If there are no logical slots in progress we don't need to do
     * anything; there cannot be any remappings for relevant rows yet.  The
     * relation's lock protects us against races.
     */
    if (logical_xmin == InvalidTransactionId)
    {
        state->rs_logical_rewrite = false;
        return;
    }

    state->rs_logical_xmin = logical_xmin;
    state->rs_begin_lsn = GetXLogInsertRecPtr();
    state->rs_num_rewrite_mappings = 0;

    memset(&hash_ctl, 0, sizeof(hash_ctl));
    hash_ctl.keysize = sizeof(TransactionId);
    hash_ctl.entrysize = sizeof(RewriteMappingFile);
    hash_ctl.hcxt = state->rs_cxt;

    state->rs_logical_mappings =
        hash_create("Logical rewrite mapping",
                    128,        /* arbitrary initial size */
                    &hash_ctl,
                    HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
}

/*
 * Flush all logical in-memory mappings to disk, but don't fsync them yet.
 */
static void
logical_heap_rewrite_flush_mappings(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    RewriteMappingFile *src;
    dlist_mutable_iter iter;

    Assert(state->rs_logical_rewrite);

    /* no logical rewrite in progress, no need to iterate over mappings */
    if (state->rs_num_rewrite_mappings == 0)
        return;

    elog(DEBUG1, "flushing %u logical rewrite mapping entries",
         state->rs_num_rewrite_mappings);

    hash_seq_init(&seq_status, state->rs_logical_mappings);
    while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
    {
        char       *waldata;
        char       *waldata_start;
        xl_heap_rewrite_mapping xlrec;
        Oid         dboid;
        uint32      len;
        int         written;

        /* this file hasn't got any new mappings */
        if (src->num_mappings == 0)
            continue;

        if (state->rs_old_rel->rd_rel->relisshared)
            dboid = InvalidOid;
        else
            dboid = MyDatabaseId;

        xlrec.num_mappings = src->num_mappings;
        xlrec.mapped_rel = RelationGetRelid(state->rs_old_rel);
        xlrec.mapped_xid = src->xid;
        xlrec.mapped_db = dboid;
        xlrec.offset = src->off;
        xlrec.start_lsn = state->rs_begin_lsn;

        /* write all mappings consecutively */
        len = src->num_mappings * sizeof(LogicalRewriteMappingData);
        waldata_start = waldata = palloc(len);

        /*
         * collect data we need to write out, but don't modify ondisk data yet
         */
        dlist_foreach_modify(iter, &src->mappings)
        {
            RewriteMappingDataEntry *pmap;

            pmap = dlist_container(RewriteMappingDataEntry, node, iter.cur);

            memcpy(waldata, &pmap->map, sizeof(pmap->map));
            waldata += sizeof(pmap->map);

            /* remove from the list and free */
            dlist_delete(&pmap->node);
            pfree(pmap);

            /* update bookkeeping */
            state->rs_num_rewrite_mappings--;
            src->num_mappings--;
        }

        Assert(src->num_mappings == 0);
        Assert(waldata == waldata_start + len);

        /*
         * Note that we deviate from the usual WAL coding practices here,
         * check the above "Logical rewrite support" comment for reasoning.
         */
        written = FileWrite(src->vfd, waldata_start, len);
        if (written != len)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not write to file \"%s\", wrote %d of %d: %m",
                            src->path, written, len)));
        src->off += len;

        XLogBeginInsert();
        XLogRegisterData((char *) (&xlrec), sizeof(xlrec));
        XLogRegisterData(waldata_start, len);

        /* write xlog record */
        XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_REWRITE);

        pfree(waldata_start);
    }
    Assert(state->rs_num_rewrite_mappings == 0);
}

/*
 * Logical remapping part of end_heap_rewrite().
 */
static void
logical_end_heap_rewrite(RewriteState state)
{
    HASH_SEQ_STATUS seq_status;
    RewriteMappingFile *src;

    /* done, no logical rewrite in progress */
    if (!state->rs_logical_rewrite)
        return;

    /* writeout remaining in-memory entries */
    if (state->rs_num_rewrite_mappings > 0)
        logical_heap_rewrite_flush_mappings(state);

    /* Iterate over all mappings we have written and fsync the files. */
    hash_seq_init(&seq_status, state->rs_logical_mappings);
    while ((src = (RewriteMappingFile *) hash_seq_search(&seq_status)) != NULL)
    {
        if (FileSync(src->vfd) != 0)
            ereport(data_sync_elevel(ERROR),
                    (errcode_for_file_access(),
                     errmsg("could not fsync file \"%s\": %m", src->path)));
        FileClose(src->vfd);
    }
    /* memory context cleanup will deal with the rest */
}

/*
 * Log a single (old->new) mapping for 'xid'.
 */
static void
logical_rewrite_log_mapping(RewriteState state, TransactionId xid,
                            LogicalRewriteMappingData *map)
{
    RewriteMappingFile *src;
    RewriteMappingDataEntry *pmap;
    Oid         relid;
    bool        found;

    relid = RelationGetRelid(state->rs_old_rel);

    /* look for existing mappings for this 'mapped' xid */
    src = hash_search(state->rs_logical_mappings, &xid,
                      HASH_ENTER, &found);

    /*
     * We haven't yet had the need to map anything for this xid, so create
     * the per-xid data structures.
     */
    if (!found)
    {
        char        path[MAXPGPATH];
        Oid         dboid;

        if (state->rs_old_rel->rd_rel->relisshared)
            dboid = InvalidOid;
        else
            dboid = MyDatabaseId;

        snprintf(path, MAXPGPATH,
                 "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
                 dboid, relid,
                 (uint32) (state->rs_begin_lsn >> 32),
                 (uint32) state->rs_begin_lsn,
                 xid, GetCurrentTransactionId());

        dlist_init(&src->mappings);
        src->num_mappings = 0;
        src->off = 0;
        memcpy(src->path, path, sizeof(path));
        src->vfd = PathNameOpenFile(path,
                                    O_CREAT | O_EXCL | O_WRONLY | PG_BINARY,
                                    S_IRUSR | S_IWUSR);
        if (src->vfd < 0)
            ereport(ERROR,
                    (errcode_for_file_access(),
                     errmsg("could not create file \"%s\": %m", path)));
    }

    pmap = MemoryContextAlloc(state->rs_cxt,
                              sizeof(RewriteMappingDataEntry));
    memcpy(&pmap->map, map, sizeof(LogicalRewriteMappingData));
    dlist_push_tail(&src->mappings, &pmap->node);
    src->num_mappings++;
    state->rs_num_rewrite_mappings++;

    /*
     * Write the buffer out every time we've accumulated too many in-memory
     * entries across all mapping files.
     */
    if (state->rs_num_rewrite_mappings >= 1000 /* arbitrary number */ )
        logical_heap_rewrite_flush_mappings(state);
}

/*
 * Perform logical remapping for a tuple that's mapped from old_tid to
 * new_tuple->t_self by rewrite_heap_tuple() if necessary for the tuple.
 */
static void
logical_rewrite_heap_tuple(RewriteState state, ItemPointerData old_tid,
                           HeapTuple new_tuple)
{
    ItemPointerData new_tid = new_tuple->t_self;
    TransactionId cutoff = state->rs_logical_xmin;
    TransactionId xmin;
    TransactionId xmax;
    bool        do_log_xmin = false;
    bool        do_log_xmax = false;
    LogicalRewriteMappingData map;

    /* no logical rewrite in progress, we don't need to log anything */
    if (!state->rs_logical_rewrite)
        return;

    xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
    /* use *GetUpdateXid to correctly deal with multixacts */
    xmax = HeapTupleHeaderGetUpdateXid(new_tuple->t_data);

    /*
     * Log the mapping iff the tuple has been created recently.
     */
    if (TransactionIdIsNormal(xmin) && !TransactionIdPrecedes(xmin, cutoff))
        do_log_xmin = true;

    if (!TransactionIdIsNormal(xmax))
    {
        /*
         * no xmax is set, can't have any permanent ones, so this check is
         * sufficient
         */
    }
    else if (HEAP_XMAX_IS_LOCKED_ONLY(new_tuple->t_data->t_infomask))
    {
        /* only locked, we don't care */
    }
    else if (!TransactionIdPrecedes(xmax, cutoff))
    {
        /* tuple has been deleted recently, log */
        do_log_xmax = true;
    }

    /* if neither needs to be logged, we're done */
    if (!do_log_xmin && !do_log_xmax)
        return;

    /* fill out mapping information */
    map.old_node = state->rs_old_rel->rd_node;
    map.old_tid = old_tid;
    map.new_node = state->rs_new_rel->rd_node;
    map.new_tid = new_tid;

    /* ---
     * Now persist the mapping for the individual xids that are affected.  We
     * need to log for both xmin and xmax if they aren't the same transaction
     * since the mapping files are per "affected" xid.
     * We don't muster all that much effort detecting whether xmin and xmax
     * are actually the same transaction; we just check whether the xid is
     * the same disregarding subtransactions.  Logging too much is relatively
     * harmless and we could never do the check fully since subtransaction
     * data is thrown away during restarts.
     * ---
     */
    if (do_log_xmin)
        logical_rewrite_log_mapping(state, xmin, &map);
    /* separately log mapping for xmax unless it'd be redundant */
    if (do_log_xmax && !TransactionIdEquals(xmin, xmax))
        logical_rewrite_log_mapping(state, xmax, &map);
}

/*
 * Replay XLOG_HEAP2_REWRITE records
 */
void
heap_xlog_logical_rewrite(XLogReaderState *r)
{
    char        path[MAXPGPATH];
    int         fd;
    xl_heap_rewrite_mapping *xlrec;
    uint32      len;
    char       *data;

    xlrec = (xl_heap_rewrite_mapping *) XLogRecGetData(r);

    snprintf(path, MAXPGPATH,
             "pg_logical/mappings/" LOGICAL_REWRITE_FORMAT,
             xlrec->mapped_db, xlrec->mapped_rel,
             (uint32) (xlrec->start_lsn >> 32),
             (uint32) xlrec->start_lsn,
             xlrec->mapped_xid, XLogRecGetXid(r));

    fd = OpenTransientFile(path,
                           O_CREAT | O_WRONLY | PG_BINARY,
                           S_IRUSR | S_IWUSR);
    if (fd < 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not create file \"%s\": %m", path)));

    /*
     * Truncate all data that's not guaranteed to have been safely fsynced
     * (by previous record or by the last checkpoint).
     */
    if (ftruncate(fd, xlrec->offset) != 0)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not truncate file \"%s\" to %u: %m",
                        path, (uint32) xlrec->offset)));

    /* now seek to the position we want to write our data to */
    if (lseek(fd, xlrec->offset, SEEK_SET) != xlrec->offset)
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not seek to offset %u in file \"%s\": %m",
                        (uint32) xlrec->offset, path)));

    data = XLogRecGetData(r) + sizeof(*xlrec);

    len = xlrec->num_mappings * sizeof(LogicalRewriteMappingData);

    /* write out tail end of mapping file (again) */
    errno = 0;
    if (write(fd, data, len) != len)
    {
        /* if write didn't set errno, assume problem is no disk space */
        if (errno == 0)
            errno = ENOSPC;
        ereport(ERROR,
                (errcode_for_file_access(),
                 errmsg("could not write to file \"%s\": %m", path)));
    }

    /*
     * Now fsync all previously written data.  We could improve things and
     * only do this for the last write to a file, but the required
     * bookkeeping doesn't seem worth the trouble.
     */
    if (pg_fsync(fd) != 0)
        ereport(data_sync_elevel(ERROR),
                (errcode_for_file_access(),
                 errmsg("could not fsync file \"%s\": %m", path)));

    CloseTransientFile(fd);
}

/* ---
 * Perform a checkpoint for logical rewrite mappings
 *
 * This serves two tasks:
 * 1) Remove all mappings not needed anymore based on the logical restart LSN
 * 2) Flush all remaining mappings to disk, so that replay after a checkpoint
 *    only has to deal with the parts of a mapping that have been written out
 *    after the checkpoint started.
 * ---
 */
void
CheckPointLogicalRewriteHeap(void)
{
    XLogRecPtr  cutoff;
    XLogRecPtr  redo;
    DIR        *mappings_dir;
    struct dirent *mapping_de;
    char        path[MAXPGPATH + 20];

    /*
     * We start off with a minimum of the last redo pointer.  No new decoding
     * slot will start before that, so that's a safe upper bound for removal.
     */
    redo = GetRedoRecPtr();

    /* now check for the restart ptrs from existing slots */
    cutoff = ReplicationSlotsComputeLogicalRestartLSN();

    /* don't start earlier than the restart lsn */
    if (cutoff != InvalidXLogRecPtr && redo < cutoff)
        cutoff = redo;

    mappings_dir = AllocateDir("pg_logical/mappings");
    while ((mapping_de = ReadDir(mappings_dir, "pg_logical/mappings")) != NULL)
    {
        struct stat statbuf;
        Oid         dboid;
        Oid         relid;
        XLogRecPtr  lsn;
        TransactionId rewrite_xid;
        TransactionId create_xid;
        uint32      hi,
                    lo;

        if (strcmp(mapping_de->d_name, ".") == 0 ||
            strcmp(mapping_de->d_name, "..") == 0)
            continue;

        snprintf(path, sizeof(path), "pg_logical/mappings/%s",
                 mapping_de->d_name);
        if (lstat(path, &statbuf) == 0 && !S_ISREG(statbuf.st_mode))
            continue;

        /* Skip over files that cannot be ours. */
        if (strncmp(mapping_de->d_name, "map-", 4) != 0)
            continue;

        if (sscanf(mapping_de->d_name, LOGICAL_REWRITE_FORMAT,
                   &dboid, &relid, &hi, &lo, &rewrite_xid, &create_xid) != 6)
            elog(ERROR, "could not parse filename \"%s\"", mapping_de->d_name);

        lsn = ((uint64) hi) << 32 | lo;

        if (lsn < cutoff || cutoff == InvalidXLogRecPtr)
        {
            elog(DEBUG1, "removing logical rewrite file \"%s\"", path);
            if (unlink(path) < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not remove file \"%s\": %m", path)));
        }
        else
        {
            /* on some operating systems fsyncing a file requires O_RDWR */
            int         fd = OpenTransientFile(path, O_RDWR | PG_BINARY, 0);

            /*
             * The file cannot vanish due to concurrency since this function
             * is the only one removing logical mappings and it's run while
             * CheckpointLock is held exclusively.
             */
            if (fd < 0)
                ereport(ERROR,
                        (errcode_for_file_access(),
                         errmsg("could not open file \"%s\": %m", path)));

            /*
             * We could try to avoid fsyncing files that either haven't
             * changed or have only been created since the checkpoint's
             * start, but it's currently not deemed worth the effort.
             */
            else if (pg_fsync(fd) != 0)
                ereport(data_sync_elevel(ERROR),
                        (errcode_for_file_access(),
                         errmsg("could not fsync file \"%s\": %m", path)));
            CloseTransientFile(fd);
        }
    }
    FreeDir(mappings_dir);
}