1 /*-------------------------------------------------------------------------
2 *
3 * xlog.c
4 * PostgreSQL write-ahead log manager
5 *
6 *
7 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
9 *
10 * src/backend/access/transam/xlog.c
11 *
12 *-------------------------------------------------------------------------
13 */
14
15 #include "postgres.h"
16
17 #include <ctype.h>
18 #include <math.h>
19 #include <time.h>
20 #include <fcntl.h>
21 #include <sys/stat.h>
22 #include <sys/time.h>
23 #include <unistd.h>
24
25 #include "access/clog.h"
26 #include "access/commit_ts.h"
27 #include "access/multixact.h"
28 #include "access/rewriteheap.h"
29 #include "access/subtrans.h"
30 #include "access/timeline.h"
31 #include "access/transam.h"
32 #include "access/tuptoaster.h"
33 #include "access/twophase.h"
34 #include "access/xact.h"
35 #include "access/xlog_internal.h"
36 #include "access/xloginsert.h"
37 #include "access/xlogreader.h"
38 #include "access/xlogutils.h"
39 #include "catalog/catversion.h"
40 #include "catalog/pg_control.h"
41 #include "catalog/pg_database.h"
42 #include "commands/tablespace.h"
43 #include "miscadmin.h"
44 #include "pgstat.h"
45 #include "port/atomics.h"
46 #include "postmaster/bgwriter.h"
47 #include "postmaster/walwriter.h"
48 #include "postmaster/startup.h"
49 #include "replication/basebackup.h"
50 #include "replication/logical.h"
51 #include "replication/slot.h"
52 #include "replication/origin.h"
53 #include "replication/snapbuild.h"
54 #include "replication/walreceiver.h"
55 #include "replication/walsender.h"
56 #include "storage/bufmgr.h"
57 #include "storage/fd.h"
58 #include "storage/ipc.h"
59 #include "storage/large_object.h"
60 #include "storage/latch.h"
61 #include "storage/pmsignal.h"
62 #include "storage/predicate.h"
63 #include "storage/proc.h"
64 #include "storage/procarray.h"
65 #include "storage/reinit.h"
66 #include "storage/smgr.h"
67 #include "storage/spin.h"
68 #include "utils/backend_random.h"
69 #include "utils/builtins.h"
70 #include "utils/guc.h"
71 #include "utils/memutils.h"
72 #include "utils/pg_lsn.h"
73 #include "utils/ps_status.h"
74 #include "utils/relmapper.h"
75 #include "utils/snapmgr.h"
76 #include "utils/timestamp.h"
77 #include "pg_trace.h"
78
79 extern uint32 bootstrap_data_checksum_version;
80
81 /* File path names (all relative to $PGDATA) */
82 #define RECOVERY_COMMAND_FILE "recovery.conf"
83 #define RECOVERY_COMMAND_DONE "recovery.done"
84 #define PROMOTE_SIGNAL_FILE "promote"
85 #define FALLBACK_PROMOTE_SIGNAL_FILE "fallback_promote"
86
87
88 /* User-settable parameters */
89 int max_wal_size_mb = 1024; /* 1 GB */
90 int min_wal_size_mb = 80; /* 80 MB */
91 int wal_keep_segments = 0;
92 int XLOGbuffers = -1;
93 int XLogArchiveTimeout = 0;
94 int XLogArchiveMode = ARCHIVE_MODE_OFF;
95 char *XLogArchiveCommand = NULL;
96 bool EnableHotStandby = false;
97 bool fullPageWrites = true;
98 bool wal_log_hints = false;
99 bool wal_compression = false;
100 char *wal_consistency_checking_string = NULL;
101 bool *wal_consistency_checking = NULL;
102 bool log_checkpoints = false;
103 int sync_method = DEFAULT_SYNC_METHOD;
104 int wal_level = WAL_LEVEL_MINIMAL;
105 int CommitDelay = 0; /* precommit delay in microseconds */
106 int CommitSiblings = 5; /* # concurrent xacts needed to sleep */
107 int wal_retrieve_retry_interval = 5000;
108
109 #ifdef WAL_DEBUG
110 bool XLOG_DEBUG = false;
111 #endif
112
113 int wal_segment_size = DEFAULT_XLOG_SEG_SIZE;
114
115 /*
116 * Number of WAL insertion locks to use. A higher value allows more insertions
117 * to happen concurrently, but adds some CPU overhead to flushing the WAL,
118 * which needs to iterate all the locks.
119 */
120 #define NUM_XLOGINSERT_LOCKS 8
121
122 /*
123 * Max distance from last checkpoint, before triggering a new xlog-based
124 * checkpoint.
125 */
126 int CheckPointSegments;
127
128 /* Estimated distance between checkpoints, in bytes */
129 static double CheckPointDistanceEstimate = 0;
130 static double PrevCheckPointDistance = 0;
131
132 /*
133 * GUC support
134 */
/* Allowed values for the sync_method GUC; entries other than "fsync" are
 * compiled in only on platforms that support them (#ifdef-gated). */
135 const struct config_enum_entry sync_method_options[] = {
136 {"fsync", SYNC_METHOD_FSYNC, false},
137 #ifdef HAVE_FSYNC_WRITETHROUGH
138 {"fsync_writethrough", SYNC_METHOD_FSYNC_WRITETHROUGH, false},
139 #endif
140 #ifdef HAVE_FDATASYNC
141 {"fdatasync", SYNC_METHOD_FDATASYNC, false},
142 #endif
143 #ifdef OPEN_SYNC_FLAG
144 {"open_sync", SYNC_METHOD_OPEN, false},
145 #endif
146 #ifdef OPEN_DATASYNC_FLAG
147 {"open_datasync", SYNC_METHOD_OPEN_DSYNC, false},
148 #endif
149 {NULL, 0, false} /* list terminator */
150 };
151
152
153 /*
154 * Although only "on", "off", and "always" are documented,
155 * we accept all the likely variants of "on" and "off".
156 */
157 const struct config_enum_entry archive_mode_options[] = {
158 {"always", ARCHIVE_MODE_ALWAYS, false},
159 {"on", ARCHIVE_MODE_ON, false},
160 {"off", ARCHIVE_MODE_OFF, false},
161 {"true", ARCHIVE_MODE_ON, true}, /* hidden synonyms for on/off follow */
162 {"false", ARCHIVE_MODE_OFF, true},
163 {"yes", ARCHIVE_MODE_ON, true},
164 {"no", ARCHIVE_MODE_OFF, true},
165 {"1", ARCHIVE_MODE_ON, true},
166 {"0", ARCHIVE_MODE_OFF, true},
167 {NULL, 0, false} /* list terminator */
168 };
169
170 /*
171 * Statistics for current checkpoint are collected in this global struct.
172 * Because only the checkpointer or a stand-alone backend can perform
173 * checkpoints, this will be unused in normal backends.
174 */
175 CheckpointStatsData CheckpointStats;
176
177 /*
178 * ThisTimeLineID will be same in all backends --- it identifies current
179 * WAL timeline for the database system.
180 */
181 TimeLineID ThisTimeLineID = 0;
182
183 /*
184 * Are we doing recovery from XLOG?
185 *
186 * This is only ever true in the startup process; it should be read as meaning
187 * "this process is replaying WAL records", rather than "the system is in
188 * recovery mode". It should be examined primarily by functions that need
189 * to act differently when called from a WAL redo function (e.g., to skip WAL
190 * logging). To check whether the system is in recovery regardless of which
191 * process you're running in, use RecoveryInProgress() but only after shared
192 * memory startup and lock initialization.
193 */
194 bool InRecovery = false;
195
196 /* Are we in Hot Standby mode? Only valid in startup process, see xlog.h */
197 HotStandbyState standbyState = STANDBY_DISABLED;
198
199 static XLogRecPtr LastRec;
200
201 /* Local copy of WalRcv->receivedUpto */
202 static XLogRecPtr receivedUpto = 0;
203 static TimeLineID receiveTLI = 0;
204
205 /*
206 * abortedRecPtr is the start pointer of a broken record at end of WAL when
207 * recovery completes; missingContrecPtr is the location of the first
208 * contrecord that went missing. See CreateOverwriteContrecordRecord for
209 * details.
210 */
211 static XLogRecPtr abortedRecPtr;
212 static XLogRecPtr missingContrecPtr;
213
214 /*
215 * During recovery, lastFullPageWrites keeps track of full_page_writes that
216 * the replayed WAL records indicate. It's initialized with full_page_writes
217 * that the recovery starting checkpoint record indicates, and then updated
218 * each time XLOG_FPW_CHANGE record is replayed.
219 */
220 static bool lastFullPageWrites;
221
222 /*
223 * Local copy of the state tracked by SharedRecoveryState in shared memory.
224 * It is false if SharedRecoveryState is RECOVERY_STATE_DONE. True actually
225 * means "not known, need to check the shared state".
226 */
227 static bool LocalRecoveryInProgress = true;
228
229 /*
230 * Local copy of SharedHotStandbyActive variable. False actually means "not
231 * known, need to check the shared state".
232 */
233 static bool LocalHotStandbyActive = false;
234
235 /*
236 * Local state for XLogInsertAllowed():
237 * 1: unconditionally allowed to insert XLOG
238 * 0: unconditionally not allowed to insert XLOG
239 * -1: must check RecoveryInProgress(); disallow until it is false
240 * Most processes start with -1 and transition to 1 after seeing that recovery
241 * is not in progress. But we can also force the value for special cases.
242 * The coding in XLogInsertAllowed() depends on the first two of these states
243 * being numerically the same as bool true and false.
244 */
245 static int LocalXLogInsertAllowed = -1;
246
247 /*
248 * When ArchiveRecoveryRequested is set, archive recovery was requested,
249 * ie. recovery.conf file was present. When InArchiveRecovery is set, we are
250 * currently recovering using offline XLOG archives. These variables are only
251 * valid in the startup process.
252 *
253 * When ArchiveRecoveryRequested is true, but InArchiveRecovery is false, we're
254 * currently performing crash recovery using only XLOG files in pg_wal, but
255 * will switch to using offline XLOG archives as soon as we reach the end of
256 * WAL in pg_wal.
257 */
258 bool ArchiveRecoveryRequested = false;
259 bool InArchiveRecovery = false;
260
261 /* Was the last xlog file restored from archive, or local? */
262 static bool restoredFromArchive = false;
263
264 /* Buffers dedicated to consistency checks of size BLCKSZ */
265 static char *replay_image_masked = NULL;
266 static char *master_image_masked = NULL;
267
268 /* options taken from recovery.conf for archive recovery */
269 char *recoveryRestoreCommand = NULL;
270 static char *recoveryEndCommand = NULL;
271 static char *archiveCleanupCommand = NULL;
272 static RecoveryTargetType recoveryTarget = RECOVERY_TARGET_UNSET;
273 static bool recoveryTargetInclusive = true;
274 static RecoveryTargetAction recoveryTargetAction = RECOVERY_TARGET_ACTION_PAUSE;
275 static TransactionId recoveryTargetXid;
276 static TimestampTz recoveryTargetTime;
277 static char *recoveryTargetName;
278 static XLogRecPtr recoveryTargetLSN;
279 static int recovery_min_apply_delay = 0;
280 static TimestampTz recoveryDelayUntilTime;
281
282 /* options taken from recovery.conf for XLOG streaming */
283 static bool StandbyModeRequested = false;
284 static char *PrimaryConnInfo = NULL;
285 static char *PrimarySlotName = NULL;
286 static char *TriggerFile = NULL;
287
288 /* are we currently in standby mode? */
289 bool StandbyMode = false;
290
291 /* whether request for fast promotion has been made yet */
292 static bool fast_promote = false;
293
294 /*
295 * if recoveryStopsBefore/After returns true, it saves information of the stop
296 * point here
297 */
298 static TransactionId recoveryStopXid;
299 static TimestampTz recoveryStopTime;
300 static XLogRecPtr recoveryStopLSN;
301 static char recoveryStopName[MAXFNAMELEN];
302 static bool recoveryStopAfter;
303
304 /*
305 * During normal operation, the only timeline we care about is ThisTimeLineID.
306 * During recovery, however, things are more complicated. To simplify life
307 * for rmgr code, we keep ThisTimeLineID set to the "current" timeline as we
308 * scan through the WAL history (that is, it is the line that was active when
309 * the currently-scanned WAL record was generated). We also need these
310 * timeline values:
311 *
312 * recoveryTargetTLI: the desired timeline that we want to end in.
313 *
314 * recoveryTargetIsLatest: was the requested target timeline 'latest'?
315 *
316 * expectedTLEs: a list of TimeLineHistoryEntries for recoveryTargetTLI and the timelines of
317 * its known parents, newest first (so recoveryTargetTLI is always the
318 * first list member). Only these TLIs are expected to be seen in the WAL
319 * segments we read, and indeed only these TLIs will be considered as
320 * candidate WAL files to open at all.
321 *
322 * curFileTLI: the TLI appearing in the name of the current input WAL file.
323 * (This is not necessarily the same as ThisTimeLineID, because we could
324 * be scanning data that was copied from an ancestor timeline when the current
325 * file was created.) During a sequential scan we do not allow this value
326 * to decrease.
327 */
328 static TimeLineID recoveryTargetTLI;
329 static bool recoveryTargetIsLatest = false;
330 static List *expectedTLEs;
331 static TimeLineID curFileTLI;
332
333 /*
334 * ProcLastRecPtr points to the start of the last XLOG record inserted by the
335 * current backend. It is updated for all inserts. XactLastRecEnd points to
336 * end+1 of the last record, and is reset when we end a top-level transaction,
337 * or start a new one; so it can be used to tell if the current transaction has
338 * created any XLOG records.
339 *
340 * While in parallel mode, this may not be fully up to date. When committing,
341 * a transaction can assume this covers all xlog records written either by the
342 * user backend or by any parallel worker which was present at any point during
343 * the transaction. But when aborting, or when still in parallel mode, other
344 * parallel backends may have written WAL records at later LSNs than the value
345 * stored here. The parallel leader advances its own copy, when necessary,
346 * in WaitForParallelWorkersToFinish.
347 */
348 XLogRecPtr ProcLastRecPtr = InvalidXLogRecPtr;
349 XLogRecPtr XactLastRecEnd = InvalidXLogRecPtr;
350 XLogRecPtr XactLastCommitEnd = InvalidXLogRecPtr;
351
352 /*
353 * RedoRecPtr is this backend's local copy of the REDO record pointer
354 * (which is almost but not quite the same as a pointer to the most recent
355 * CHECKPOINT record). We update this from the shared-memory copy,
356 * XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
357 * hold an insertion lock). See XLogInsertRecord for details. We are also
358 * allowed to update from XLogCtl->RedoRecPtr if we hold the info_lck;
359 * see GetRedoRecPtr. A freshly spawned backend obtains the value during
360 * InitXLOGAccess.
361 */
362 static XLogRecPtr RedoRecPtr;
363
364 /*
365 * doPageWrites is this backend's local copy of (forcePageWrites ||
366 * fullPageWrites). It is used together with RedoRecPtr to decide whether
367 * a full-page image of a page need to be taken.
368 */
369 static bool doPageWrites;
370
371 /* Has the recovery code requested a walreceiver wakeup? */
372 static bool doRequestWalReceiverReply;
373
374 /*
375 * RedoStartLSN points to the checkpoint's REDO location which is specified
376 * in a backup label file, backup history file or control file. In standby
377 * mode, XLOG streaming usually starts from the position where an invalid
378 * record was found. But if we fail to read even the initial checkpoint
379 * record, we use the REDO location instead of the checkpoint location as
380 * the start position of XLOG streaming. Otherwise we would have to jump
381 * backwards to the REDO location after reading the checkpoint record,
382 * because the REDO record can precede the checkpoint record.
383 */
384 static XLogRecPtr RedoStartLSN = InvalidXLogRecPtr;
385
386 /*----------
387 * Shared-memory data structures for XLOG control
388 *
389 * LogwrtRqst indicates a byte position that we need to write and/or fsync
390 * the log up to (all records before that point must be written or fsynced).
391 * LogwrtResult indicates the byte positions we have already written/fsynced.
392 * These structs are identical but are declared separately to indicate their
393 * slightly different functions.
394 *
395 * To read XLogCtl->LogwrtResult, you must hold either info_lck or
396 * WALWriteLock. To update it, you need to hold both locks. The point of
397 * this arrangement is that the value can be examined by code that already
398 * holds WALWriteLock without needing to grab info_lck as well. In addition
399 * to the shared variable, each backend has a private copy of LogwrtResult,
400 * which is updated when convenient.
401 *
402 * The request bookkeeping is simpler: there is a shared XLogCtl->LogwrtRqst
403 * (protected by info_lck), but we don't need to cache any copies of it.
404 *
405 * info_lck is only held long enough to read/update the protected variables,
406 * so it's a plain spinlock. The other locks are held longer (potentially
407 * over I/O operations), so we use LWLocks for them. These locks are:
408 *
409 * WALBufMappingLock: must be held to replace a page in the WAL buffer cache.
410 * It is only held while initializing and changing the mapping. If the
411 * contents of the buffer being replaced haven't been written yet, the mapping
412 * lock is released while the write is done, and reacquired afterwards.
413 *
414 * WALWriteLock: must be held to write WAL buffers to disk (XLogWrite or
415 * XLogFlush).
416 *
417 * ControlFileLock: must be held to read/update control file or create
418 * new log file.
419 *
420 * CheckpointLock: must be held to do a checkpoint or restartpoint (ensures
421 * only one checkpointer at a time; currently, with all checkpoints done by
422 * the checkpointer, this is just pro forma).
423 *
424 *----------
425 */
426
/* Write/flush request positions; the shared copy (XLogCtl->LogwrtRqst) is
 * protected by info_lck -- see the notes above. */
427 typedef struct XLogwrtRqst
428 {
429 XLogRecPtr Write; /* last byte + 1 to write out */
430 XLogRecPtr Flush; /* last byte + 1 to flush */
431 } XLogwrtRqst;
432
/* Positions already written/flushed; see the locking notes above for how
 * the shared copy (XLogCtl->LogwrtResult) is read and updated. */
433 typedef struct XLogwrtResult
434 {
435 XLogRecPtr Write; /* last byte + 1 written out */
436 XLogRecPtr Flush; /* last byte + 1 flushed */
437 } XLogwrtResult;
438
439 /*
440 * Inserting to WAL is protected by a small fixed number of WAL insertion
441 * locks. To insert to the WAL, you must hold one of the locks - it doesn't
442 * matter which one. To lock out other concurrent insertions, you must hold
443 * all of them. Each WAL insertion lock consists of a lightweight lock, plus an
444 * indicator of how far the insertion has progressed (insertingAt).
445 *
446 * The insertingAt values are read when a process wants to flush WAL from
447 * the in-memory buffers to disk, to check that all the insertions to the
448 * region the process is about to write out have finished. You could simply
449 * wait for all currently in-progress insertions to finish, but the
450 * insertingAt indicator allows you to ignore insertions to later in the WAL,
451 * so that you only wait for the insertions that are modifying the buffers
452 * you're about to write out.
453 *
454 * This isn't just an optimization. If all the WAL buffers are dirty, an
455 * inserter that's holding a WAL insert lock might need to evict an old WAL
456 * buffer, which requires flushing the WAL. If it's possible for an inserter
457 * to block on another inserter unnecessarily, deadlock can arise when two
458 * inserters holding a WAL insert lock wait for each other to finish their
459 * insertion.
460 *
461 * Small WAL records that don't cross a page boundary never update the value,
462 * the WAL record is just copied to the page and the lock is released. But
463 * to avoid the deadlock-scenario explained above, the indicator is always
464 * updated before sleeping while holding an insertion lock.
465 *
466 * lastImportantAt contains the LSN of the last important WAL record inserted
467 * using a given lock. This value is used to detect if there has been
468 * important WAL activity since the last time some action, like a checkpoint,
469 * was performed - allowing to not repeat the action if not. The LSN is
470 * updated for all insertions, unless the XLOG_MARK_UNIMPORTANT flag was
471 * set. lastImportantAt is never cleared, only overwritten by the LSN of newer
472 * records. Tracking the WAL activity directly in WALInsertLock has the
473 * advantage of not needing any additional locks to update the value.
474 */
475 typedef struct
476 {
477 LWLock lock; /* the lightweight lock itself */
478 XLogRecPtr insertingAt; /* how far this lock's current insertion has progressed */
479 XLogRecPtr lastImportantAt; /* LSN of last "important" record inserted under this lock */
480 } WALInsertLock;
481
482 /*
483 * All the WAL insertion locks are allocated as an array in shared memory. We
484 * force the array stride to be a power of 2, which saves a few cycles in
485 * indexing, but more importantly also ensures that individual slots don't
486 * cross cache line boundaries. (Of course, we have to also ensure that the
487 * array start address is suitably aligned.)
488 */
489 typedef union WALInsertLockPadded
490 {
491 WALInsertLock l; /* the actual lock state */
492 char pad[PG_CACHE_LINE_SIZE]; /* pad array stride to a full cache line */
493 } WALInsertLockPadded;
494
495 /*
496 * State of an exclusive backup, necessary to control concurrent activities
497 * across sessions when working on exclusive backups.
498 *
499 * EXCLUSIVE_BACKUP_NONE means that there is no exclusive backup actually
500 * running, to be more precise pg_start_backup() is not being executed for
501 * an exclusive backup and there is no exclusive backup in progress.
502 * EXCLUSIVE_BACKUP_STARTING means that pg_start_backup() is starting an
503 * exclusive backup.
504 * EXCLUSIVE_BACKUP_IN_PROGRESS means that pg_start_backup() has finished
505 * running and an exclusive backup is in progress. pg_stop_backup() is
506 * needed to finish it.
507 * EXCLUSIVE_BACKUP_STOPPING means that pg_stop_backup() is stopping an
508 * exclusive backup.
509 */
510 typedef enum ExclusiveBackupState
511 {
512 EXCLUSIVE_BACKUP_NONE = 0, /* no exclusive backup running or starting */
513 EXCLUSIVE_BACKUP_STARTING, /* pg_start_backup() is starting one */
514 EXCLUSIVE_BACKUP_IN_PROGRESS, /* running; pg_stop_backup() needed to finish */
515 EXCLUSIVE_BACKUP_STOPPING /* pg_stop_backup() is stopping it */
516 } ExclusiveBackupState;
517
518 /*
519 * Session status of running backup, used for sanity checks in SQL-callable
520 * functions to start and stop backups.
521 */
522 static SessionBackupState sessionBackupState = SESSION_BACKUP_NONE;
523
524 /*
525 * Shared state data for WAL insertion.
526 */
527 typedef struct XLogCtlInsert
528 {
529 slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */
530
531 /*
532 * CurrBytePos is the end of reserved WAL. The next record will be
533 * inserted at that position. PrevBytePos is the start position of the
534 * previously inserted (or rather, reserved) record - it is copied to the
535 * prev-link of the next record. These are stored as "usable byte
536 * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()).
537 */
538 uint64 CurrBytePos;
539 uint64 PrevBytePos;
540
541 /*
542 * Make sure the above heavily-contended spinlock and byte positions are
543 * on their own cache line. In particular, the RedoRecPtr and full page
544 * write variables below should be on a different cache line. They are
545 * read on every WAL insertion, but updated rarely, and we don't want
546 * those reads to steal the cache line containing Curr/PrevBytePos.
547 */
548 char pad[PG_CACHE_LINE_SIZE];
549
550 /*
551 * fullPageWrites is the master copy used by all backends to determine
552 * whether to write full-page to WAL, instead of using process-local one.
553 * This is required because, when full_page_writes is changed by SIGHUP,
554 * we must WAL-log it before it actually affects WAL-logging by backends.
555 * Checkpointer sets at startup or after SIGHUP.
556 *
557 * To read these fields, you must hold an insertion lock. To modify them,
558 * you must hold ALL the locks.
559 */
560 XLogRecPtr RedoRecPtr; /* current redo point for insertions */
561 bool forcePageWrites; /* forcing full-page writes for PITR? */
562 bool fullPageWrites;
563
564 /*
565 * exclusiveBackupState indicates the state of an exclusive backup (see
566 * comments of ExclusiveBackupState for more details). nonExclusiveBackups
567 * is a counter indicating the number of streaming base backups currently
568 * in progress. forcePageWrites is set to true when either of these is
569 * non-zero. lastBackupStart is the latest checkpoint redo location used
570 * as a starting point for an online backup.
571 */
572 ExclusiveBackupState exclusiveBackupState; /* state of exclusive backup */
573 int nonExclusiveBackups; /* # of streaming base backups in progress */
574 XLogRecPtr lastBackupStart; /* latest checkpoint redo loc used as backup start */
575
576 /*
577 * WAL insertion locks.
578 */
579 WALInsertLockPadded *WALInsertLocks;
580 } XLogCtlInsert;
581
582 /*
583 * Total shared-memory state for XLOG.
584 */
585 typedef struct XLogCtlData
586 {
587 XLogCtlInsert Insert;
588
589 /* Protected by info_lck: */
590 XLogwrtRqst LogwrtRqst;
591 XLogRecPtr RedoRecPtr; /* a recent copy of Insert->RedoRecPtr */
592 uint32 ckptXidEpoch; /* nextXID & epoch of latest checkpoint */
593 TransactionId ckptXid; /* nextXID of latest checkpoint */
594 XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
595 XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */
596
597 XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
598
599 /* Fake LSN counter, for unlogged relations. Protected by ulsn_lck. */
600 XLogRecPtr unloggedLSN;
601 slock_t ulsn_lck;
602
603 /* Time and LSN of last xlog segment switch. Protected by WALWriteLock. */
604 pg_time_t lastSegSwitchTime;
605 XLogRecPtr lastSegSwitchLSN;
606
607 /*
608 * Protected by info_lck and WALWriteLock (you must hold either lock to
609 * read it, but both to update)
610 */
611 XLogwrtResult LogwrtResult;
612
613 /*
614 * Latest initialized page in the cache (last byte position + 1).
615 *
616 * To change the identity of a buffer (and InitializedUpTo), you need to
617 * hold WALBufMappingLock. To change the identity of a buffer that's
618 * still dirty, the old page needs to be written out first, and for that
619 * you need WALWriteLock, and you need to ensure that there are no
620 * in-progress insertions to the page by calling
621 * WaitXLogInsertionsToFinish().
622 */
623 XLogRecPtr InitializedUpTo;
624
625 /*
626 * These values do not change after startup, although the pointed-to pages
627 * and xlblocks values certainly do. xlblock values are protected by
628 * WALBufMappingLock.
629 */
630 char *pages; /* buffers for unwritten XLOG pages */
631 XLogRecPtr *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */
632 int XLogCacheBlck; /* highest allocated xlog buffer index */
633
634 /*
635 * Shared copy of ThisTimeLineID. Does not change after end-of-recovery.
636 * If we created a new timeline when the system was started up,
637 * PrevTimeLineID is the old timeline's ID that we forked off from.
638 * Otherwise it's equal to ThisTimeLineID.
639 */
640 TimeLineID ThisTimeLineID;
641 TimeLineID PrevTimeLineID;
642
643 /*
644 * archiveCleanupCommand is read from recovery.conf but needs to be in
645 * shared memory so that the checkpointer process can access it.
646 */
647 char archiveCleanupCommand[MAXPGPATH];
648
649 /*
650 * SharedRecoveryState indicates if we're still in crash or archive
651 * recovery. Protected by info_lck.
652 */
653 RecoveryState SharedRecoveryState;
654
655 /*
656 * SharedHotStandbyActive indicates if we allow hot standby queries to be
657 * run (cf. LocalHotStandbyActive above). Protected by info_lck.
658 */
659 bool SharedHotStandbyActive;
660
661 /*
662 * WalWriterSleeping indicates whether the WAL writer is currently in
663 * low-power mode (and hence should be nudged if an async commit occurs).
664 * Protected by info_lck.
665 */
666 bool WalWriterSleeping;
667
668 /*
669 * recoveryWakeupLatch is used to wake up the startup process to continue
670 * WAL replay, if it is waiting for WAL to arrive or failover trigger file
671 * to appear.
672 */
673 Latch recoveryWakeupLatch;
674
675 /*
676 * During recovery, we keep a copy of the latest checkpoint record here.
677 * lastCheckPointRecPtr points to start of checkpoint record and
678 * lastCheckPointEndPtr points to end+1 of checkpoint record. Used by the
679 * checkpointer when it wants to create a restartpoint.
680 *
681 * Protected by info_lck.
682 */
683 XLogRecPtr lastCheckPointRecPtr;
684 XLogRecPtr lastCheckPointEndPtr;
685 CheckPoint lastCheckPoint;
686
687 /*
688 * lastReplayedEndRecPtr points to end+1 of the last record successfully
689 * replayed. When we're currently replaying a record, ie. in a redo
690 * function, replayEndRecPtr points to the end+1 of the record being
691 * replayed, otherwise it's equal to lastReplayedEndRecPtr.
692 */
693 XLogRecPtr lastReplayedEndRecPtr;
694 TimeLineID lastReplayedTLI; /* TLI of last replayed record */
695 XLogRecPtr replayEndRecPtr;
696 TimeLineID replayEndTLI; /* TLI of record being replayed */
697 /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
698 TimestampTz recoveryLastXTime;
699
700 /*
701 * timestamp of when we started replaying the current chunk of WAL data,
702 * only relevant for replication or archive recovery
703 */
704 TimestampTz currentChunkStartTime;
705 /* Are we requested to pause recovery? */
706 bool recoveryPause;
707
708 /*
709 * lastFpwDisableRecPtr points to the start of the last replayed
710 * XLOG_FPW_CHANGE record that instructs full_page_writes is disabled.
711 */
712 XLogRecPtr lastFpwDisableRecPtr;
713
714 slock_t info_lck; /* locks shared variables shown above */
715 } XLogCtlData;
716
717 static XLogCtlData *XLogCtl = NULL;
718
719 /* a private copy of XLogCtl->Insert.WALInsertLocks, for convenience */
720 static WALInsertLockPadded *WALInsertLocks = NULL;
721
722 /*
723 * We maintain an image of pg_control in shared memory.
724 */
725 static ControlFileData *ControlFile = NULL;
726
727 /*
728 * Calculate the amount of space left on the page after 'endptr'. Beware
729 * multiple evaluation!
730 */
731 #define INSERT_FREESPACE(endptr) \
732 (((endptr) % XLOG_BLCKSZ == 0) ? 0 : (XLOG_BLCKSZ - (endptr) % XLOG_BLCKSZ))
733
734 /* Macro to advance to next buffer index. */
735 #define NextBufIdx(idx) \
736 (((idx) == XLogCtl->XLogCacheBlck) ? 0 : ((idx) + 1))
737
738 /*
739 * XLogRecPtrToBufIdx returns the index of the WAL buffer that holds, or
740 * would hold if it was in cache, the page containing 'recptr'.
741 */
742 #define XLogRecPtrToBufIdx(recptr) \
743 (((recptr) / XLOG_BLCKSZ) % (XLogCtl->XLogCacheBlck + 1))
744
745 /*
746 * These are the number of bytes in a WAL page usable for WAL data.
747 */
748 #define UsableBytesInPage (XLOG_BLCKSZ - SizeOfXLogShortPHD)
749
750 /*
751 * Convert min_wal_size_mb and max wal_size_mb to equivalent segment count.
752 * Rounds down.
753 */
754 #define ConvertToXSegs(x, segsize) \
755 ((x) / ((segsize) / (1024 * 1024)))
756
757 /* The number of bytes in a WAL segment usable for WAL data. */
758 static int UsableBytesInSegment;
759
760 /*
761 * Private, possibly out-of-date copy of shared LogwrtResult.
762 * See discussion above.
763 */
764 static XLogwrtResult LogwrtResult = {0, 0};
765
766 /*
767 * Codes indicating where we got a WAL file from during recovery, or where
768 * to attempt to get one.
769 */
770 typedef enum
771 {
772 XLOG_FROM_ANY = 0, /* request to read WAL from any source */
773 XLOG_FROM_ARCHIVE, /* restored using restore_command */
774 XLOG_FROM_PG_WAL, /* existing file in pg_wal */
775 XLOG_FROM_STREAM /* streamed from master */
776 } XLogSource; /* NB: keep xlogSourceNames[] below in sync with this enum */
777
778 /* human-readable names for XLogSources, for debugging output */
779 static const char *xlogSourceNames[] = {"any", "archive", "pg_wal", "stream"};
780
781 /*
782 * openLogFile is -1 or a kernel FD for an open log file segment.
783 * When it's open, openLogOff is the current seek offset in the file.
784 * openLogSegNo identifies the segment. These variables are only
785 * used to write the XLOG, and so will normally refer to the active segment.
786 */
787 static int openLogFile = -1;
788 static XLogSegNo openLogSegNo = 0;
789 static uint32 openLogOff = 0;
790
791 /*
792 * These variables are used similarly to the ones above, but for reading
793 * the XLOG. Note, however, that readOff generally represents the offset
794 * of the page just read, not the seek position of the FD itself, which
795 * will be just past that page. readLen indicates how much of the current
796 * page has been read into readBuf, and readSource indicates where we got
797 * the currently open file from.
798 */
799 static int readFile = -1;
800 static XLogSegNo readSegNo = 0;
801 static uint32 readOff = 0;
802 static uint32 readLen = 0;
803 static XLogSource readSource = 0; /* XLOG_FROM_* code */
804
805 /*
806 * Keeps track of which source we're currently reading from. This is
807 * different from readSource in that this is always set, even when we don't
808 * currently have a WAL file open. If lastSourceFailed is set, our last
809 * attempt to read from currentSource failed, and we should try another source
810 * next.
811 */
812 static XLogSource currentSource = 0; /* XLOG_FROM_* code */
813 static bool lastSourceFailed = false;
814
815 typedef struct XLogPageReadPrivate
816 {
817 int emode; /* elevel for reporting read failures -- NOTE(review): confirm against the page-read callback */
818 bool fetching_ckpt; /* are we fetching a checkpoint record? */
819 bool randAccess; /* presumably: random access rather than sequential read -- verify at callers */
820 } XLogPageReadPrivate;
821
822 /*
823 * These variables track when we last obtained some WAL data to process,
824 * and where we got it from. (XLogReceiptSource is initially the same as
825 * readSource, but readSource gets reset to zero when we don't have data
826 * to process right now. It is also different from currentSource, which
827 * also changes when we try to read from a source and fail, while
828 * XLogReceiptSource tracks where we last successfully read some WAL.)
829 */
830 static TimestampTz XLogReceiptTime = 0;
831 static XLogSource XLogReceiptSource = 0; /* XLOG_FROM_* code */
832
833 /* State information for XLOG reading */
834 static XLogRecPtr ReadRecPtr; /* start of last record read */
835 static XLogRecPtr EndRecPtr; /* end+1 of last record read */
836
837 /*
838 * Local copies of equivalent fields in the control file. When running
839 * crash recovery, minRecoveryPoint is set to InvalidXLogRecPtr as we
840 * expect to replay all the WAL available, and updateMinRecoveryPoint is
841 * switched to false to prevent any updates while replaying records.
842 * Those values are kept consistent as long as crash recovery runs.
843 */
844 static XLogRecPtr minRecoveryPoint;
845 static TimeLineID minRecoveryPointTLI;
846 static bool updateMinRecoveryPoint = true;
847
848 /*
849 * Have we reached a consistent database state? In crash recovery, we have
850 * to replay all the WAL, so reachedConsistency is never set. During archive
851 * recovery, the database is consistent once minRecoveryPoint is reached.
852 */
853 bool reachedConsistency = false;
854
855 static bool InRedo = false;
856
857 /* Have we launched bgwriter during recovery? */
858 static bool bgwriterLaunched = false;
859
860 /* For WALInsertLockAcquire/Release functions */
861 static int MyLockNo = 0;
862 static bool holdingAllLocks = false;
863
864 #ifdef WAL_DEBUG
865 static MemoryContext walDebugCxt = NULL;
866 #endif
867
868 static void readRecoveryCommandFile(void);
869 static void exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog);
870 static bool recoveryStopsBefore(XLogReaderState *record);
871 static bool recoveryStopsAfter(XLogReaderState *record);
872 static void recoveryPausesHere(void);
873 static bool recoveryApplyDelay(XLogReaderState *record);
874 static void SetLatestXTime(TimestampTz xtime);
875 static void SetCurrentChunkStartTime(TimestampTz xtime);
876 static void CheckRequiredParameterValues(void);
877 static void XLogReportParameters(void);
878 static void checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI,
879 TimeLineID prevTLI);
880 static void VerifyOverwriteContrecord(xl_overwrite_contrecord *xlrec,
881 XLogReaderState *state);
882 static void LocalSetXLogInsertAllowed(void);
883 static void CreateEndOfRecoveryRecord(void);
884 static XLogRecPtr CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn);
885 static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags);
886 static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo);
887 static XLogRecPtr XLogGetReplicationSlotMinimumLSN(void);
888
889 static void AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic);
890 static bool XLogCheckpointNeeded(XLogSegNo new_segno);
891 static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible);
892 static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
893 bool find_free, XLogSegNo max_segno,
894 bool use_lock);
895 static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
896 int source, bool notfoundOk);
897 static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source);
898 static int XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr,
899 int reqLen, XLogRecPtr targetRecPtr, char *readBuf,
900 TimeLineID *readTLI);
901 static bool WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
902 bool fetching_ckpt, XLogRecPtr tliRecPtr);
903 static int emode_for_corrupt_record(int emode, XLogRecPtr RecPtr);
904 static void XLogFileClose(void);
905 static void PreallocXlogFiles(XLogRecPtr endptr);
906 static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr lastredoptr, XLogRecPtr endptr);
907 static void RemoveXlogFile(const char *segname, XLogRecPtr lastredoptr, XLogRecPtr endptr);
908 static void UpdateLastRemovedPtr(char *filename);
909 static void ValidateXLOGDirectoryStructure(void);
910 static void CleanupBackupHistory(void);
911 static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force);
912 static XLogRecord *ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr,
913 int emode, bool fetching_ckpt);
914 static void CheckRecoveryConsistency(void);
915 static XLogRecord *ReadCheckpointRecord(XLogReaderState *xlogreader,
916 XLogRecPtr RecPtr, int whichChkpti, bool report);
917 static bool rescanLatestTimeLine(void);
918 static void WriteControlFile(void);
919 static void ReadControlFile(void);
920 static char *str_time(pg_time_t tnow);
921 static bool CheckForStandbyTrigger(void);
922
923 #ifdef WAL_DEBUG
924 static void xlog_outrec(StringInfo buf, XLogReaderState *record);
925 #endif
926 static void xlog_outdesc(StringInfo buf, XLogReaderState *record);
927 static void pg_start_backup_callback(int code, Datum arg);
928 static void pg_stop_backup_callback(int code, Datum arg);
929 static bool read_backup_label(XLogRecPtr *checkPointLoc,
930 bool *backupEndRequired, bool *backupFromStandby);
931 static bool read_tablespace_map(List **tablespaces);
932
933 static void rm_redo_error_callback(void *arg);
934 static int get_sync_bit(int method);
935
936 static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch,
937 XLogRecData *rdata,
938 XLogRecPtr StartPos, XLogRecPtr EndPos);
939 static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos,
940 XLogRecPtr *EndPos, XLogRecPtr *PrevPtr);
941 static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos,
942 XLogRecPtr *PrevPtr);
943 static XLogRecPtr WaitXLogInsertionsToFinish(XLogRecPtr upto);
944 static char *GetXLogBuffer(XLogRecPtr ptr);
945 static XLogRecPtr XLogBytePosToRecPtr(uint64 bytepos);
946 static XLogRecPtr XLogBytePosToEndRecPtr(uint64 bytepos);
947 static uint64 XLogRecPtrToBytePos(XLogRecPtr ptr);
948 static void checkXLogConsistency(XLogReaderState *record);
949
950 static void WALInsertLockAcquire(void);
951 static void WALInsertLockAcquireExclusive(void);
952 static void WALInsertLockRelease(void);
953 static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt);
954
955 /*
956 * Insert an XLOG record represented by an already-constructed chain of data
957 * chunks. This is a low-level routine; to construct the WAL record header
958 * and data, use the higher-level routines in xloginsert.c.
959 *
960 * If 'fpw_lsn' is valid, it is the oldest LSN among the pages that this
961 * WAL record applies to, that were not included in the record as full page
962 * images. If fpw_lsn <= RedoRecPtr, the function does not perform the
963 * insertion and returns InvalidXLogRecPtr. The caller can then recalculate
964 * which pages need a full-page image, and retry. If fpw_lsn is invalid, the
965 * record is always inserted.
966 *
967 * 'flags' gives more in-depth control on the record being inserted. See
968 * XLogSetRecordFlags() for details.
969 *
970 * The first XLogRecData in the chain must be for the record header, and its
971 * data must be MAXALIGNed. XLogInsertRecord fills in the xl_prev and
972 * xl_crc fields in the header, the rest of the header must already be filled
973 * by the caller.
974 *
975 * Returns XLOG pointer to end of record (beginning of next record).
976 * This can be used as LSN for data pages affected by the logged action.
977 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
978 * before the data page can be written out. This implements the basic
979 * WAL rule "write the log before the data".)
980 */
XLogRecPtr
XLogInsertRecord(XLogRecData *rdata,
				 XLogRecPtr fpw_lsn,
				 uint8 flags)
{
	XLogCtlInsert *Insert = &XLogCtl->Insert;
	pg_crc32c	rdata_crc;
	bool		inserted;
	XLogRecord *rechdr = (XLogRecord *) rdata->data;
	uint8		info = rechdr->xl_info & ~XLR_INFO_MASK;	/* strip flag bits
															 * to get record
															 * type */
	bool		isLogSwitch = (rechdr->xl_rmid == RM_XLOG_ID &&
							   info == XLOG_SWITCH);
	XLogRecPtr	StartPos;
	XLogRecPtr	EndPos;
	bool		prevDoPageWrites = doPageWrites;	/* remembered so we can
													 * detect it being turned
													 * on below */

	/* we assume that all of the record header is in the first chunk */
	Assert(rdata->len >= SizeOfXLogRecord);

	/* cross-check on whether we should be here or not */
	if (!XLogInsertAllowed())
		elog(ERROR, "cannot make new WAL entries during recovery");

	/*----------
	 *
	 * We have now done all the preparatory work we can without holding a
	 * lock or modifying shared state. From here on, inserting the new WAL
	 * record to the shared WAL buffer cache is a two-step process:
	 *
	 * 1. Reserve the right amount of space from the WAL. The current head of
	 *	  reserved space is kept in Insert->CurrBytePos, and is protected by
	 *	  insertpos_lck.
	 *
	 * 2. Copy the record to the reserved WAL space. This involves finding the
	 *	  correct WAL buffer containing the reserved space, and copying the
	 *	  record in place. This can be done concurrently in multiple processes.
	 *
	 * To keep track of which insertions are still in-progress, each concurrent
	 * inserter acquires an insertion lock. In addition to just indicating that
	 * an insertion is in progress, the lock tells others how far the inserter
	 * has progressed. There is a small fixed number of insertion locks,
	 * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
	 * boundary, it updates the value stored in the lock to the how far it has
	 * inserted, to allow the previous buffer to be flushed.
	 *
	 * Holding onto an insertion lock also protects RedoRecPtr and
	 * fullPageWrites from changing until the insertion is finished.
	 *
	 * Step 2 can usually be done completely in parallel. If the required WAL
	 * page is not initialized yet, you have to grab WALBufMappingLock to
	 * initialize it, but the WAL writer tries to do that ahead of insertions
	 * to avoid that from happening in the critical path.
	 *
	 *----------
	 */
	START_CRIT_SECTION();
	if (isLogSwitch)
		WALInsertLockAcquireExclusive();
	else
		WALInsertLockAcquire();

	/*
	 * Check to see if my copy of RedoRecPtr is out of date. If so, may have
	 * to go back and have the caller recompute everything. This can only
	 * happen just after a checkpoint, so it's better to be slow in this case
	 * and fast otherwise.
	 *
	 * Also check to see if fullPageWrites or forcePageWrites was just turned
	 * on; if we weren't already doing full-page writes then go back and
	 * recompute.
	 *
	 * If we aren't doing full-page writes then RedoRecPtr doesn't actually
	 * affect the contents of the XLOG record, so we'll update our local copy
	 * but not force a recomputation.  (If doPageWrites was just turned off,
	 * we could recompute the record without full pages, but we choose not to
	 * bother.)
	 */
	if (RedoRecPtr != Insert->RedoRecPtr)
	{
		Assert(RedoRecPtr < Insert->RedoRecPtr);
		RedoRecPtr = Insert->RedoRecPtr;
	}
	doPageWrites = (Insert->fullPageWrites || Insert->forcePageWrites);

	if (doPageWrites &&
		(!prevDoPageWrites ||
		 (fpw_lsn != InvalidXLogRecPtr && fpw_lsn <= RedoRecPtr)))
	{
		/*
		 * Oops, some buffer now needs to be backed up that the caller didn't
		 * back up.  Start over.
		 */
		WALInsertLockRelease();
		END_CRIT_SECTION();
		return InvalidXLogRecPtr;
	}

	/*
	 * Reserve space for the record in the WAL. This also sets the xl_prev
	 * pointer.
	 */
	if (isLogSwitch)
		inserted = ReserveXLogSwitch(&StartPos, &EndPos, &rechdr->xl_prev);
	else
	{
		ReserveXLogInsertLocation(rechdr->xl_tot_len, &StartPos, &EndPos,
								  &rechdr->xl_prev);
		inserted = true;
	}

	if (inserted)
	{
		/*
		 * Now that xl_prev has been filled in, calculate CRC of the record
		 * header.
		 */
		rdata_crc = rechdr->xl_crc;
		COMP_CRC32C(rdata_crc, rechdr, offsetof(XLogRecord, xl_crc));
		FIN_CRC32C(rdata_crc);
		rechdr->xl_crc = rdata_crc;

		/*
		 * All the record data, including the header, is now ready to be
		 * inserted. Copy the record in the space reserved.
		 */
		CopyXLogRecordToWAL(rechdr->xl_tot_len, isLogSwitch, rdata,
							StartPos, EndPos);

		/*
		 * Unless record is flagged as not important, update LSN of last
		 * important record in the current slot. When holding all locks, just
		 * update the first one.
		 */
		if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
		{
			int			lockno = holdingAllLocks ? 0 : MyLockNo;

			WALInsertLocks[lockno].l.lastImportantAt = StartPos;
		}
	}
	else
	{
		/*
		 * This was an xlog-switch record, but the current insert location was
		 * already exactly at the beginning of a segment, so there was no need
		 * to do anything.
		 */
	}

	/*
	 * Done! Let others know that we're finished.
	 */
	WALInsertLockRelease();

	MarkCurrentTransactionIdLoggedIfAny();

	END_CRIT_SECTION();

	/*
	 * Update shared LogwrtRqst.Write, if we crossed page boundary.
	 */
	if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ)
	{
		SpinLockAcquire(&XLogCtl->info_lck);
		/* advance global request to include new block(s) */
		if (XLogCtl->LogwrtRqst.Write < EndPos)
			XLogCtl->LogwrtRqst.Write = EndPos;
		/* update local result copy while I have the chance */
		LogwrtResult = XLogCtl->LogwrtResult;
		SpinLockRelease(&XLogCtl->info_lck);
	}

	/*
	 * If this was an XLOG_SWITCH record, flush the record and the empty
	 * padding space that fills the rest of the segment, and perform
	 * end-of-segment actions (eg, notifying archiver).
	 */
	if (isLogSwitch)
	{
		TRACE_POSTGRESQL_WAL_SWITCH();
		XLogFlush(EndPos);

		/*
		 * Even though we reserved the rest of the segment for us, which is
		 * reflected in EndPos, we return a pointer to just the end of the
		 * xlog-switch record.
		 */
		if (inserted)
		{
			EndPos = StartPos + SizeOfXLogRecord;
			if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ)
			{
				uint64		offset = XLogSegmentOffset(EndPos, wal_segment_size);

				/* segment's first page gets the long header variant */
				if (offset == EndPos % XLOG_BLCKSZ)
					EndPos += SizeOfXLogLongPHD;
				else
					EndPos += SizeOfXLogShortPHD;
			}
		}
	}

#ifdef WAL_DEBUG
	if (XLOG_DEBUG)
	{
		static XLogReaderState *debug_reader = NULL;
		StringInfoData buf;
		StringInfoData recordBuf;
		char	   *errormsg = NULL;
		MemoryContext oldCxt;

		oldCxt = MemoryContextSwitchTo(walDebugCxt);

		initStringInfo(&buf);
		appendStringInfo(&buf, "INSERT @ %X/%X: ",
						 (uint32) (EndPos >> 32), (uint32) EndPos);

		/*
		 * We have to piece together the WAL record data from the XLogRecData
		 * entries, so that we can pass it to the rm_desc function as one
		 * contiguous chunk.
		 */
		initStringInfo(&recordBuf);
		for (; rdata != NULL; rdata = rdata->next)
			appendBinaryStringInfo(&recordBuf, rdata->data, rdata->len);

		if (!debug_reader)
			debug_reader = XLogReaderAllocate(wal_segment_size, NULL, NULL);

		if (!debug_reader)
		{
			appendStringInfoString(&buf, "error decoding record: out of memory");
		}
		else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
								   &errormsg))
		{
			appendStringInfo(&buf, "error decoding record: %s",
							 errormsg ? errormsg : "no error message");
		}
		else
		{
			appendStringInfoString(&buf, " - ");
			xlog_outdesc(&buf, debug_reader);
		}
		elog(LOG, "%s", buf.data);

		pfree(buf.data);
		pfree(recordBuf.data);
		MemoryContextSwitchTo(oldCxt);
	}
#endif

	/*
	 * Update our global variables
	 */
	ProcLastRecPtr = StartPos;
	XactLastRecEnd = EndPos;

	return EndPos;
}
1241
1242 /*
1243 * Reserves the right amount of space for a record of given size from the WAL.
1244 * *StartPos is set to the beginning of the reserved section, *EndPos to
1245 * its end+1. *PrevPtr is set to the beginning of the previous record; it is
1246 * used to set the xl_prev of this record.
1247 *
1248 * This is the performance critical part of XLogInsert that must be serialized
1249 * across backends. The rest can happen mostly in parallel. Try to keep this
1250 * section as short as possible, insertpos_lck can be heavily contended on a
1251 * busy system.
1252 *
1253 * NB: The space calculation here must match the code in CopyXLogRecordToWAL,
1254 * where we actually copy the record to the reserved space.
1255 */
static void
ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
						  XLogRecPtr *PrevPtr)
{
	XLogCtlInsert *Insert = &XLogCtl->Insert;
	uint64		startbytepos;
	uint64		endbytepos;
	uint64		prevbytepos;

	/* records are MAXALIGNed; must match CopyXLogRecordToWAL's accounting */
	size = MAXALIGN(size);

	/* All (non xlog-switch) records should contain data. */
	Assert(size > SizeOfXLogRecord);

	/*
	 * The duration the spinlock needs to be held is minimized by minimizing
	 * the calculations that have to be done while holding the lock. The
	 * current tip of reserved WAL is kept in CurrBytePos, as a byte position
	 * that only counts "usable" bytes in WAL, that is, it excludes all WAL
	 * page headers. The mapping between "usable" byte positions and physical
	 * positions (XLogRecPtrs) can be done outside the locked region, and
	 * because the usable byte position doesn't include any headers, reserving
	 * X bytes from WAL is almost as simple as "CurrBytePos += X".
	 */
	SpinLockAcquire(&Insert->insertpos_lck);

	/* advance the shared reservation tip; remember old tip as xl_prev base */
	startbytepos = Insert->CurrBytePos;
	endbytepos = startbytepos + size;
	prevbytepos = Insert->PrevBytePos;
	Insert->CurrBytePos = endbytepos;
	Insert->PrevBytePos = startbytepos;

	SpinLockRelease(&Insert->insertpos_lck);

	/* convert usable byte positions to physical XLogRecPtrs, outside lock */
	*StartPos = XLogBytePosToRecPtr(startbytepos);
	*EndPos = XLogBytePosToEndRecPtr(endbytepos);
	*PrevPtr = XLogBytePosToRecPtr(prevbytepos);

	/*
	 * Check that the conversions between "usable byte positions" and
	 * XLogRecPtrs work consistently in both directions.
	 */
	Assert(XLogRecPtrToBytePos(*StartPos) == startbytepos);
	Assert(XLogRecPtrToBytePos(*EndPos) == endbytepos);
	Assert(XLogRecPtrToBytePos(*PrevPtr) == prevbytepos);
}
1302
1303 /*
1304 * Like ReserveXLogInsertLocation(), but for an xlog-switch record.
1305 *
1306 * A log-switch record is handled slightly differently. The rest of the
1307 * segment will be reserved for this insertion, as indicated by the returned
1308 * *EndPos value. However, if we are already at the beginning of the current
1309 * segment, *StartPos and *EndPos are set to the current location without
1310 * reserving any space, and the function returns false.
1311 */
static bool
ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
{
	XLogCtlInsert *Insert = &XLogCtl->Insert;
	uint64		startbytepos;
	uint64		endbytepos;
	uint64		prevbytepos;
	uint32		size = MAXALIGN(SizeOfXLogRecord);	/* switch record is
													 * header-only */
	XLogRecPtr	ptr;
	uint32		segleft;

	/*
	 * These calculations are a bit heavy-weight to be done while holding a
	 * spinlock, but since we're holding all the WAL insertion locks, there
	 * are no other inserters competing for it. GetXLogInsertRecPtr() does
	 * compete for it, but that's not called very frequently.
	 */
	SpinLockAcquire(&Insert->insertpos_lck);

	startbytepos = Insert->CurrBytePos;

	ptr = XLogBytePosToEndRecPtr(startbytepos);
	if (XLogSegmentOffset(ptr, wal_segment_size) == 0)
	{
		/* already at the start of a segment: reserve nothing, report it */
		SpinLockRelease(&Insert->insertpos_lck);
		*EndPos = *StartPos = ptr;
		return false;
	}

	endbytepos = startbytepos + size;
	prevbytepos = Insert->PrevBytePos;

	*StartPos = XLogBytePosToRecPtr(startbytepos);
	*EndPos = XLogBytePosToEndRecPtr(endbytepos);

	segleft = wal_segment_size - XLogSegmentOffset(*EndPos, wal_segment_size);
	if (segleft != wal_segment_size)
	{
		/* consume the rest of the segment */
		*EndPos += segleft;
		endbytepos = XLogRecPtrToBytePos(*EndPos);
	}
	Insert->CurrBytePos = endbytepos;
	Insert->PrevBytePos = startbytepos;

	SpinLockRelease(&Insert->insertpos_lck);

	*PrevPtr = XLogBytePosToRecPtr(prevbytepos);

	/* cross-check byte-position/XLogRecPtr conversions, both directions */
	Assert(XLogSegmentOffset(*EndPos, wal_segment_size) == 0);
	Assert(XLogRecPtrToBytePos(*EndPos) == endbytepos);
	Assert(XLogRecPtrToBytePos(*StartPos) == startbytepos);
	Assert(XLogRecPtrToBytePos(*PrevPtr) == prevbytepos);

	return true;
}
1368
1369 /*
1370 * Checks whether the current buffer page and backup page stored in the
1371 * WAL record are consistent or not. Before comparing the two pages, a
1372 * masking can be applied to the pages to ignore certain areas like hint bits,
1373 * unused space between pd_lower and pd_upper among other things. This
1374 * function should be called once WAL replay has been completed for a
1375 * given record.
1376 */
static void
checkXLogConsistency(XLogReaderState *record)
{
	RmgrId		rmid = XLogRecGetRmid(record);
	RelFileNode rnode;
	ForkNumber	forknum;
	BlockNumber blkno;
	int			block_id;

	/* Records with no backup blocks have no need for consistency checks. */
	if (!XLogRecHasAnyBlockRefs(record))
		return;

	/* caller should only invoke this for records flagged for checking */
	Assert((XLogRecGetInfo(record) & XLR_CHECK_CONSISTENCY) != 0);

	/* check every block reference carried by the record */
	for (block_id = 0; block_id <= record->max_block_id; block_id++)
	{
		Buffer		buf;
		Page		page;

		if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
		{
			/*
			 * WAL record doesn't contain a block reference with the given id.
			 * Do nothing.
			 */
			continue;
		}

		Assert(XLogRecHasBlockImage(record, block_id));

		if (XLogRecBlockImageApply(record, block_id))
		{
			/*
			 * WAL record has already applied the page, so bypass the
			 * consistency check as that would result in comparing the full
			 * page stored in the record with itself.
			 */
			continue;
		}

		/*
		 * Read the contents from the current buffer and store it in a
		 * temporary page.
		 */
		buf = XLogReadBufferExtended(rnode, forknum, blkno,
									 RBM_NORMAL_NO_LOG);
		if (!BufferIsValid(buf))
			continue;

		LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
		page = BufferGetPage(buf);

		/*
		 * Take a copy of the local page where WAL has been applied to have a
		 * comparison base before masking it...
		 */
		memcpy(replay_image_masked, page, BLCKSZ);

		/* No need for this page anymore now that a copy is in. */
		UnlockReleaseBuffer(buf);

		/*
		 * If the block LSN is already ahead of this WAL record, we can't
		 * expect contents to match.  This can happen if recovery is
		 * restarted.
		 */
		if (PageGetLSN(replay_image_masked) > record->EndRecPtr)
			continue;

		/*
		 * Read the contents from the backup copy, stored in WAL record and
		 * store it in a temporary page. There is no need to allocate a new
		 * page here, a local buffer is fine to hold its contents and a mask
		 * can be directly applied on it.
		 */
		if (!RestoreBlockImage(record, block_id, master_image_masked))
			elog(ERROR, "failed to restore block image");

		/*
		 * If masking function is defined, mask both the master and replay
		 * images
		 */
		if (RmgrTable[rmid].rm_mask != NULL)
		{
			RmgrTable[rmid].rm_mask(replay_image_masked, blkno);
			RmgrTable[rmid].rm_mask(master_image_masked, blkno);
		}

		/* Time to compare the master and replay images. */
		if (memcmp(replay_image_masked, master_image_masked, BLCKSZ) != 0)
		{
			elog(FATAL,
				 "inconsistent page found, rel %u/%u/%u, forknum %u, blkno %u",
				 rnode.spcNode, rnode.dbNode, rnode.relNode,
				 forknum, blkno);
		}
	}
}
1476
1477 /*
1478 * Subroutine of XLogInsertRecord. Copies a WAL record to an already-reserved
1479 * area in the WAL.
1480 */
static void
CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata,
					XLogRecPtr StartPos, XLogRecPtr EndPos)
{
	char	   *currpos;
	int			freespace;
	int			written;
	XLogRecPtr	CurrPos;
	XLogPageHeader pagehdr;

	/*
	 * Get a pointer to the right place in the right WAL buffer to start
	 * inserting to.
	 */
	CurrPos = StartPos;
	currpos = GetXLogBuffer(CurrPos);
	freespace = INSERT_FREESPACE(CurrPos);

	/*
	 * there should be enough space for at least the first field (xl_tot_len)
	 * on this page.
	 */
	Assert(freespace >= sizeof(uint32));

	/* Copy record data */
	written = 0;
	while (rdata != NULL)
	{
		char	   *rdata_data = rdata->data;
		int			rdata_len = rdata->len;

		/* this chunk spills onto the next page(s) while it doesn't fit */
		while (rdata_len > freespace)
		{
			/*
			 * Write what fits on this page, and continue on the next page.
			 */
			Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || freespace == 0);
			memcpy(currpos, rdata_data, freespace);
			rdata_data += freespace;
			rdata_len -= freespace;
			written += freespace;
			CurrPos += freespace;

			/*
			 * Get pointer to beginning of next page, and set the xlp_rem_len
			 * in the page header. Set XLP_FIRST_IS_CONTRECORD.
			 *
			 * It's safe to set the contrecord flag and xlp_rem_len without a
			 * lock on the page. All the other flags were already set when the
			 * page was initialized, in AdvanceXLInsertBuffer, and we're the
			 * only backend that needs to set the contrecord flag.
			 */
			currpos = GetXLogBuffer(CurrPos);
			pagehdr = (XLogPageHeader) currpos;
			pagehdr->xlp_rem_len = write_len - written;
			pagehdr->xlp_info |= XLP_FIRST_IS_CONTRECORD;

			/* skip over the page header */
			if (XLogSegmentOffset(CurrPos, wal_segment_size) == 0)
			{
				CurrPos += SizeOfXLogLongPHD;
				currpos += SizeOfXLogLongPHD;
			}
			else
			{
				CurrPos += SizeOfXLogShortPHD;
				currpos += SizeOfXLogShortPHD;
			}
			freespace = INSERT_FREESPACE(CurrPos);
		}

		Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || rdata_len == 0);
		memcpy(currpos, rdata_data, rdata_len);
		currpos += rdata_len;
		CurrPos += rdata_len;
		freespace -= rdata_len;
		written += rdata_len;

		rdata = rdata->next;
	}
	Assert(written == write_len);

	/*
	 * If this was an xlog-switch, it's not enough to write the switch record,
	 * we also have to consume all the remaining space in the WAL segment.  We
	 * have already reserved that space, but we need to actually fill it.
	 */
	if (isLogSwitch && XLogSegmentOffset(CurrPos, wal_segment_size) != 0)
	{
		/* An xlog-switch record doesn't contain any data besides the header */
		Assert(write_len == SizeOfXLogRecord);

		/* Assert that we did reserve the right amount of space */
		Assert(XLogSegmentOffset(EndPos, wal_segment_size) == 0);

		/* Use up all the remaining space on the current page */
		CurrPos += freespace;

		/*
		 * Cause all remaining pages in the segment to be flushed, leaving the
		 * XLog position where it should be, at the start of the next segment.
		 * We do this one page at a time, to make sure we don't deadlock
		 * against ourselves if wal_buffers < wal_segment_size.
		 */
		while (CurrPos < EndPos)
		{
			/*
			 * The minimal action to flush the page would be to call
			 * WALInsertLockUpdateInsertingAt(CurrPos) followed by
			 * AdvanceXLInsertBuffer(...).  The page would be left initialized
			 * mostly to zeros, except for the page header (always the short
			 * variant, as this is never a segment's first page).
			 *
			 * The large vistas of zeros are good for compressibility, but the
			 * headers interrupting them every XLOG_BLCKSZ (with values that
			 * differ from page to page) are not. The effect varies with
			 * compression tool, but bzip2 for instance compresses about an
			 * order of magnitude worse if those headers are left in place.
			 *
			 * Rather than complicating AdvanceXLInsertBuffer itself (which is
			 * called in heavily-loaded circumstances as well as this lightly-
			 * loaded one) with variant behavior, we just use GetXLogBuffer
			 * (which itself calls the two methods we need) to get the pointer
			 * and zero most of the page.  Then we just zero the page header.
			 */
			currpos = GetXLogBuffer(CurrPos);
			MemSet(currpos, 0, SizeOfXLogShortPHD);

			CurrPos += XLOG_BLCKSZ;
		}
	}
	else
	{
		/* Align the end position, so that the next record starts aligned */
		CurrPos = MAXALIGN64(CurrPos);
	}

	/* We should have consumed exactly the space that was reserved. */
	if (CurrPos != EndPos)
		elog(PANIC, "space reserved for WAL record does not match what was written");
}
1621
1622 /*
1623 * Acquire a WAL insertion lock, for inserting to WAL.
1624 */
1625 static void
1626 WALInsertLockAcquire(void)
1627 {
1628 bool immed;
1629
1630 /*
1631 * It doesn't matter which of the WAL insertion locks we acquire, so try
1632 * the one we used last time. If the system isn't particularly busy, it's
1633 * a good bet that it's still available, and it's good to have some
1634 * affinity to a particular lock so that you don't unnecessarily bounce
1635 * cache lines between processes when there's no contention.
1636 *
1637 * If this is the first time through in this backend, pick a lock
1638 * (semi-)randomly. This allows the locks to be used evenly if you have a
1639 * lot of very short connections.
1640 */
1641 static int lockToTry = -1;
1642
1643 if (lockToTry == -1)
1644 lockToTry = MyProc->pgprocno % NUM_XLOGINSERT_LOCKS;
1645 MyLockNo = lockToTry;
1646
1647 /*
1648 * The insertingAt value is initially set to 0, as we don't know our
1649 * insert location yet.
1650 */
1651 immed = LWLockAcquire(&WALInsertLocks[MyLockNo].l.lock, LW_EXCLUSIVE);
1652 if (!immed)
1653 {
1654 /*
1655 * If we couldn't get the lock immediately, try another lock next
1656 * time. On a system with more insertion locks than concurrent
1657 * inserters, this causes all the inserters to eventually migrate to a
1658 * lock that no-one else is using. On a system with more inserters
1659 * than locks, it still helps to distribute the inserters evenly
1660 * across the locks.
1661 */
1662 lockToTry = (lockToTry + 1) % NUM_XLOGINSERT_LOCKS;
1663 }
1664 }
1665
1666 /*
1667 * Acquire all WAL insertion locks, to prevent other backends from inserting
1668 * to WAL.
1669 */
1670 static void
1671 WALInsertLockAcquireExclusive(void)
1672 {
1673 int i;
1674
1675 /*
1676 * When holding all the locks, all but the last lock's insertingAt
1677 * indicator is set to 0xFFFFFFFFFFFFFFFF, which is higher than any real
1678 * XLogRecPtr value, to make sure that no-one blocks waiting on those.
1679 */
1680 for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
1681 {
1682 LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE);
1683 LWLockUpdateVar(&WALInsertLocks[i].l.lock,
1684 &WALInsertLocks[i].l.insertingAt,
1685 PG_UINT64_MAX);
1686 }
1687 /* Variable value reset to 0 at release */
1688 LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE);
1689
1690 holdingAllLocks = true;
1691 }
1692
1693 /*
1694 * Release our insertion lock (or locks, if we're holding them all).
1695 *
1696 * NB: Reset all variables to 0, so they cause LWLockWaitForVar to block the
1697 * next time the lock is acquired.
1698 */
1699 static void
1700 WALInsertLockRelease(void)
1701 {
1702 if (holdingAllLocks)
1703 {
1704 int i;
1705
1706 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
1707 LWLockReleaseClearVar(&WALInsertLocks[i].l.lock,
1708 &WALInsertLocks[i].l.insertingAt,
1709 0);
1710
1711 holdingAllLocks = false;
1712 }
1713 else
1714 {
1715 LWLockReleaseClearVar(&WALInsertLocks[MyLockNo].l.lock,
1716 &WALInsertLocks[MyLockNo].l.insertingAt,
1717 0);
1718 }
1719 }
1720
1721 /*
1722 * Update our insertingAt value, to let others know that we've finished
1723 * inserting up to that point.
1724 */
1725 static void
1726 WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
1727 {
1728 if (holdingAllLocks)
1729 {
1730 /*
1731 * We use the last lock to mark our actual position, see comments in
1732 * WALInsertLockAcquireExclusive.
1733 */
1734 LWLockUpdateVar(&WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.lock,
1735 &WALInsertLocks[NUM_XLOGINSERT_LOCKS - 1].l.insertingAt,
1736 insertingAt);
1737 }
1738 else
1739 LWLockUpdateVar(&WALInsertLocks[MyLockNo].l.lock,
1740 &WALInsertLocks[MyLockNo].l.insertingAt,
1741 insertingAt);
1742 }
1743
1744 /*
1745 * Wait for any WAL insertions < upto to finish.
1746 *
1747 * Returns the location of the oldest insertion that is still in-progress.
1748 * Any WAL prior to that point has been fully copied into WAL buffers, and
1749 * can be flushed out to disk. Because this waits for any insertions older
1750 * than 'upto' to finish, the return value is always >= 'upto'.
1751 *
1752 * Note: When you are about to write out WAL, you must call this function
1753 * *before* acquiring WALWriteLock, to avoid deadlocks. This function might
1754 * need to wait for an insertion to finish (or at least advance to next
1755 * uninitialized page), and the inserter might need to evict an old WAL buffer
1756 * to make room for a new one, which in turn requires WALWriteLock.
1757 */
1758 static XLogRecPtr
1759 WaitXLogInsertionsToFinish(XLogRecPtr upto)
1760 {
1761 uint64 bytepos;
1762 XLogRecPtr reservedUpto;
1763 XLogRecPtr finishedUpto;
1764 XLogCtlInsert *Insert = &XLogCtl->Insert;
1765 int i;
1766
1767 if (MyProc == NULL)
1768 elog(PANIC, "cannot wait without a PGPROC structure");
1769
1770 /* Read the current insert position */
1771 SpinLockAcquire(&Insert->insertpos_lck);
1772 bytepos = Insert->CurrBytePos;
1773 SpinLockRelease(&Insert->insertpos_lck);
1774 reservedUpto = XLogBytePosToEndRecPtr(bytepos);
1775
1776 /*
1777 * No-one should request to flush a piece of WAL that hasn't even been
1778 * reserved yet. However, it can happen if there is a block with a bogus
1779 * LSN on disk, for example. XLogFlush checks for that situation and
1780 * complains, but only after the flush. Here we just assume that to mean
1781 * that all WAL that has been reserved needs to be finished. In this
1782 * corner-case, the return value can be smaller than 'upto' argument.
1783 */
1784 if (upto > reservedUpto)
1785 {
1786 elog(LOG, "request to flush past end of generated WAL; request %X/%X, currpos %X/%X",
1787 (uint32) (upto >> 32), (uint32) upto,
1788 (uint32) (reservedUpto >> 32), (uint32) reservedUpto);
1789 upto = reservedUpto;
1790 }
1791
1792 /*
1793 * Loop through all the locks, sleeping on any in-progress insert older
1794 * than 'upto'.
1795 *
1796 * finishedUpto is our return value, indicating the point upto which all
1797 * the WAL insertions have been finished. Initialize it to the head of
1798 * reserved WAL, and as we iterate through the insertion locks, back it
1799 * out for any insertion that's still in progress.
1800 */
1801 finishedUpto = reservedUpto;
1802 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
1803 {
1804 XLogRecPtr insertingat = InvalidXLogRecPtr;
1805
1806 do
1807 {
1808 /*
1809 * See if this insertion is in progress. LWLockWait will wait for
1810 * the lock to be released, or for the 'value' to be set by a
1811 * LWLockUpdateVar call. When a lock is initially acquired, its
1812 * value is 0 (InvalidXLogRecPtr), which means that we don't know
1813 * where it's inserting yet. We will have to wait for it. If
1814 * it's a small insertion, the record will most likely fit on the
1815 * same page and the inserter will release the lock without ever
1816 * calling LWLockUpdateVar. But if it has to sleep, it will
1817 * advertise the insertion point with LWLockUpdateVar before
1818 * sleeping.
1819 */
1820 if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
1821 &WALInsertLocks[i].l.insertingAt,
1822 insertingat, &insertingat))
1823 {
1824 /* the lock was free, so no insertion in progress */
1825 insertingat = InvalidXLogRecPtr;
1826 break;
1827 }
1828
1829 /*
1830 * This insertion is still in progress. Have to wait, unless the
1831 * inserter has proceeded past 'upto'.
1832 */
1833 } while (insertingat < upto);
1834
1835 if (insertingat != InvalidXLogRecPtr && insertingat < finishedUpto)
1836 finishedUpto = insertingat;
1837 }
1838 return finishedUpto;
1839 }
1840
1841 /*
1842 * Get a pointer to the right location in the WAL buffer containing the
1843 * given XLogRecPtr.
1844 *
1845 * If the page is not initialized yet, it is initialized. That might require
1846 * evicting an old dirty buffer from the buffer cache, which means I/O.
1847 *
1848 * The caller must ensure that the page containing the requested location
1849 * isn't evicted yet, and won't be evicted. The way to ensure that is to
1850 * hold onto a WAL insertion lock with the insertingAt position set to
1851 * something <= ptr. GetXLogBuffer() will update insertingAt if it needs
1852 * to evict an old page from the buffer. (This means that once you call
1853 * GetXLogBuffer() with a given 'ptr', you must not access anything before
1854 * that point anymore, and must not call GetXLogBuffer() with an older 'ptr'
1855 * later, because older buffers might be recycled already)
1856 */
1857 static char *
1858 GetXLogBuffer(XLogRecPtr ptr)
1859 {
1860 int idx;
1861 XLogRecPtr endptr;
1862 static uint64 cachedPage = 0;
1863 static char *cachedPos = NULL;
1864 XLogRecPtr expectedEndPtr;
1865
1866 /*
1867 * Fast path for the common case that we need to access again the same
1868 * page as last time.
1869 */
1870 if (ptr / XLOG_BLCKSZ == cachedPage)
1871 {
1872 Assert(((XLogPageHeader) cachedPos)->xlp_magic == XLOG_PAGE_MAGIC);
1873 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
1874 return cachedPos + ptr % XLOG_BLCKSZ;
1875 }
1876
1877 /*
1878 * The XLog buffer cache is organized so that a page is always loaded to a
1879 * particular buffer. That way we can easily calculate the buffer a given
1880 * page must be loaded into, from the XLogRecPtr alone.
1881 */
1882 idx = XLogRecPtrToBufIdx(ptr);
1883
1884 /*
1885 * See what page is loaded in the buffer at the moment. It could be the
1886 * page we're looking for, or something older. It can't be anything newer
1887 * - that would imply the page we're looking for has already been written
1888 * out to disk and evicted, and the caller is responsible for making sure
1889 * that doesn't happen.
1890 *
1891 * However, we don't hold a lock while we read the value. If someone has
1892 * just initialized the page, it's possible that we get a "torn read" of
1893 * the XLogRecPtr if 64-bit fetches are not atomic on this platform. In
1894 * that case we will see a bogus value. That's ok, we'll grab the mapping
1895 * lock (in AdvanceXLInsertBuffer) and retry if we see anything else than
1896 * the page we're looking for. But it means that when we do this unlocked
1897 * read, we might see a value that appears to be ahead of the page we're
1898 * looking for. Don't PANIC on that, until we've verified the value while
1899 * holding the lock.
1900 */
1901 expectedEndPtr = ptr;
1902 expectedEndPtr += XLOG_BLCKSZ - ptr % XLOG_BLCKSZ;
1903
1904 endptr = XLogCtl->xlblocks[idx];
1905 if (expectedEndPtr != endptr)
1906 {
1907 XLogRecPtr initializedUpto;
1908
1909 /*
1910 * Before calling AdvanceXLInsertBuffer(), which can block, let others
1911 * know how far we're finished with inserting the record.
1912 *
1913 * NB: If 'ptr' points to just after the page header, advertise a
1914 * position at the beginning of the page rather than 'ptr' itself. If
1915 * there are no other insertions running, someone might try to flush
1916 * up to our advertised location. If we advertised a position after
1917 * the page header, someone might try to flush the page header, even
1918 * though page might actually not be initialized yet. As the first
1919 * inserter on the page, we are effectively responsible for making
1920 * sure that it's initialized, before we let insertingAt to move past
1921 * the page header.
1922 */
1923 if (ptr % XLOG_BLCKSZ == SizeOfXLogShortPHD &&
1924 XLogSegmentOffset(ptr, wal_segment_size) > XLOG_BLCKSZ)
1925 initializedUpto = ptr - SizeOfXLogShortPHD;
1926 else if (ptr % XLOG_BLCKSZ == SizeOfXLogLongPHD &&
1927 XLogSegmentOffset(ptr, wal_segment_size) < XLOG_BLCKSZ)
1928 initializedUpto = ptr - SizeOfXLogLongPHD;
1929 else
1930 initializedUpto = ptr;
1931
1932 WALInsertLockUpdateInsertingAt(initializedUpto);
1933
1934 AdvanceXLInsertBuffer(ptr, false);
1935 endptr = XLogCtl->xlblocks[idx];
1936
1937 if (expectedEndPtr != endptr)
1938 elog(PANIC, "could not find WAL buffer for %X/%X",
1939 (uint32) (ptr >> 32), (uint32) ptr);
1940 }
1941 else
1942 {
1943 /*
1944 * Make sure the initialization of the page is visible to us, and
1945 * won't arrive later to overwrite the WAL data we write on the page.
1946 */
1947 pg_memory_barrier();
1948 }
1949
1950 /*
1951 * Found the buffer holding this page. Return a pointer to the right
1952 * offset within the page.
1953 */
1954 cachedPage = ptr / XLOG_BLCKSZ;
1955 cachedPos = XLogCtl->pages + idx * (Size) XLOG_BLCKSZ;
1956
1957 Assert(((XLogPageHeader) cachedPos)->xlp_magic == XLOG_PAGE_MAGIC);
1958 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
1959
1960 return cachedPos + ptr % XLOG_BLCKSZ;
1961 }
1962
1963 /*
1964 * Converts a "usable byte position" to XLogRecPtr. A usable byte position
1965 * is the position starting from the beginning of WAL, excluding all WAL
1966 * page headers.
1967 */
1968 static XLogRecPtr
1969 XLogBytePosToRecPtr(uint64 bytepos)
1970 {
1971 uint64 fullsegs;
1972 uint64 fullpages;
1973 uint64 bytesleft;
1974 uint32 seg_offset;
1975 XLogRecPtr result;
1976
1977 fullsegs = bytepos / UsableBytesInSegment;
1978 bytesleft = bytepos % UsableBytesInSegment;
1979
1980 if (bytesleft < XLOG_BLCKSZ - SizeOfXLogLongPHD)
1981 {
1982 /* fits on first page of segment */
1983 seg_offset = bytesleft + SizeOfXLogLongPHD;
1984 }
1985 else
1986 {
1987 /* account for the first page on segment with long header */
1988 seg_offset = XLOG_BLCKSZ;
1989 bytesleft -= XLOG_BLCKSZ - SizeOfXLogLongPHD;
1990
1991 fullpages = bytesleft / UsableBytesInPage;
1992 bytesleft = bytesleft % UsableBytesInPage;
1993
1994 seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD;
1995 }
1996
1997 XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, wal_segment_size, result);
1998
1999 return result;
2000 }
2001
2002 /*
2003 * Like XLogBytePosToRecPtr, but if the position is at a page boundary,
2004 * returns a pointer to the beginning of the page (ie. before page header),
2005 * not to where the first xlog record on that page would go to. This is used
2006 * when converting a pointer to the end of a record.
2007 */
2008 static XLogRecPtr
2009 XLogBytePosToEndRecPtr(uint64 bytepos)
2010 {
2011 uint64 fullsegs;
2012 uint64 fullpages;
2013 uint64 bytesleft;
2014 uint32 seg_offset;
2015 XLogRecPtr result;
2016
2017 fullsegs = bytepos / UsableBytesInSegment;
2018 bytesleft = bytepos % UsableBytesInSegment;
2019
2020 if (bytesleft < XLOG_BLCKSZ - SizeOfXLogLongPHD)
2021 {
2022 /* fits on first page of segment */
2023 if (bytesleft == 0)
2024 seg_offset = 0;
2025 else
2026 seg_offset = bytesleft + SizeOfXLogLongPHD;
2027 }
2028 else
2029 {
2030 /* account for the first page on segment with long header */
2031 seg_offset = XLOG_BLCKSZ;
2032 bytesleft -= XLOG_BLCKSZ - SizeOfXLogLongPHD;
2033
2034 fullpages = bytesleft / UsableBytesInPage;
2035 bytesleft = bytesleft % UsableBytesInPage;
2036
2037 if (bytesleft == 0)
2038 seg_offset += fullpages * XLOG_BLCKSZ + bytesleft;
2039 else
2040 seg_offset += fullpages * XLOG_BLCKSZ + bytesleft + SizeOfXLogShortPHD;
2041 }
2042
2043 XLogSegNoOffsetToRecPtr(fullsegs, seg_offset, wal_segment_size, result);
2044
2045 return result;
2046 }
2047
2048 /*
2049 * Convert an XLogRecPtr to a "usable byte position".
2050 */
2051 static uint64
2052 XLogRecPtrToBytePos(XLogRecPtr ptr)
2053 {
2054 uint64 fullsegs;
2055 uint32 fullpages;
2056 uint32 offset;
2057 uint64 result;
2058
2059 XLByteToSeg(ptr, fullsegs, wal_segment_size);
2060
2061 fullpages = (XLogSegmentOffset(ptr, wal_segment_size)) / XLOG_BLCKSZ;
2062 offset = ptr % XLOG_BLCKSZ;
2063
2064 if (fullpages == 0)
2065 {
2066 result = fullsegs * UsableBytesInSegment;
2067 if (offset > 0)
2068 {
2069 Assert(offset >= SizeOfXLogLongPHD);
2070 result += offset - SizeOfXLogLongPHD;
2071 }
2072 }
2073 else
2074 {
2075 result = fullsegs * UsableBytesInSegment +
2076 (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
2077 (fullpages - 1) * UsableBytesInPage; /* full pages */
2078 if (offset > 0)
2079 {
2080 Assert(offset >= SizeOfXLogShortPHD);
2081 result += offset - SizeOfXLogShortPHD;
2082 }
2083 }
2084
2085 return result;
2086 }
2087
2088 /*
2089 * Initialize XLOG buffers, writing out old buffers if they still contain
2090 * unwritten data, upto the page containing 'upto'. Or if 'opportunistic' is
2091 * true, initialize as many pages as we can without having to write out
2092 * unwritten data. Any new pages are initialized to zeros, with pages headers
2093 * initialized properly.
2094 */
2095 static void
2096 AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
2097 {
2098 XLogCtlInsert *Insert = &XLogCtl->Insert;
2099 int nextidx;
2100 XLogRecPtr OldPageRqstPtr;
2101 XLogwrtRqst WriteRqst;
2102 XLogRecPtr NewPageEndPtr = InvalidXLogRecPtr;
2103 XLogRecPtr NewPageBeginPtr;
2104 XLogPageHeader NewPage;
2105 int npages = 0;
2106
2107 LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
2108
2109 /*
2110 * Now that we have the lock, check if someone initialized the page
2111 * already.
2112 */
2113 while (upto >= XLogCtl->InitializedUpTo || opportunistic)
2114 {
2115 nextidx = XLogRecPtrToBufIdx(XLogCtl->InitializedUpTo);
2116
2117 /*
2118 * Get ending-offset of the buffer page we need to replace (this may
2119 * be zero if the buffer hasn't been used yet). Fall through if it's
2120 * already written out.
2121 */
2122 OldPageRqstPtr = XLogCtl->xlblocks[nextidx];
2123 if (LogwrtResult.Write < OldPageRqstPtr)
2124 {
2125 /*
2126 * Nope, got work to do. If we just want to pre-initialize as much
2127 * as we can without flushing, give up now.
2128 */
2129 if (opportunistic)
2130 break;
2131
2132 /* Before waiting, get info_lck and update LogwrtResult */
2133 SpinLockAcquire(&XLogCtl->info_lck);
2134 if (XLogCtl->LogwrtRqst.Write < OldPageRqstPtr)
2135 XLogCtl->LogwrtRqst.Write = OldPageRqstPtr;
2136 LogwrtResult = XLogCtl->LogwrtResult;
2137 SpinLockRelease(&XLogCtl->info_lck);
2138
2139 /*
2140 * Now that we have an up-to-date LogwrtResult value, see if we
2141 * still need to write it or if someone else already did.
2142 */
2143 if (LogwrtResult.Write < OldPageRqstPtr)
2144 {
2145 /*
2146 * Must acquire write lock. Release WALBufMappingLock first,
2147 * to make sure that all insertions that we need to wait for
2148 * can finish (up to this same position). Otherwise we risk
2149 * deadlock.
2150 */
2151 LWLockRelease(WALBufMappingLock);
2152
2153 WaitXLogInsertionsToFinish(OldPageRqstPtr);
2154
2155 LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
2156
2157 LogwrtResult = XLogCtl->LogwrtResult;
2158 if (LogwrtResult.Write >= OldPageRqstPtr)
2159 {
2160 /* OK, someone wrote it already */
2161 LWLockRelease(WALWriteLock);
2162 }
2163 else
2164 {
2165 /* Have to write it ourselves */
2166 TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_START();
2167 WriteRqst.Write = OldPageRqstPtr;
2168 WriteRqst.Flush = 0;
2169 XLogWrite(WriteRqst, false);
2170 LWLockRelease(WALWriteLock);
2171 TRACE_POSTGRESQL_WAL_BUFFER_WRITE_DIRTY_DONE();
2172 }
2173 /* Re-acquire WALBufMappingLock and retry */
2174 LWLockAcquire(WALBufMappingLock, LW_EXCLUSIVE);
2175 continue;
2176 }
2177 }
2178
2179 /*
2180 * Now the next buffer slot is free and we can set it up to be the
2181 * next output page.
2182 */
2183 NewPageBeginPtr = XLogCtl->InitializedUpTo;
2184 NewPageEndPtr = NewPageBeginPtr + XLOG_BLCKSZ;
2185
2186 Assert(XLogRecPtrToBufIdx(NewPageBeginPtr) == nextidx);
2187
2188 NewPage = (XLogPageHeader) (XLogCtl->pages + nextidx * (Size) XLOG_BLCKSZ);
2189
2190 /*
2191 * Be sure to re-zero the buffer so that bytes beyond what we've
2192 * written will look like zeroes and not valid XLOG records...
2193 */
2194 MemSet((char *) NewPage, 0, XLOG_BLCKSZ);
2195
2196 /*
2197 * Fill the new page's header
2198 */
2199 NewPage->xlp_magic = XLOG_PAGE_MAGIC;
2200
2201 /* NewPage->xlp_info = 0; */ /* done by memset */
2202 NewPage->xlp_tli = ThisTimeLineID;
2203 NewPage->xlp_pageaddr = NewPageBeginPtr;
2204
2205 /* NewPage->xlp_rem_len = 0; */ /* done by memset */
2206
2207 /*
2208 * If online backup is not in progress, mark the header to indicate
2209 * that WAL records beginning in this page have removable backup
2210 * blocks. This allows the WAL archiver to know whether it is safe to
2211 * compress archived WAL data by transforming full-block records into
2212 * the non-full-block format. It is sufficient to record this at the
2213 * page level because we force a page switch (in fact a segment
2214 * switch) when starting a backup, so the flag will be off before any
2215 * records can be written during the backup. At the end of a backup,
2216 * the last page will be marked as all unsafe when perhaps only part
2217 * is unsafe, but at worst the archiver would miss the opportunity to
2218 * compress a few records.
2219 */
2220 if (!Insert->forcePageWrites)
2221 NewPage->xlp_info |= XLP_BKP_REMOVABLE;
2222
2223 /*
2224 * If a record was found to be broken at the end of recovery, and
2225 * we're going to write on the page where its first contrecord was
2226 * lost, set the XLP_FIRST_IS_OVERWRITE_CONTRECORD flag on the page
2227 * header. See CreateOverwriteContrecordRecord().
2228 */
2229 if (missingContrecPtr == NewPageBeginPtr)
2230 {
2231 NewPage->xlp_info |= XLP_FIRST_IS_OVERWRITE_CONTRECORD;
2232 missingContrecPtr = InvalidXLogRecPtr;
2233 }
2234
2235 /*
2236 * If first page of an XLOG segment file, make it a long header.
2237 */
2238 if ((XLogSegmentOffset(NewPage->xlp_pageaddr, wal_segment_size)) == 0)
2239 {
2240 XLogLongPageHeader NewLongPage = (XLogLongPageHeader) NewPage;
2241
2242 NewLongPage->xlp_sysid = ControlFile->system_identifier;
2243 NewLongPage->xlp_seg_size = wal_segment_size;
2244 NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
2245 NewPage->xlp_info |= XLP_LONG_HEADER;
2246 }
2247
2248 /*
2249 * Make sure the initialization of the page becomes visible to others
2250 * before the xlblocks update. GetXLogBuffer() reads xlblocks without
2251 * holding a lock.
2252 */
2253 pg_write_barrier();
2254
2255 *((volatile XLogRecPtr *) &XLogCtl->xlblocks[nextidx]) = NewPageEndPtr;
2256
2257 XLogCtl->InitializedUpTo = NewPageEndPtr;
2258
2259 npages++;
2260 }
2261 LWLockRelease(WALBufMappingLock);
2262
2263 #ifdef WAL_DEBUG
2264 if (XLOG_DEBUG && npages > 0)
2265 {
2266 elog(DEBUG1, "initialized %d pages, up to %X/%X",
2267 npages, (uint32) (NewPageEndPtr >> 32), (uint32) NewPageEndPtr);
2268 }
2269 #endif
2270 }
2271
2272 /*
2273 * Calculate CheckPointSegments based on max_wal_size_mb and
2274 * checkpoint_completion_target.
2275 */
2276 static void
2277 CalculateCheckpointSegments(void)
2278 {
2279 double target;
2280
2281 /*-------
2282 * Calculate the distance at which to trigger a checkpoint, to avoid
2283 * exceeding max_wal_size_mb. This is based on two assumptions:
2284 *
2285 * a) we keep WAL for only one checkpoint cycle (prior to PG11 we kept
2286 * WAL for two checkpoint cycles to allow us to recover from the
2287 * secondary checkpoint if the first checkpoint failed, though we
2288 * only did this on the master anyway, not on standby. Keeping just
2289 * one checkpoint simplifies processing and reduces disk space in
2290 * many smaller databases.)
2291 * b) during checkpoint, we consume checkpoint_completion_target *
2292 * number of segments consumed between checkpoints.
2293 *-------
2294 */
2295 target = (double) ConvertToXSegs(max_wal_size_mb, wal_segment_size) /
2296 (1.0 + CheckPointCompletionTarget);
2297
2298 /* round down */
2299 CheckPointSegments = (int) target;
2300
2301 if (CheckPointSegments < 1)
2302 CheckPointSegments = 1;
2303 }
2304
/*
 * GUC assign_hook for max_wal_size: store the new value (in MB) and
 * recompute the derived CheckPointSegments limit.
 */
void
assign_max_wal_size(int newval, void *extra)
{
	max_wal_size_mb = newval;
	CalculateCheckpointSegments();
}
2311
/*
 * GUC assign_hook for checkpoint_completion_target: store the new value and
 * recompute the derived CheckPointSegments limit.
 */
void
assign_checkpoint_completion_target(double newval, void *extra)
{
	CheckPointCompletionTarget = newval;
	CalculateCheckpointSegments();
}
2318
2319 /*
2320 * At a checkpoint, how many WAL segments to recycle as preallocated future
2321 * XLOG segments? Returns the highest segment that should be preallocated.
2322 */
2323 static XLogSegNo
2324 XLOGfileslop(XLogRecPtr lastredoptr)
2325 {
2326 XLogSegNo minSegNo;
2327 XLogSegNo maxSegNo;
2328 double distance;
2329 XLogSegNo recycleSegNo;
2330
2331 /*
2332 * Calculate the segment numbers that min_wal_size_mb and max_wal_size_mb
2333 * correspond to. Always recycle enough segments to meet the minimum, and
2334 * remove enough segments to stay below the maximum.
2335 */
2336 minSegNo = lastredoptr / wal_segment_size +
2337 ConvertToXSegs(min_wal_size_mb, wal_segment_size) - 1;
2338 maxSegNo = lastredoptr / wal_segment_size +
2339 ConvertToXSegs(max_wal_size_mb, wal_segment_size) - 1;
2340
2341 /*
2342 * Between those limits, recycle enough segments to get us through to the
2343 * estimated end of next checkpoint.
2344 *
2345 * To estimate where the next checkpoint will finish, assume that the
2346 * system runs steadily consuming CheckPointDistanceEstimate bytes between
2347 * every checkpoint.
2348 */
2349 distance = (1.0 + CheckPointCompletionTarget) * CheckPointDistanceEstimate;
2350 /* add 10% for good measure. */
2351 distance *= 1.10;
2352
2353 recycleSegNo = (XLogSegNo) ceil(((double) lastredoptr + distance) /
2354 wal_segment_size);
2355
2356 if (recycleSegNo < minSegNo)
2357 recycleSegNo = minSegNo;
2358 if (recycleSegNo > maxSegNo)
2359 recycleSegNo = maxSegNo;
2360
2361 return recycleSegNo;
2362 }
2363
2364 /*
2365 * Check whether we've consumed enough xlog space that a checkpoint is needed.
2366 *
2367 * new_segno indicates a log file that has just been filled up (or read
2368 * during recovery). We measure the distance from RedoRecPtr to new_segno
2369 * and see if that exceeds CheckPointSegments.
2370 *
2371 * Note: it is caller's responsibility that RedoRecPtr is up-to-date.
2372 */
2373 static bool
2374 XLogCheckpointNeeded(XLogSegNo new_segno)
2375 {
2376 XLogSegNo old_segno;
2377
2378 XLByteToSeg(RedoRecPtr, old_segno, wal_segment_size);
2379
2380 if (new_segno >= old_segno + (uint64) (CheckPointSegments - 1))
2381 return true;
2382 return false;
2383 }
2384
2385 /*
2386 * Write and/or fsync the log at least as far as WriteRqst indicates.
2387 *
2388 * If flexible == true, we don't have to write as far as WriteRqst, but
2389 * may stop at any convenient boundary (such as a cache or logfile boundary).
2390 * This option allows us to avoid uselessly issuing multiple writes when a
2391 * single one would do.
2392 *
2393 * Must be called with WALWriteLock held. WaitXLogInsertionsToFinish(WriteRqst)
2394 * must be called before grabbing the lock, to make sure the data is ready to
2395 * write.
2396 */
2397 static void
2398 XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
2399 {
2400 bool ispartialpage;
2401 bool last_iteration;
2402 bool finishing_seg;
2403 bool use_existent;
2404 int curridx;
2405 int npages;
2406 int startidx;
2407 uint32 startoffset;
2408
2409 /* We should always be inside a critical section here */
2410 Assert(CritSectionCount > 0);
2411
2412 /*
2413 * Update local LogwrtResult (caller probably did this already, but...)
2414 */
2415 LogwrtResult = XLogCtl->LogwrtResult;
2416
2417 /*
2418 * Since successive pages in the xlog cache are consecutively allocated,
2419 * we can usually gather multiple pages together and issue just one
2420 * write() call. npages is the number of pages we have determined can be
2421 * written together; startidx is the cache block index of the first one,
2422 * and startoffset is the file offset at which it should go. The latter
2423 * two variables are only valid when npages > 0, but we must initialize
2424 * all of them to keep the compiler quiet.
2425 */
2426 npages = 0;
2427 startidx = 0;
2428 startoffset = 0;
2429
2430 /*
2431 * Within the loop, curridx is the cache block index of the page to
2432 * consider writing. Begin at the buffer containing the next unwritten
2433 * page, or last partially written page.
2434 */
2435 curridx = XLogRecPtrToBufIdx(LogwrtResult.Write);
2436
2437 while (LogwrtResult.Write < WriteRqst.Write)
2438 {
2439 /*
2440 * Make sure we're not ahead of the insert process. This could happen
2441 * if we're passed a bogus WriteRqst.Write that is past the end of the
2442 * last page that's been initialized by AdvanceXLInsertBuffer.
2443 */
2444 XLogRecPtr EndPtr = XLogCtl->xlblocks[curridx];
2445
2446 if (LogwrtResult.Write >= EndPtr)
2447 elog(PANIC, "xlog write request %X/%X is past end of log %X/%X",
2448 (uint32) (LogwrtResult.Write >> 32),
2449 (uint32) LogwrtResult.Write,
2450 (uint32) (EndPtr >> 32), (uint32) EndPtr);
2451
2452 /* Advance LogwrtResult.Write to end of current buffer page */
2453 LogwrtResult.Write = EndPtr;
2454 ispartialpage = WriteRqst.Write < LogwrtResult.Write;
2455
2456 if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo,
2457 wal_segment_size))
2458 {
2459 /*
2460 * Switch to new logfile segment. We cannot have any pending
2461 * pages here (since we dump what we have at segment end).
2462 */
2463 Assert(npages == 0);
2464 if (openLogFile >= 0)
2465 XLogFileClose();
2466 XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo,
2467 wal_segment_size);
2468
2469 /* create/use new log file */
2470 use_existent = true;
2471 openLogFile = XLogFileInit(openLogSegNo, &use_existent, true);
2472 openLogOff = 0;
2473 }
2474
2475 /* Make sure we have the current logfile open */
2476 if (openLogFile < 0)
2477 {
2478 XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo,
2479 wal_segment_size);
2480 openLogFile = XLogFileOpen(openLogSegNo);
2481 openLogOff = 0;
2482 }
2483
2484 /* Add current page to the set of pending pages-to-dump */
2485 if (npages == 0)
2486 {
2487 /* first of group */
2488 startidx = curridx;
2489 startoffset = XLogSegmentOffset(LogwrtResult.Write - XLOG_BLCKSZ,
2490 wal_segment_size);
2491 }
2492 npages++;
2493
2494 /*
2495 * Dump the set if this will be the last loop iteration, or if we are
2496 * at the last page of the cache area (since the next page won't be
2497 * contiguous in memory), or if we are at the end of the logfile
2498 * segment.
2499 */
2500 last_iteration = WriteRqst.Write <= LogwrtResult.Write;
2501
2502 finishing_seg = !ispartialpage &&
2503 (startoffset + npages * XLOG_BLCKSZ) >= wal_segment_size;
2504
2505 if (last_iteration ||
2506 curridx == XLogCtl->XLogCacheBlck ||
2507 finishing_seg)
2508 {
2509 char *from;
2510 Size nbytes;
2511 Size nleft;
2512 int written;
2513
2514 /* Need to seek in the file? */
2515 if (openLogOff != startoffset)
2516 {
2517 if (lseek(openLogFile, (off_t) startoffset, SEEK_SET) < 0)
2518 ereport(PANIC,
2519 (errcode_for_file_access(),
2520 errmsg("could not seek in log file %s to offset %u: %m",
2521 XLogFileNameP(ThisTimeLineID, openLogSegNo),
2522 startoffset)));
2523 openLogOff = startoffset;
2524 }
2525
2526 /* OK to write the page(s) */
2527 from = XLogCtl->pages + startidx * (Size) XLOG_BLCKSZ;
2528 nbytes = npages * (Size) XLOG_BLCKSZ;
2529 nleft = nbytes;
2530 do
2531 {
2532 errno = 0;
2533 pgstat_report_wait_start(WAIT_EVENT_WAL_WRITE);
2534 written = write(openLogFile, from, nleft);
2535 pgstat_report_wait_end();
2536 if (written <= 0)
2537 {
2538 if (errno == EINTR)
2539 continue;
2540 ereport(PANIC,
2541 (errcode_for_file_access(),
2542 errmsg("could not write to log file %s "
2543 "at offset %u, length %zu: %m",
2544 XLogFileNameP(ThisTimeLineID, openLogSegNo),
2545 openLogOff, nbytes)));
2546 }
2547 nleft -= written;
2548 from += written;
2549 } while (nleft > 0);
2550
2551 /* Update state for write */
2552 openLogOff += nbytes;
2553 npages = 0;
2554
2555 /*
2556 * If we just wrote the whole last page of a logfile segment,
2557 * fsync the segment immediately. This avoids having to go back
2558 * and re-open prior segments when an fsync request comes along
2559 * later. Doing it here ensures that one and only one backend will
2560 * perform this fsync.
2561 *
2562 * This is also the right place to notify the Archiver that the
2563 * segment is ready to copy to archival storage, and to update the
2564 * timer for archive_timeout, and to signal for a checkpoint if
2565 * too many logfile segments have been used since the last
2566 * checkpoint.
2567 */
2568 if (finishing_seg)
2569 {
2570 issue_xlog_fsync(openLogFile, openLogSegNo);
2571
2572 /* signal that we need to wakeup walsenders later */
2573 WalSndWakeupRequest();
2574
2575 LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
2576
2577 if (XLogArchivingActive())
2578 XLogArchiveNotifySeg(openLogSegNo);
2579
2580 XLogCtl->lastSegSwitchTime = (pg_time_t) time(NULL);
2581 XLogCtl->lastSegSwitchLSN = LogwrtResult.Flush;
2582
2583 /*
2584 * Request a checkpoint if we've consumed too much xlog since
2585 * the last one. For speed, we first check using the local
2586 * copy of RedoRecPtr, which might be out of date; if it looks
2587 * like a checkpoint is needed, forcibly update RedoRecPtr and
2588 * recheck.
2589 */
2590 if (IsUnderPostmaster && XLogCheckpointNeeded(openLogSegNo))
2591 {
2592 (void) GetRedoRecPtr();
2593 if (XLogCheckpointNeeded(openLogSegNo))
2594 RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
2595 }
2596 }
2597 }
2598
2599 if (ispartialpage)
2600 {
2601 /* Only asked to write a partial page */
2602 LogwrtResult.Write = WriteRqst.Write;
2603 break;
2604 }
2605 curridx = NextBufIdx(curridx);
2606
2607 /* If flexible, break out of loop as soon as we wrote something */
2608 if (flexible && npages == 0)
2609 break;
2610 }
2611
2612 Assert(npages == 0);
2613
2614 /*
2615 * If asked to flush, do so
2616 */
2617 if (LogwrtResult.Flush < WriteRqst.Flush &&
2618 LogwrtResult.Flush < LogwrtResult.Write)
2619
2620 {
2621 /*
2622 * Could get here without iterating above loop, in which case we might
2623 * have no open file or the wrong one. However, we do not need to
2624 * fsync more than one file.
2625 */
2626 if (sync_method != SYNC_METHOD_OPEN &&
2627 sync_method != SYNC_METHOD_OPEN_DSYNC)
2628 {
2629 if (openLogFile >= 0 &&
2630 !XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo,
2631 wal_segment_size))
2632 XLogFileClose();
2633 if (openLogFile < 0)
2634 {
2635 XLByteToPrevSeg(LogwrtResult.Write, openLogSegNo,
2636 wal_segment_size);
2637 openLogFile = XLogFileOpen(openLogSegNo);
2638 openLogOff = 0;
2639 }
2640
2641 issue_xlog_fsync(openLogFile, openLogSegNo);
2642 }
2643
2644 /* signal that we need to wakeup walsenders later */
2645 WalSndWakeupRequest();
2646
2647 LogwrtResult.Flush = LogwrtResult.Write;
2648 }
2649
2650 /*
2651 * Update shared-memory status
2652 *
2653 * We make sure that the shared 'request' values do not fall behind the
2654 * 'result' values. This is not absolutely essential, but it saves some
2655 * code in a couple of places.
2656 */
2657 {
2658 SpinLockAcquire(&XLogCtl->info_lck);
2659 XLogCtl->LogwrtResult = LogwrtResult;
2660 if (XLogCtl->LogwrtRqst.Write < LogwrtResult.Write)
2661 XLogCtl->LogwrtRqst.Write = LogwrtResult.Write;
2662 if (XLogCtl->LogwrtRqst.Flush < LogwrtResult.Flush)
2663 XLogCtl->LogwrtRqst.Flush = LogwrtResult.Flush;
2664 SpinLockRelease(&XLogCtl->info_lck);
2665 }
2666 }
2667
2668 /*
2669 * Record the LSN for an asynchronous transaction commit/abort
2670 * and nudge the WALWriter if there is work for it to do.
2671 * (This should not be called for synchronous commits.)
2672 */
2673 void
2674 XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
2675 {
2676 XLogRecPtr WriteRqstPtr = asyncXactLSN;
2677 bool sleeping;
2678
2679 SpinLockAcquire(&XLogCtl->info_lck);
2680 LogwrtResult = XLogCtl->LogwrtResult;
2681 sleeping = XLogCtl->WalWriterSleeping;
2682 if (XLogCtl->asyncXactLSN < asyncXactLSN)
2683 XLogCtl->asyncXactLSN = asyncXactLSN;
2684 SpinLockRelease(&XLogCtl->info_lck);
2685
2686 /*
2687 * If the WALWriter is sleeping, we should kick it to make it come out of
2688 * low-power mode. Otherwise, determine whether there's a full page of
2689 * WAL available to write.
2690 */
2691 if (!sleeping)
2692 {
2693 /* back off to last completed page boundary */
2694 WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
2695
2696 /* if we have already flushed that far, we're done */
2697 if (WriteRqstPtr <= LogwrtResult.Flush)
2698 return;
2699 }
2700
2701 /*
2702 * Nudge the WALWriter: it has a full page of WAL to write, or we want it
2703 * to come out of low-power mode so that this async commit will reach disk
2704 * within the expected amount of time.
2705 */
2706 if (ProcGlobal->walwriterLatch)
2707 SetLatch(ProcGlobal->walwriterLatch);
2708 }
2709
2710 /*
2711 * Record the LSN up to which we can remove WAL because it's not required by
2712 * any replication slot.
2713 */
2714 void
2715 XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
2716 {
2717 SpinLockAcquire(&XLogCtl->info_lck);
2718 XLogCtl->replicationSlotMinLSN = lsn;
2719 SpinLockRelease(&XLogCtl->info_lck);
2720 }
2721
2722
2723 /*
2724 * Return the oldest LSN we must retain to satisfy the needs of some
2725 * replication slot.
2726 */
2727 static XLogRecPtr
2728 XLogGetReplicationSlotMinimumLSN(void)
2729 {
2730 XLogRecPtr retval;
2731
2732 SpinLockAcquire(&XLogCtl->info_lck);
2733 retval = XLogCtl->replicationSlotMinLSN;
2734 SpinLockRelease(&XLogCtl->info_lck);
2735
2736 return retval;
2737 }
2738
2739 /*
2740 * Advance minRecoveryPoint in control file.
2741 *
2742 * If we crash during recovery, we must reach this point again before the
2743 * database is consistent.
2744 *
2745 * If 'force' is true, 'lsn' argument is ignored. Otherwise, minRecoveryPoint
2746 * is only updated if it's not already greater than or equal to 'lsn'.
2747 */
2748 static void
2749 UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
2750 {
2751 /* Quick check using our local copy of the variable */
2752 if (!updateMinRecoveryPoint || (!force && lsn <= minRecoveryPoint))
2753 return;
2754
2755 /*
2756 * An invalid minRecoveryPoint means that we need to recover all the WAL,
2757 * i.e., we're doing crash recovery. We never modify the control file's
2758 * value in that case, so we can short-circuit future checks here too. The
2759 * local values of minRecoveryPoint and minRecoveryPointTLI should not be
2760 * updated until crash recovery finishes. We only do this for the startup
2761 * process as it should not update its own reference of minRecoveryPoint
2762 * until it has finished crash recovery to make sure that all WAL
2763 * available is replayed in this case. This also saves from extra locks
2764 * taken on the control file from the startup process.
2765 */
2766 if (XLogRecPtrIsInvalid(minRecoveryPoint) && InRecovery)
2767 {
2768 updateMinRecoveryPoint = false;
2769 return;
2770 }
2771
2772 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
2773
2774 /* update local copy */
2775 minRecoveryPoint = ControlFile->minRecoveryPoint;
2776 minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
2777
2778 if (XLogRecPtrIsInvalid(minRecoveryPoint))
2779 updateMinRecoveryPoint = false;
2780 else if (force || minRecoveryPoint < lsn)
2781 {
2782 XLogRecPtr newMinRecoveryPoint;
2783 TimeLineID newMinRecoveryPointTLI;
2784
2785 /*
2786 * To avoid having to update the control file too often, we update it
2787 * all the way to the last record being replayed, even though 'lsn'
2788 * would suffice for correctness. This also allows the 'force' case
2789 * to not need a valid 'lsn' value.
2790 *
2791 * Another important reason for doing it this way is that the passed
2792 * 'lsn' value could be bogus, i.e., past the end of available WAL, if
2793 * the caller got it from a corrupted heap page. Accepting such a
2794 * value as the min recovery point would prevent us from coming up at
2795 * all. Instead, we just log a warning and continue with recovery.
2796 * (See also the comments about corrupt LSNs in XLogFlush.)
2797 */
2798 SpinLockAcquire(&XLogCtl->info_lck);
2799 newMinRecoveryPoint = XLogCtl->replayEndRecPtr;
2800 newMinRecoveryPointTLI = XLogCtl->replayEndTLI;
2801 SpinLockRelease(&XLogCtl->info_lck);
2802
2803 if (!force && newMinRecoveryPoint < lsn)
2804 elog(WARNING,
2805 "xlog min recovery request %X/%X is past current point %X/%X",
2806 (uint32) (lsn >> 32), (uint32) lsn,
2807 (uint32) (newMinRecoveryPoint >> 32),
2808 (uint32) newMinRecoveryPoint);
2809
2810 /* update control file */
2811 if (ControlFile->minRecoveryPoint < newMinRecoveryPoint)
2812 {
2813 ControlFile->minRecoveryPoint = newMinRecoveryPoint;
2814 ControlFile->minRecoveryPointTLI = newMinRecoveryPointTLI;
2815 UpdateControlFile();
2816 minRecoveryPoint = newMinRecoveryPoint;
2817 minRecoveryPointTLI = newMinRecoveryPointTLI;
2818
2819 ereport(DEBUG2,
2820 (errmsg("updated min recovery point to %X/%X on timeline %u",
2821 (uint32) (minRecoveryPoint >> 32),
2822 (uint32) minRecoveryPoint,
2823 newMinRecoveryPointTLI)));
2824 }
2825 }
2826 LWLockRelease(ControlFileLock);
2827 }
2828
2829 /*
2830 * Ensure that all XLOG data through the given position is flushed to disk.
2831 *
2832 * NOTE: this differs from XLogWrite mainly in that the WALWriteLock is not
2833 * already held, and we try to avoid acquiring it if possible.
2834 */
2835 void
2836 XLogFlush(XLogRecPtr record)
2837 {
2838 XLogRecPtr WriteRqstPtr;
2839 XLogwrtRqst WriteRqst;
2840
2841 /*
2842 * During REDO, we are reading not writing WAL. Therefore, instead of
2843 * trying to flush the WAL, we should update minRecoveryPoint instead. We
2844 * test XLogInsertAllowed(), not InRecovery, because we need checkpointer
2845 * to act this way too, and because when it tries to write the
2846 * end-of-recovery checkpoint, it should indeed flush.
2847 */
2848 if (!XLogInsertAllowed())
2849 {
2850 UpdateMinRecoveryPoint(record, false);
2851 return;
2852 }
2853
2854 /* Quick exit if already known flushed */
2855 if (record <= LogwrtResult.Flush)
2856 return;
2857
2858 #ifdef WAL_DEBUG
2859 if (XLOG_DEBUG)
2860 elog(LOG, "xlog flush request %X/%X; write %X/%X; flush %X/%X",
2861 (uint32) (record >> 32), (uint32) record,
2862 (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
2863 (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
2864 #endif
2865
2866 START_CRIT_SECTION();
2867
2868 /*
2869 * Since fsync is usually a horribly expensive operation, we try to
2870 * piggyback as much data as we can on each fsync: if we see any more data
2871 * entered into the xlog buffer, we'll write and fsync that too, so that
2872 * the final value of LogwrtResult.Flush is as large as possible. This
2873 * gives us some chance of avoiding another fsync immediately after.
2874 */
2875
2876 /* initialize to given target; may increase below */
2877 WriteRqstPtr = record;
2878
2879 /*
2880 * Now wait until we get the write lock, or someone else does the flush
2881 * for us.
2882 */
2883 for (;;)
2884 {
2885 XLogRecPtr insertpos;
2886
2887 /* read LogwrtResult and update local state */
2888 SpinLockAcquire(&XLogCtl->info_lck);
2889 if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
2890 WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
2891 LogwrtResult = XLogCtl->LogwrtResult;
2892 SpinLockRelease(&XLogCtl->info_lck);
2893
2894 /* done already? */
2895 if (record <= LogwrtResult.Flush)
2896 break;
2897
2898 /*
2899 * Before actually performing the write, wait for all in-flight
2900 * insertions to the pages we're about to write to finish.
2901 */
2902 insertpos = WaitXLogInsertionsToFinish(WriteRqstPtr);
2903
2904 /*
2905 * Try to get the write lock. If we can't get it immediately, wait
2906 * until it's released, and recheck if we still need to do the flush
2907 * or if the backend that held the lock did it for us already. This
2908 * helps to maintain a good rate of group committing when the system
2909 * is bottlenecked by the speed of fsyncing.
2910 */
2911 if (!LWLockAcquireOrWait(WALWriteLock, LW_EXCLUSIVE))
2912 {
2913 /*
2914 * The lock is now free, but we didn't acquire it yet. Before we
2915 * do, loop back to check if someone else flushed the record for
2916 * us already.
2917 */
2918 continue;
2919 }
2920
2921 /* Got the lock; recheck whether request is satisfied */
2922 LogwrtResult = XLogCtl->LogwrtResult;
2923 if (record <= LogwrtResult.Flush)
2924 {
2925 LWLockRelease(WALWriteLock);
2926 break;
2927 }
2928
2929 /*
2930 * Sleep before flush! By adding a delay here, we may give further
2931 * backends the opportunity to join the backlog of group commit
2932 * followers; this can significantly improve transaction throughput,
2933 * at the risk of increasing transaction latency.
2934 *
2935 * We do not sleep if enableFsync is not turned on, nor if there are
2936 * fewer than CommitSiblings other backends with active transactions.
2937 */
2938 if (CommitDelay > 0 && enableFsync &&
2939 MinimumActiveBackends(CommitSiblings))
2940 {
2941 pg_usleep(CommitDelay);
2942
2943 /*
2944 * Re-check how far we can now flush the WAL. It's generally not
2945 * safe to call WaitXLogInsertionsToFinish while holding
2946 * WALWriteLock, because an in-progress insertion might need to
2947 * also grab WALWriteLock to make progress. But we know that all
2948 * the insertions up to insertpos have already finished, because
2949 * that's what the earlier WaitXLogInsertionsToFinish() returned.
2950 * We're only calling it again to allow insertpos to be moved
2951 * further forward, not to actually wait for anyone.
2952 */
2953 insertpos = WaitXLogInsertionsToFinish(insertpos);
2954 }
2955
2956 /* try to write/flush later additions to XLOG as well */
2957 WriteRqst.Write = insertpos;
2958 WriteRqst.Flush = insertpos;
2959
2960 XLogWrite(WriteRqst, false);
2961
2962 LWLockRelease(WALWriteLock);
2963 /* done */
2964 break;
2965 }
2966
2967 END_CRIT_SECTION();
2968
2969 /* wake up walsenders now that we've released heavily contended locks */
2970 WalSndWakeupProcessRequests();
2971
2972 /*
2973 * If we still haven't flushed to the request point then we have a
2974 * problem; most likely, the requested flush point is past end of XLOG.
2975 * This has been seen to occur when a disk page has a corrupted LSN.
2976 *
2977 * Formerly we treated this as a PANIC condition, but that hurts the
2978 * system's robustness rather than helping it: we do not want to take down
2979 * the whole system due to corruption on one data page. In particular, if
2980 * the bad page is encountered again during recovery then we would be
2981 * unable to restart the database at all! (This scenario actually
2982 * happened in the field several times with 7.1 releases.) As of 8.4, bad
2983 * LSNs encountered during recovery are UpdateMinRecoveryPoint's problem;
2984 * the only time we can reach here during recovery is while flushing the
2985 * end-of-recovery checkpoint record, and we don't expect that to have a
2986 * bad LSN.
2987 *
2988 * Note that for calls from xact.c, the ERROR will be promoted to PANIC
2989 * since xact.c calls this routine inside a critical section. However,
2990 * calls from bufmgr.c are not within critical sections and so we will not
2991 * force a restart for a bad LSN on a data page.
2992 */
2993 if (LogwrtResult.Flush < record)
2994 elog(ERROR,
2995 "xlog flush request %X/%X is not satisfied --- flushed only to %X/%X",
2996 (uint32) (record >> 32), (uint32) record,
2997 (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
2998 }
2999
3000 /*
3001 * Write & flush xlog, but without specifying exactly where to.
3002 *
3003 * We normally write only completed blocks; but if there is nothing to do on
3004 * that basis, we check for unwritten async commits in the current incomplete
3005 * block, and write through the latest one of those. Thus, if async commits
3006 * are not being used, we will write complete blocks only.
3007 *
3008 * If, based on the above, there's anything to write we do so immediately. But
3009 * to avoid calling fsync, fdatasync et. al. at a rate that'd impact
3010 * concurrent IO, we only flush WAL every wal_writer_delay ms, or if there's
3011 * more than wal_writer_flush_after unflushed blocks.
3012 *
3013 * We can guarantee that async commits reach disk after at most three
3014 * wal_writer_delay cycles. (When flushing complete blocks, we allow XLogWrite
3015 * to write "flexibly", meaning it can stop at the end of the buffer ring;
3016 * this makes a difference only with very high load or long wal_writer_delay,
3017 * but imposes one extra cycle for the worst case for async commits.)
3018 *
3019 * This routine is invoked periodically by the background walwriter process.
3020 *
3021 * Returns true if there was any work to do, even if we skipped flushing due
3022 * to wal_writer_delay/wal_writer_flush_after.
3023 */
3024 bool
3025 XLogBackgroundFlush(void)
3026 {
3027 XLogwrtRqst WriteRqst;
3028 bool flexible = true;
3029 static TimestampTz lastflush;
3030 TimestampTz now;
3031 int flushbytes;
3032
3033 /* XLOG doesn't need flushing during recovery */
3034 if (RecoveryInProgress())
3035 return false;
3036
3037 /* read LogwrtResult and update local state */
3038 SpinLockAcquire(&XLogCtl->info_lck);
3039 LogwrtResult = XLogCtl->LogwrtResult;
3040 WriteRqst = XLogCtl->LogwrtRqst;
3041 SpinLockRelease(&XLogCtl->info_lck);
3042
3043 /* back off to last completed page boundary */
3044 WriteRqst.Write -= WriteRqst.Write % XLOG_BLCKSZ;
3045
3046 /* if we have already flushed that far, consider async commit records */
3047 if (WriteRqst.Write <= LogwrtResult.Flush)
3048 {
3049 SpinLockAcquire(&XLogCtl->info_lck);
3050 WriteRqst.Write = XLogCtl->asyncXactLSN;
3051 SpinLockRelease(&XLogCtl->info_lck);
3052 flexible = false; /* ensure it all gets written */
3053 }
3054
3055 /*
3056 * If already known flushed, we're done. Just need to check if we are
3057 * holding an open file handle to a logfile that's no longer in use,
3058 * preventing the file from being deleted.
3059 */
3060 if (WriteRqst.Write <= LogwrtResult.Flush)
3061 {
3062 if (openLogFile >= 0)
3063 {
3064 if (!XLByteInPrevSeg(LogwrtResult.Write, openLogSegNo,
3065 wal_segment_size))
3066 {
3067 XLogFileClose();
3068 }
3069 }
3070 return false;
3071 }
3072
3073 /*
3074 * Determine how far to flush WAL, based on the wal_writer_delay and
3075 * wal_writer_flush_after GUCs.
3076 */
3077 now = GetCurrentTimestamp();
3078 flushbytes =
3079 WriteRqst.Write / XLOG_BLCKSZ - LogwrtResult.Flush / XLOG_BLCKSZ;
3080
3081 if (WalWriterFlushAfter == 0 || lastflush == 0)
3082 {
3083 /* first call, or block based limits disabled */
3084 WriteRqst.Flush = WriteRqst.Write;
3085 lastflush = now;
3086 }
3087 else if (TimestampDifferenceExceeds(lastflush, now, WalWriterDelay))
3088 {
3089 /*
3090 * Flush the writes at least every WalWriteDelay ms. This is important
3091 * to bound the amount of time it takes for an asynchronous commit to
3092 * hit disk.
3093 */
3094 WriteRqst.Flush = WriteRqst.Write;
3095 lastflush = now;
3096 }
3097 else if (flushbytes >= WalWriterFlushAfter)
3098 {
3099 /* exceeded wal_writer_flush_after blocks, flush */
3100 WriteRqst.Flush = WriteRqst.Write;
3101 lastflush = now;
3102 }
3103 else
3104 {
3105 /* no flushing, this time round */
3106 WriteRqst.Flush = 0;
3107 }
3108
3109 #ifdef WAL_DEBUG
3110 if (XLOG_DEBUG)
3111 elog(LOG, "xlog bg flush request write %X/%X; flush: %X/%X, current is write %X/%X; flush %X/%X",
3112 (uint32) (WriteRqst.Write >> 32), (uint32) WriteRqst.Write,
3113 (uint32) (WriteRqst.Flush >> 32), (uint32) WriteRqst.Flush,
3114 (uint32) (LogwrtResult.Write >> 32), (uint32) LogwrtResult.Write,
3115 (uint32) (LogwrtResult.Flush >> 32), (uint32) LogwrtResult.Flush);
3116 #endif
3117
3118 START_CRIT_SECTION();
3119
3120 /* now wait for any in-progress insertions to finish and get write lock */
3121 WaitXLogInsertionsToFinish(WriteRqst.Write);
3122 LWLockAcquire(WALWriteLock, LW_EXCLUSIVE);
3123 LogwrtResult = XLogCtl->LogwrtResult;
3124 if (WriteRqst.Write > LogwrtResult.Write ||
3125 WriteRqst.Flush > LogwrtResult.Flush)
3126 {
3127 XLogWrite(WriteRqst, flexible);
3128 }
3129 LWLockRelease(WALWriteLock);
3130
3131 END_CRIT_SECTION();
3132
3133 /* wake up walsenders now that we've released heavily contended locks */
3134 WalSndWakeupProcessRequests();
3135
3136 /*
3137 * Great, done. To take some work off the critical path, try to initialize
3138 * as many of the no-longer-needed WAL buffers for future use as we can.
3139 */
3140 AdvanceXLInsertBuffer(InvalidXLogRecPtr, true);
3141
3142 /*
3143 * If we determined that we need to write data, but somebody else
3144 * wrote/flushed already, it should be considered as being active, to
3145 * avoid hibernating too early.
3146 */
3147 return true;
3148 }
3149
3150 /*
3151 * Test whether XLOG data has been flushed up to (at least) the given position.
3152 *
3153 * Returns true if a flush is still needed. (It may be that someone else
3154 * is already in process of flushing that far, however.)
3155 */
3156 bool
3157 XLogNeedsFlush(XLogRecPtr record)
3158 {
3159 /*
3160 * During recovery, we don't flush WAL but update minRecoveryPoint
3161 * instead. So "needs flush" is taken to mean whether minRecoveryPoint
3162 * would need to be updated.
3163 */
3164 if (RecoveryInProgress())
3165 {
3166 /*
3167 * An invalid minRecoveryPoint means that we need to recover all the
3168 * WAL, i.e., we're doing crash recovery. We never modify the control
3169 * file's value in that case, so we can short-circuit future checks
3170 * here too. This triggers a quick exit path for the startup process,
3171 * which cannot update its local copy of minRecoveryPoint as long as
3172 * it has not replayed all WAL available when doing crash recovery.
3173 */
3174 if (XLogRecPtrIsInvalid(minRecoveryPoint) && InRecovery)
3175 updateMinRecoveryPoint = false;
3176
3177 /* Quick exit if already known to be updated or cannot be updated */
3178 if (record <= minRecoveryPoint || !updateMinRecoveryPoint)
3179 return false;
3180
3181 /*
3182 * Update local copy of minRecoveryPoint. But if the lock is busy,
3183 * just return a conservative guess.
3184 */
3185 if (!LWLockConditionalAcquire(ControlFileLock, LW_SHARED))
3186 return true;
3187 minRecoveryPoint = ControlFile->minRecoveryPoint;
3188 minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
3189 LWLockRelease(ControlFileLock);
3190
3191 /*
3192 * Check minRecoveryPoint for any other process than the startup
3193 * process doing crash recovery, which should not update the control
3194 * file value if crash recovery is still running.
3195 */
3196 if (XLogRecPtrIsInvalid(minRecoveryPoint))
3197 updateMinRecoveryPoint = false;
3198
3199 /* check again */
3200 if (record <= minRecoveryPoint || !updateMinRecoveryPoint)
3201 return false;
3202 else
3203 return true;
3204 }
3205
3206 /* Quick exit if already known flushed */
3207 if (record <= LogwrtResult.Flush)
3208 return false;
3209
3210 /* read LogwrtResult and update local state */
3211 SpinLockAcquire(&XLogCtl->info_lck);
3212 LogwrtResult = XLogCtl->LogwrtResult;
3213 SpinLockRelease(&XLogCtl->info_lck);
3214
3215 /* check again */
3216 if (record <= LogwrtResult.Flush)
3217 return false;
3218
3219 return true;
3220 }
3221
3222 /*
3223 * Create a new XLOG file segment, or open a pre-existing one.
3224 *
3225 * log, seg: identify segment to be created/opened.
3226 *
3227 * *use_existent: if true, OK to use a pre-existing file (else, any
3228 * pre-existing file will be deleted). On return, true if a pre-existing
3229 * file was used.
3230 *
3231 * use_lock: if true, acquire ControlFileLock while moving file into
3232 * place. This should be true except during bootstrap log creation. The
3233 * caller must *not* hold the lock at call.
3234 *
3235 * Returns FD of opened file.
3236 *
3237 * Note: errors here are ERROR not PANIC because we might or might not be
3238 * inside a critical section (eg, during checkpoint there is no reason to
3239 * take down the system on failure). They will promote to PANIC if we are
3240 * in a critical section.
3241 */
3242 int
3243 XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock)
3244 {
3245 char path[MAXPGPATH];
3246 char tmppath[MAXPGPATH];
3247 PGAlignedXLogBlock zbuffer;
3248 XLogSegNo installed_segno;
3249 XLogSegNo max_segno;
3250 int fd;
3251 int nbytes;
3252
3253 XLogFilePath(path, ThisTimeLineID, logsegno, wal_segment_size);
3254
3255 /*
3256 * Try to use existent file (checkpoint maker may have created it already)
3257 */
3258 if (*use_existent)
3259 {
3260 fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method));
3261 if (fd < 0)
3262 {
3263 if (errno != ENOENT)
3264 ereport(ERROR,
3265 (errcode_for_file_access(),
3266 errmsg("could not open file \"%s\": %m", path)));
3267 }
3268 else
3269 return fd;
3270 }
3271
3272 /*
3273 * Initialize an empty (all zeroes) segment. NOTE: it is possible that
3274 * another process is doing the same thing. If so, we will end up
3275 * pre-creating an extra log segment. That seems OK, and better than
3276 * holding the lock throughout this lengthy process.
3277 */
3278 elog(DEBUG2, "creating and filling new WAL file");
3279
3280 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3281
3282 unlink(tmppath);
3283
3284 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3285 fd = BasicOpenFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
3286 if (fd < 0)
3287 ereport(ERROR,
3288 (errcode_for_file_access(),
3289 errmsg("could not create file \"%s\": %m", tmppath)));
3290
3291 /*
3292 * Zero-fill the file. We have to do this the hard way to ensure that all
3293 * the file space has really been allocated --- on platforms that allow
3294 * "holes" in files, just seeking to the end doesn't allocate intermediate
3295 * space. This way, we know that we have all the space and (after the
3296 * fsync below) that all the indirect blocks are down on disk. Therefore,
3297 * fdatasync(2) or O_DSYNC will be sufficient to sync future writes to the
3298 * log file.
3299 */
3300 memset(zbuffer.data, 0, XLOG_BLCKSZ);
3301 for (nbytes = 0; nbytes < wal_segment_size; nbytes += XLOG_BLCKSZ)
3302 {
3303 errno = 0;
3304 pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_WRITE);
3305 if ((int) write(fd, zbuffer.data, XLOG_BLCKSZ) != (int) XLOG_BLCKSZ)
3306 {
3307 int save_errno = errno;
3308
3309 /*
3310 * If we fail to make the file, delete it to release disk space
3311 */
3312 unlink(tmppath);
3313
3314 close(fd);
3315
3316 /* if write didn't set errno, assume problem is no disk space */
3317 errno = save_errno ? save_errno : ENOSPC;
3318
3319 ereport(ERROR,
3320 (errcode_for_file_access(),
3321 errmsg("could not write to file \"%s\": %m", tmppath)));
3322 }
3323 pgstat_report_wait_end();
3324 }
3325
3326 pgstat_report_wait_start(WAIT_EVENT_WAL_INIT_SYNC);
3327 if (pg_fsync(fd) != 0)
3328 {
3329 int save_errno = errno;
3330
3331 close(fd);
3332 errno = save_errno;
3333 ereport(ERROR,
3334 (errcode_for_file_access(),
3335 errmsg("could not fsync file \"%s\": %m", tmppath)));
3336 }
3337 pgstat_report_wait_end();
3338
3339 if (close(fd))
3340 ereport(ERROR,
3341 (errcode_for_file_access(),
3342 errmsg("could not close file \"%s\": %m", tmppath)));
3343
3344 /*
3345 * Now move the segment into place with its final name.
3346 *
3347 * If caller didn't want to use a pre-existing file, get rid of any
3348 * pre-existing file. Otherwise, cope with possibility that someone else
3349 * has created the file while we were filling ours: if so, use ours to
3350 * pre-create a future log segment.
3351 */
3352 installed_segno = logsegno;
3353
3354 /*
3355 * XXX: What should we use as max_segno? We used to use XLOGfileslop when
3356 * that was a constant, but that was always a bit dubious: normally, at a
3357 * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
3358 * here, it was the offset from the insert location. We can't do the
3359 * normal XLOGfileslop calculation here because we don't have access to
3360 * the prior checkpoint's redo location. So somewhat arbitrarily, just use
3361 * CheckPointSegments.
3362 */
3363 max_segno = logsegno + CheckPointSegments;
3364 if (!InstallXLogFileSegment(&installed_segno, tmppath,
3365 *use_existent, max_segno,
3366 use_lock))
3367 {
3368 /*
3369 * No need for any more future segments, or InstallXLogFileSegment()
3370 * failed to rename the file into place. If the rename failed, opening
3371 * the file below will fail.
3372 */
3373 unlink(tmppath);
3374 }
3375
3376 /* Set flag to tell caller there was no existent file */
3377 *use_existent = false;
3378
3379 /* Now open original target segment (might not be file I just made) */
3380 fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method));
3381 if (fd < 0)
3382 ereport(ERROR,
3383 (errcode_for_file_access(),
3384 errmsg("could not open file \"%s\": %m", path)));
3385
3386 elog(DEBUG2, "done creating and filling new WAL file");
3387
3388 return fd;
3389 }
3390
3391 /*
3392 * Create a new XLOG file segment by copying a pre-existing one.
3393 *
3394 * destsegno: identify segment to be created.
3395 *
3396 * srcTLI, srcsegno: identify segment to be copied (could be from
3397 * a different timeline)
3398 *
3399 * upto: how much of the source file to copy (the rest is filled with
3400 * zeros)
3401 *
3402 * Currently this is only used during recovery, and so there are no locking
3403 * considerations. But we should be just as tense as XLogFileInit to avoid
3404 * emplacing a bogus file.
3405 */
3406 static void
3407 XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno,
3408 int upto)
3409 {
3410 char path[MAXPGPATH];
3411 char tmppath[MAXPGPATH];
3412 PGAlignedXLogBlock buffer;
3413 int srcfd;
3414 int fd;
3415 int nbytes;
3416
3417 /*
3418 * Open the source file
3419 */
3420 XLogFilePath(path, srcTLI, srcsegno, wal_segment_size);
3421 srcfd = OpenTransientFile(path, O_RDONLY | PG_BINARY);
3422 if (srcfd < 0)
3423 ereport(ERROR,
3424 (errcode_for_file_access(),
3425 errmsg("could not open file \"%s\": %m", path)));
3426
3427 /*
3428 * Copy into a temp file name.
3429 */
3430 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3431
3432 unlink(tmppath);
3433
3434 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3435 fd = OpenTransientFile(tmppath, O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
3436 if (fd < 0)
3437 ereport(ERROR,
3438 (errcode_for_file_access(),
3439 errmsg("could not create file \"%s\": %m", tmppath)));
3440
3441 /*
3442 * Do the data copying.
3443 */
3444 for (nbytes = 0; nbytes < wal_segment_size; nbytes += sizeof(buffer))
3445 {
3446 int nread;
3447
3448 nread = upto - nbytes;
3449
3450 /*
3451 * The part that is not read from the source file is filled with
3452 * zeros.
3453 */
3454 if (nread < sizeof(buffer))
3455 memset(buffer.data, 0, sizeof(buffer));
3456
3457 if (nread > 0)
3458 {
3459 if (nread > sizeof(buffer))
3460 nread = sizeof(buffer);
3461 errno = 0;
3462 pgstat_report_wait_start(WAIT_EVENT_WAL_COPY_READ);
3463 if (read(srcfd, buffer.data, nread) != nread)
3464 {
3465 if (errno != 0)
3466 ereport(ERROR,
3467 (errcode_for_file_access(),
3468 errmsg("could not read file \"%s\": %m",
3469 path)));
3470 else
3471 ereport(ERROR,
3472 (errmsg("not enough data in file \"%s\"",
3473 path)));
3474 }
3475 pgstat_report_wait_end();
3476 }
3477 errno = 0;
3478 pgstat_report_wait_start(WAIT_EVENT_WAL_COPY_WRITE);
3479 if ((int) write(fd, buffer.data, sizeof(buffer)) != (int) sizeof(buffer))
3480 {
3481 int save_errno = errno;
3482
3483 /*
3484 * If we fail to make the file, delete it to release disk space
3485 */
3486 unlink(tmppath);
3487 /* if write didn't set errno, assume problem is no disk space */
3488 errno = save_errno ? save_errno : ENOSPC;
3489
3490 ereport(ERROR,
3491 (errcode_for_file_access(),
3492 errmsg("could not write to file \"%s\": %m", tmppath)));
3493 }
3494 pgstat_report_wait_end();
3495 }
3496
3497 pgstat_report_wait_start(WAIT_EVENT_WAL_COPY_SYNC);
3498 if (pg_fsync(fd) != 0)
3499 ereport(data_sync_elevel(ERROR),
3500 (errcode_for_file_access(),
3501 errmsg("could not fsync file \"%s\": %m", tmppath)));
3502 pgstat_report_wait_end();
3503
3504 if (CloseTransientFile(fd))
3505 ereport(ERROR,
3506 (errcode_for_file_access(),
3507 errmsg("could not close file \"%s\": %m", tmppath)));
3508
3509 CloseTransientFile(srcfd);
3510
3511 /*
3512 * Now move the segment into place with its final name.
3513 */
3514 if (!InstallXLogFileSegment(&destsegno, tmppath, false, 0, false))
3515 elog(ERROR, "InstallXLogFileSegment should not have failed");
3516 }
3517
3518 /*
3519 * Install a new XLOG segment file as a current or future log segment.
3520 *
3521 * This is used both to install a newly-created segment (which has a temp
3522 * filename while it's being created) and to recycle an old segment.
3523 *
3524 * *segno: identify segment to install as (or first possible target).
3525 * When find_free is true, this is modified on return to indicate the
3526 * actual installation location or last segment searched.
3527 *
3528 * tmppath: initial name of file to install. It will be renamed into place.
3529 *
3530 * find_free: if true, install the new segment at the first empty segno
3531 * number at or after the passed numbers. If false, install the new segment
3532 * exactly where specified, deleting any existing segment file there.
3533 *
3534 * max_segno: maximum segment number to install the new file as. Fail if no
3535 * free slot is found between *segno and max_segno. (Ignored when find_free
3536 * is false.)
3537 *
3538 * use_lock: if true, acquire ControlFileLock while moving file into
3539 * place. This should be true except during bootstrap log creation. The
3540 * caller must *not* hold the lock at call.
3541 *
3542 * Returns true if the file was installed successfully. false indicates that
3543 * max_segno limit was exceeded, or an error occurred while renaming the
3544 * file into place.
3545 */
3546 static bool
3547 InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
3548 bool find_free, XLogSegNo max_segno,
3549 bool use_lock)
3550 {
3551 char path[MAXPGPATH];
3552 struct stat stat_buf;
3553
3554 XLogFilePath(path, ThisTimeLineID, *segno, wal_segment_size);
3555
3556 /*
3557 * We want to be sure that only one process does this at a time.
3558 */
3559 if (use_lock)
3560 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
3561
3562 if (!find_free)
3563 {
3564 /* Force installation: get rid of any pre-existing segment file */
3565 durable_unlink(path, DEBUG1);
3566 }
3567 else
3568 {
3569 /* Find a free slot to put it in */
3570 while (stat(path, &stat_buf) == 0)
3571 {
3572 if ((*segno) >= max_segno)
3573 {
3574 /* Failed to find a free slot within specified range */
3575 if (use_lock)
3576 LWLockRelease(ControlFileLock);
3577 return false;
3578 }
3579 (*segno)++;
3580 XLogFilePath(path, ThisTimeLineID, *segno, wal_segment_size);
3581 }
3582 }
3583
3584 /*
3585 * Perform the rename using link if available, paranoidly trying to avoid
3586 * overwriting an existing file (there shouldn't be one).
3587 */
3588 if (durable_link_or_rename(tmppath, path, LOG) != 0)
3589 {
3590 if (use_lock)
3591 LWLockRelease(ControlFileLock);
3592 /* durable_link_or_rename already emitted log message */
3593 return false;
3594 }
3595
3596 if (use_lock)
3597 LWLockRelease(ControlFileLock);
3598
3599 return true;
3600 }
3601
3602 /*
3603 * Open a pre-existing logfile segment for writing.
3604 */
3605 int
3606 XLogFileOpen(XLogSegNo segno)
3607 {
3608 char path[MAXPGPATH];
3609 int fd;
3610
3611 XLogFilePath(path, ThisTimeLineID, segno, wal_segment_size);
3612
3613 fd = BasicOpenFile(path, O_RDWR | PG_BINARY | get_sync_bit(sync_method));
3614 if (fd < 0)
3615 ereport(PANIC,
3616 (errcode_for_file_access(),
3617 errmsg("could not open write-ahead log file \"%s\": %m", path)));
3618
3619 return fd;
3620 }
3621
3622 /*
3623 * Open a logfile segment for reading (during recovery).
3624 *
3625 * If source == XLOG_FROM_ARCHIVE, the segment is retrieved from archive.
3626 * Otherwise, it's assumed to be already available in pg_wal.
3627 */
static int
XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli,
			 int source, bool notfoundOk)
{
	char		xlogfname[MAXFNAMELEN];
	char		activitymsg[MAXFNAMELEN + 16];
	char		path[MAXPGPATH];
	int			fd;

	/* Name of the segment on the requested timeline, used in messages too */
	XLogFileName(xlogfname, tli, segno, wal_segment_size);

	/* Determine 'path' and set restoredFromArchive per the source */
	switch (source)
	{
		case XLOG_FROM_ARCHIVE:
			/* Report recovery progress in PS display */
			snprintf(activitymsg, sizeof(activitymsg), "waiting for %s",
					 xlogfname);
			set_ps_display(activitymsg, false);

			/* Fetch from archive into a temp name; fills 'path' on success */
			restoredFromArchive = RestoreArchivedFile(path, xlogfname,
													  "RECOVERYXLOG",
													  wal_segment_size,
													  InRedo);
			if (!restoredFromArchive)
				return -1;
			break;

		case XLOG_FROM_PG_WAL:
		case XLOG_FROM_STREAM:
			/* Segment is expected to be present in pg_wal already */
			XLogFilePath(path, tli, segno, wal_segment_size);
			restoredFromArchive = false;
			break;

		default:
			elog(ERROR, "invalid XLogFileRead source %d", source);
	}

	/*
	 * If the segment was fetched from archival storage, replace the existing
	 * xlog segment (if any) with the archival version.
	 */
	if (source == XLOG_FROM_ARCHIVE)
	{
		KeepFileRestoredFromArchive(path, xlogfname);

		/*
		 * Set path to point at the new file in pg_wal.
		 */
		snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlogfname);
	}

	fd = BasicOpenFile(path, O_RDONLY | PG_BINARY);
	if (fd >= 0)
	{
		/* Success! Remember which timeline the open file belongs to. */
		curFileTLI = tli;

		/* Report recovery progress in PS display */
		snprintf(activitymsg, sizeof(activitymsg), "recovering %s",
				 xlogfname);
		set_ps_display(activitymsg, false);

		/* Track source of data in assorted state variables */
		readSource = source;
		XLogReceiptSource = source;
		/* In FROM_STREAM case, caller tracks receipt time, not me */
		if (source != XLOG_FROM_STREAM)
			XLogReceiptTime = GetCurrentTimestamp();

		return fd;
	}
	/*
	 * ENOENT with notfoundOk is the expected "segment not here" case;
	 * anything else is a hard failure.  (Note: 'emode' is not consulted
	 * here; open failures other than expected-ENOENT always PANIC.)
	 */
	if (errno != ENOENT || !notfoundOk) /* unexpected failure? */
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m", path)));
	return -1;
}
3705
3706 /*
3707 * Open a logfile segment for reading (during recovery).
3708 *
3709 * This version searches for the segment with any TLI listed in expectedTLEs.
3710 */
static int
XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source)
{
	char		path[MAXPGPATH];
	ListCell   *cell;
	int			fd;
	List	   *tles;

	/*
	 * Loop looking for a suitable timeline ID: we might need to read any of
	 * the timelines listed in expectedTLEs.
	 *
	 * We expect curFileTLI on entry to be the TLI of the preceding file in
	 * sequence, or 0 if there was no predecessor.  We do not allow curFileTLI
	 * to go backwards; this prevents us from picking up the wrong file when a
	 * parent timeline extends to higher segment numbers than the child we
	 * want to read.
	 *
	 * If we haven't read the timeline history file yet, read it now, so that
	 * we know which TLIs to scan.  We don't save the list in expectedTLEs,
	 * however, unless we actually find a valid segment.  That way if there is
	 * neither a timeline history file nor a WAL segment in the archive, and
	 * streaming replication is set up, we'll read the timeline history file
	 * streamed from the master when we start streaming, instead of recovering
	 * with a dummy history generated here.
	 */
	if (expectedTLEs)
		tles = expectedTLEs;
	else
		tles = readTimeLineHistory(recoveryTargetTLI);

	/* tles is ordered newest timeline first */
	foreach(cell, tles)
	{
		TimeLineHistoryEntry *hent = (TimeLineHistoryEntry *) lfirst(cell);
		TimeLineID	tli = hent->tli;

		if (tli < curFileTLI)
			break;				/* don't bother looking at too-old TLIs */

		/*
		 * Skip scanning the timeline ID that the logfile segment to read
		 * doesn't belong to
		 */
		if (hent->begin != InvalidXLogRecPtr)
		{
			XLogSegNo	beginseg = 0;

			XLByteToSeg(hent->begin, beginseg, wal_segment_size);

			/*
			 * The logfile segment that doesn't belong to the timeline is
			 * older or newer than the segment that the timeline started or
			 * ended at, respectively. It's sufficient to check only the
			 * starting segment of the timeline here. Since the timelines are
			 * scanned in descending order in this loop, any segments newer
			 * than the ending segment should belong to newer timeline and
			 * have already been read before. So it's not necessary to check
			 * the ending segment of the timeline here.
			 */
			if (segno < beginseg)
				continue;
		}

		/* Try the archive first (unless restricted to pg_wal) ... */
		if (source == XLOG_FROM_ANY || source == XLOG_FROM_ARCHIVE)
		{
			fd = XLogFileRead(segno, emode, tli,
							  XLOG_FROM_ARCHIVE, true);
			if (fd != -1)
			{
				elog(DEBUG1, "got WAL segment from archive");
				/* found a valid segment: now it's safe to save the list */
				if (!expectedTLEs)
					expectedTLEs = tles;
				return fd;
			}
		}

		/* ... then pg_wal (unless restricted to the archive) */
		if (source == XLOG_FROM_ANY || source == XLOG_FROM_PG_WAL)
		{
			fd = XLogFileRead(segno, emode, tli,
							  XLOG_FROM_PG_WAL, true);
			if (fd != -1)
			{
				if (!expectedTLEs)
					expectedTLEs = tles;
				return fd;
			}
		}
	}

	/* Couldn't find it.  For simplicity, complain about front timeline */
	XLogFilePath(path, recoveryTargetTLI, segno, wal_segment_size);
	errno = ENOENT;
	ereport(emode,
			(errcode_for_file_access(),
			 errmsg("could not open file \"%s\": %m", path)));
	return -1;
}
3808
3809 /*
3810 * Close the current logfile segment for writing.
3811 */
static void
XLogFileClose(void)
{
	Assert(openLogFile >= 0);

	/*
	 * WAL segment files will not be re-read in normal operation, so we advise
	 * the OS to release any cached pages.  But do not do so if WAL archiving
	 * or streaming is active, because archiver and walsender process could
	 * use the cache to read the WAL segment.
	 */
#if defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
	if (!XLogIsNeeded())
		(void) posix_fadvise(openLogFile, 0, 0, POSIX_FADV_DONTNEED);
#endif

	/* A failed close likely means lost writes; treat it as fatal. */
	if (close(openLogFile))
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not close log file %s: %m",
						XLogFileNameP(ThisTimeLineID, openLogSegNo))));
	openLogFile = -1;
}
3835
3836 /*
3837 * Preallocate log files beyond the specified log endpoint.
3838 *
3839 * XXX this is currently extremely conservative, since it forces only one
3840 * future log segment to exist, and even that only if we are 75% done with
3841 * the current one. This is only appropriate for very low-WAL-volume systems.
3842 * High-volume systems will be OK once they've built up a sufficient set of
3843 * recycled log segments, but the startup transient is likely to include
3844 * a lot of segment creations by foreground processes, which is not so good.
3845 */
static void
PreallocXlogFiles(XLogRecPtr endptr)
{
	XLogSegNo	_logSegNo;
	int			lf;
	bool		use_existent;
	uint64		offset;

	/* Segment containing endptr-1, and our offset within it */
	XLByteToPrevSeg(endptr, _logSegNo, wal_segment_size);
	offset = XLogSegmentOffset(endptr - 1, wal_segment_size);
	if (offset >= (uint32) (0.75 * wal_segment_size))
	{
		/* More than 75% into the current segment: ensure the next exists */
		_logSegNo++;
		use_existent = true;
		lf = XLogFileInit(_logSegNo, &use_existent, true);
		/* NOTE(review): close() result is ignored here — TODO confirm OK */
		close(lf);
		/* use_existent is cleared by XLogFileInit if a new file was made */
		if (!use_existent)
			CheckpointStats.ckpt_segs_added++;
	}
}
3866
3867 /*
3868 * Throws an error if the given log segment has already been removed or
3869 * recycled. The caller should only pass a segment that it knows to have
3870 * existed while the server has been running, as this function always
3871 * succeeds if no WAL segments have been removed since startup.
3872 * 'tli' is only used in the error message.
3873 *
3874 * Note: this function guarantees to keep errno unchanged on return.
3875 * This supports callers that use this to possibly deliver a better
3876 * error message about a missing file, while still being able to throw
3877 * a normal file-access error afterwards, if this does return.
3878 */
3879 void
3880 CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
3881 {
3882 int save_errno = errno;
3883 XLogSegNo lastRemovedSegNo;
3884
3885 SpinLockAcquire(&XLogCtl->info_lck);
3886 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3887 SpinLockRelease(&XLogCtl->info_lck);
3888
3889 if (segno <= lastRemovedSegNo)
3890 {
3891 char filename[MAXFNAMELEN];
3892
3893 XLogFileName(filename, tli, segno, wal_segment_size);
3894 errno = save_errno;
3895 ereport(ERROR,
3896 (errcode_for_file_access(),
3897 errmsg("requested WAL segment %s has already been removed",
3898 filename)));
3899 }
3900 errno = save_errno;
3901 }
3902
3903 /*
3904 * Return the last WAL segment removed, or 0 if no segment has been removed
3905 * since startup.
3906 *
3907 * NB: the result can be out of date arbitrarily fast, the caller has to deal
3908 * with that.
3909 */
3910 XLogSegNo
3911 XLogGetLastRemovedSegno(void)
3912 {
3913 XLogSegNo lastRemovedSegNo;
3914
3915 SpinLockAcquire(&XLogCtl->info_lck);
3916 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3917 SpinLockRelease(&XLogCtl->info_lck);
3918
3919 return lastRemovedSegNo;
3920 }
3921
3922 /*
3923 * Update the last removed segno pointer in shared memory, to reflect
3924 * that the given XLOG file has been removed.
3925 */
3926 static void
3927 UpdateLastRemovedPtr(char *filename)
3928 {
3929 uint32 tli;
3930 XLogSegNo segno;
3931
3932 XLogFromFileName(filename, &tli, &segno, wal_segment_size);
3933
3934 SpinLockAcquire(&XLogCtl->info_lck);
3935 if (segno > XLogCtl->lastRemovedSegNo)
3936 XLogCtl->lastRemovedSegNo = segno;
3937 SpinLockRelease(&XLogCtl->info_lck);
3938 }
3939
3940 /*
3941 * Recycle or remove all log files older or equal to passed segno.
3942 *
3943 * endptr is current (or recent) end of xlog, and lastredoptr is the
3944 * redo pointer of the last checkpoint. These are used to determine
3945 * whether we want to recycle rather than delete no-longer-wanted log files.
3946 */
static void
RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr lastredoptr, XLogRecPtr endptr)
{
	DIR		   *xldir;
	struct dirent *xlde;
	char		lastoff[MAXFNAMELEN];

	/*
	 * Construct a filename of the last segment to be kept.  The timeline ID
	 * doesn't matter, we ignore that in the comparison.  (During recovery,
	 * ThisTimeLineID isn't set, so we can't use that.)
	 */
	XLogFileName(lastoff, 0, segno, wal_segment_size);

	elog(DEBUG2, "attempting to remove WAL segments older than log file %s",
		 lastoff);

	xldir = AllocateDir(XLOGDIR);

	while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
	{
		/* Ignore files that are not XLOG segments */
		if (!IsXLogFileName(xlde->d_name) &&
			!IsPartialXLogFileName(xlde->d_name))
			continue;

		/*
		 * We ignore the timeline part of the XLOG segment identifiers in
		 * deciding whether a segment is still needed.  This ensures that we
		 * won't prematurely remove a segment from a parent timeline. We could
		 * probably be a little more proactive about removing segments of
		 * non-parent timelines, but that would be a whole lot more
		 * complicated.
		 *
		 * We use the alphanumeric sorting property of the filenames to decide
		 * which ones are earlier than the lastoff segment.
		 *
		 * (d_name + 8 skips the 8-hex-digit timeline field, so only the
		 * 16-hex-digit segment-number part participates in the comparison.)
		 */
		if (strcmp(xlde->d_name + 8, lastoff + 8) <= 0)
		{
			/* Only remove once archiving (if enabled) is done with it */
			if (XLogArchiveCheckDone(xlde->d_name))
			{
				/* Update the last removed location in shared memory first */
				UpdateLastRemovedPtr(xlde->d_name);

				RemoveXlogFile(xlde->d_name, lastredoptr, endptr);
			}
		}
	}

	FreeDir(xldir);
}
3998
3999 /*
4000 * Remove WAL files that are not part of the given timeline's history.
4001 *
4002 * This is called during recovery, whenever we switch to follow a new
4003 * timeline, and at the end of recovery when we create a new timeline. We
4004 * wouldn't otherwise care about extra WAL files lying in pg_wal, but they
4005 * might be leftover pre-allocated or recycled WAL segments on the old timeline
4006 * that we haven't used yet, and contain garbage. If we just leave them in
4007 * pg_wal, they will eventually be archived, and we can't let that happen.
4008 * Files that belong to our timeline history are valid, because we have
4009 * successfully replayed them, but from others we can't be sure.
4010 *
4011 * 'switchpoint' is the current point in WAL where we switch to new timeline,
4012 * and 'newTLI' is the new timeline we switch to.
4013 */
static void
RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
{
	DIR		   *xldir;
	struct dirent *xlde;
	char		switchseg[MAXFNAMELEN];
	XLogSegNo	endLogSegNo;

	/* Segment containing the byte just before the switchpoint */
	XLByteToPrevSeg(switchpoint, endLogSegNo, wal_segment_size);

	/*
	 * Construct a filename of the last segment to be kept.
	 */
	XLogFileName(switchseg, newTLI, endLogSegNo, wal_segment_size);

	elog(DEBUG2, "attempting to remove WAL segments newer than log file %s",
		 switchseg);

	xldir = AllocateDir(XLOGDIR);

	while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
	{
		/* Ignore files that are not XLOG segments */
		if (!IsXLogFileName(xlde->d_name))
			continue;

		/*
		 * Remove files that are on a timeline older than the new one we're
		 * switching to, but with a segment number >= the first segment on the
		 * new timeline.
		 *
		 * (The first 8 name chars are the timeline, the rest the segment
		 * number, hence the strncmp/strcmp split below.)
		 */
		if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
			strcmp(xlde->d_name + 8, switchseg + 8) > 0)
		{
			/*
			 * If the file has already been marked as .ready, however, don't
			 * remove it yet. It should be OK to remove it - files that are
			 * not part of our timeline history are not required for recovery
			 * - but seems safer to let them be archived and removed later.
			 */
			if (!XLogArchiveIsReady(xlde->d_name))
				RemoveXlogFile(xlde->d_name, InvalidXLogRecPtr, switchpoint);
		}
	}

	FreeDir(xldir);
}
4061
4062 /*
4063 * Recycle or remove a log file that's no longer needed.
4064 *
4065 * endptr is current (or recent) end of xlog, and lastredoptr is the
4066 * redo pointer of the last checkpoint. These are used to determine
4067 * whether we want to recycle rather than delete no-longer-wanted log files.
4068 * If lastredoptr is not known, pass invalid, and the function will recycle,
4069 * somewhat arbitrarily, 10 future segments.
4070 */
static void
RemoveXlogFile(const char *segname, XLogRecPtr lastredoptr, XLogRecPtr endptr)
{
	char		path[MAXPGPATH];
#ifdef WIN32
	char		newpath[MAXPGPATH];
#endif
	struct stat statbuf;
	XLogSegNo	endlogSegNo;
	XLogSegNo	recycleSegNo;

	/*
	 * Initialize info about where to try to recycle to.
	 */
	XLByteToSeg(endptr, endlogSegNo, wal_segment_size);
	if (lastredoptr == InvalidXLogRecPtr)
		recycleSegNo = endlogSegNo + 10;	/* arbitrary slop, per header */
	else
		recycleSegNo = XLOGfileslop(lastredoptr);

	snprintf(path, MAXPGPATH, XLOGDIR "/%s", segname);

	/*
	 * Before deleting the file, see if it can be recycled as a future log
	 * segment. Only recycle normal files, pg_standby for example can create
	 * symbolic links pointing to a separate archive directory.
	 */
	if (endlogSegNo <= recycleSegNo &&
		lstat(path, &statbuf) == 0 && S_ISREG(statbuf.st_mode) &&
		InstallXLogFileSegment(&endlogSegNo, path,
							   true, recycleSegNo, true))
	{
		ereport(DEBUG2,
				(errmsg("recycled write-ahead log file \"%s\"",
						segname)));
		CheckpointStats.ckpt_segs_recycled++;
		/* Needn't recheck that slot on future iterations */
		endlogSegNo++;
	}
	else
	{
		/* No need for any more future segments... */
		int			rc;

		ereport(DEBUG2,
				(errmsg("removing write-ahead log file \"%s\"",
						segname)));

#ifdef WIN32

		/*
		 * On Windows, if another process (e.g another backend) holds the file
		 * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
		 * will still show up in directory listing until the last handle is
		 * closed. To avoid confusing the lingering deleted file for a live
		 * WAL file that needs to be archived, rename it before deleting it.
		 *
		 * If another process holds the file open without FILE_SHARE_DELETE
		 * flag, rename will fail. We'll try again at the next checkpoint.
		 */
		snprintf(newpath, MAXPGPATH, "%s.deleted", path);
		if (rename(path, newpath) != 0)
		{
			ereport(LOG,
					(errcode_for_file_access(),
					 errmsg("could not rename old write-ahead log file \"%s\": %m",
							path)));
			return;
		}
		rc = durable_unlink(newpath, LOG);
#else
		rc = durable_unlink(path, LOG);
#endif
		if (rc != 0)
		{
			/* Message already logged by durable_unlink() */
			return;
		}
		CheckpointStats.ckpt_segs_removed++;
	}

	/* Drop any .ready/.done archive-status files for this segment */
	XLogArchiveCleanup(segname);
}
4154
4155 /*
4156 * Verify whether pg_wal and pg_wal/archive_status exist.
4157 * If the latter does not exist, recreate it.
4158 *
4159 * It is not the goal of this function to verify the contents of these
4160 * directories, but to help in cases where someone has performed a cluster
4161 * copy for PITR purposes but omitted pg_wal from the copy.
4162 *
4163 * We could also recreate pg_wal if it doesn't exist, but a deliberate
4164 * policy decision was made not to. It is fairly common for pg_wal to be
4165 * a symlink, and if that was the DBA's intent then automatically making a
4166 * plain directory would result in degraded performance with no notice.
4167 */
4168 static void
4169 ValidateXLOGDirectoryStructure(void)
4170 {
4171 char path[MAXPGPATH];
4172 struct stat stat_buf;
4173
4174 /* Check for pg_wal; if it doesn't exist, error out */
4175 if (stat(XLOGDIR, &stat_buf) != 0 ||
4176 !S_ISDIR(stat_buf.st_mode))
4177 ereport(FATAL,
4178 (errmsg("required WAL directory \"%s\" does not exist",
4179 XLOGDIR)));
4180
4181 /* Check for archive_status */
4182 snprintf(path, MAXPGPATH, XLOGDIR "/archive_status");
4183 if (stat(path, &stat_buf) == 0)
4184 {
4185 /* Check for weird cases where it exists but isn't a directory */
4186 if (!S_ISDIR(stat_buf.st_mode))
4187 ereport(FATAL,
4188 (errmsg("required WAL directory \"%s\" does not exist",
4189 path)));
4190 }
4191 else
4192 {
4193 ereport(LOG,
4194 (errmsg("creating missing WAL directory \"%s\"", path)));
4195 if (MakePGDirectory(path) < 0)
4196 ereport(FATAL,
4197 (errmsg("could not create missing directory \"%s\": %m",
4198 path)));
4199 }
4200 }
4201
4202 /*
4203 * Remove previous backup history files. This also retries creation of
4204 * .ready files for any backup history files for which XLogArchiveNotify
4205 * failed earlier.
4206 */
4207 static void
4208 CleanupBackupHistory(void)
4209 {
4210 DIR *xldir;
4211 struct dirent *xlde;
4212 char path[MAXPGPATH + sizeof(XLOGDIR)];
4213
4214 xldir = AllocateDir(XLOGDIR);
4215
4216 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
4217 {
4218 if (IsBackupHistoryFileName(xlde->d_name))
4219 {
4220 if (XLogArchiveCheckDone(xlde->d_name))
4221 {
4222 elog(DEBUG2, "removing WAL backup history file \"%s\"",
4223 xlde->d_name);
4224 snprintf(path, sizeof(path), XLOGDIR "/%s", xlde->d_name);
4225 unlink(path);
4226 XLogArchiveCleanup(xlde->d_name);
4227 }
4228 }
4229 }
4230
4231 FreeDir(xldir);
4232 }
4233
4234 /*
4235 * Attempt to read an XLOG record.
4236 *
4237 * If RecPtr is valid, try to read a record at that position. Otherwise
4238 * try to read a record just after the last one previously read.
4239 *
4240 * If no valid record is available, returns NULL, or fails if emode is PANIC.
4241 * (emode must be either PANIC, LOG). In standby mode, retries until a valid
4242 * record is available.
4243 *
4244 * The record is copied into readRecordBuf, so that on successful return,
4245 * the returned record pointer always points there.
4246 */
static XLogRecord *
ReadRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr, int emode,
		   bool fetching_ckpt)
{
	XLogRecord *record;
	XLogPageReadPrivate *private = (XLogPageReadPrivate *) xlogreader->private_data;

	/* Pass through parameters to XLogPageRead */
	private->fetching_ckpt = fetching_ckpt;
	private->emode = emode;
	private->randAccess = (RecPtr != InvalidXLogRecPtr);

	/* This is the first attempt to read this page. */
	lastSourceFailed = false;

	/* Retry loop: each iteration is one attempt against the current source */
	for (;;)
	{
		char	   *errormsg;

		record = XLogReadRecord(xlogreader, RecPtr, &errormsg);
		/* Export the reader's positions to the globals other code reads */
		ReadRecPtr = xlogreader->ReadRecPtr;
		EndRecPtr = xlogreader->EndRecPtr;
		if (record == NULL)
		{
			/*
			 * When not in standby mode we find that WAL ends in an incomplete
			 * record, keep track of that record.  After recovery is done,
			 * we'll write a record to indicate downstream WAL readers that
			 * that portion is to be ignored.
			 */
			if (!StandbyMode &&
				!XLogRecPtrIsInvalid(xlogreader->abortedRecPtr))
			{
				abortedRecPtr = xlogreader->abortedRecPtr;
				missingContrecPtr = xlogreader->missingContrecPtr;
			}

			/* Close the current segment; a retry may reopen elsewhere */
			if (readFile >= 0)
			{
				close(readFile);
				readFile = -1;
			}

			/*
			 * We only end up here without a message when XLogPageRead()
			 * failed - in that case we already logged something. In
			 * StandbyMode that only happens if we have been triggered, so we
			 * shouldn't loop anymore in that case.
			 */
			if (errormsg)
				ereport(emode_for_corrupt_record(emode,
												 RecPtr ? RecPtr : EndRecPtr),
						(errmsg_internal("%s", errormsg) /* already translated */ ));
		}

		/*
		 * Check page TLI is one of the expected values.
		 */
		else if (!tliInHistory(xlogreader->latestPageTLI, expectedTLEs))
		{
			char		fname[MAXFNAMELEN];
			XLogSegNo	segno;
			int32		offset;

			/* Identify the offending page for the error message */
			XLByteToSeg(xlogreader->latestPagePtr, segno, wal_segment_size);
			offset = XLogSegmentOffset(xlogreader->latestPagePtr,
									   wal_segment_size);
			XLogFileName(fname, xlogreader->readPageTLI, segno,
						 wal_segment_size);
			ereport(emode_for_corrupt_record(emode,
											 RecPtr ? RecPtr : EndRecPtr),
					(errmsg("unexpected timeline ID %u in log segment %s, offset %u",
							xlogreader->latestPageTLI,
							fname,
							offset)));
			record = NULL;		/* treat the record as unusable */
		}

		if (record)
		{
			/* Great, got a record */
			return record;
		}
		else
		{
			/* No valid record available from this source */
			lastSourceFailed = true;

			/*
			 * If archive recovery was requested, but we were still doing
			 * crash recovery, switch to archive recovery and retry using the
			 * offline archive. We have now replayed all the valid WAL in
			 * pg_wal, so we are presumably now consistent.
			 *
			 * We require that there's at least some valid WAL present in
			 * pg_wal, however (!fetching_ckpt). We could recover using the
			 * WAL from the archive, even if pg_wal is completely empty, but
			 * we'd have no idea how far we'd have to replay to reach
			 * consistency. So err on the safe side and give up.
			 */
			if (!InArchiveRecovery && ArchiveRecoveryRequested &&
				!fetching_ckpt)
			{
				ereport(DEBUG1,
						(errmsg_internal("reached end of WAL in pg_wal, entering archive recovery")));
				InArchiveRecovery = true;
				if (StandbyModeRequested)
					StandbyMode = true;

				/* initialize minRecoveryPoint to this record */
				LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
				ControlFile->state = DB_IN_ARCHIVE_RECOVERY;
				if (ControlFile->minRecoveryPoint < EndRecPtr)
				{
					ControlFile->minRecoveryPoint = EndRecPtr;
					ControlFile->minRecoveryPointTLI = ThisTimeLineID;
				}
				/* update local copy */
				minRecoveryPoint = ControlFile->minRecoveryPoint;
				minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;

				/*
				 * The startup process can update its local copy of
				 * minRecoveryPoint from this point.
				 */
				updateMinRecoveryPoint = true;

				UpdateControlFile();

				/*
				 * We update SharedRecoveryState while holding the lock on
				 * ControlFileLock so both states are consistent in shared
				 * memory.
				 */
				SpinLockAcquire(&XLogCtl->info_lck);
				XLogCtl->SharedRecoveryState = RECOVERY_STATE_ARCHIVE;
				SpinLockRelease(&XLogCtl->info_lck);

				LWLockRelease(ControlFileLock);

				CheckRecoveryConsistency();

				/*
				 * Before we retry, reset lastSourceFailed and currentSource
				 * so that we will check the archive next.
				 */
				lastSourceFailed = false;
				currentSource = 0;

				continue;
			}

			/* In standby mode, loop back to retry. Otherwise, give up. */
			if (StandbyMode && !CheckForStandbyTrigger())
				continue;
			else
				return NULL;
		}
	}
}
4407
4408 /*
4409 * Scan for new timelines that might have appeared in the archive since we
4410 * started recovery.
4411 *
4412 * If there are any, the function changes recovery target TLI to the latest
4413 * one and returns 'true'.
4414 */
static bool
rescanLatestTimeLine(void)
{
	List	   *newExpectedTLEs;
	bool		found;
	ListCell   *cell;
	TimeLineID	newtarget;
	TimeLineID	oldtarget = recoveryTargetTLI;
	TimeLineHistoryEntry *currentTle = NULL;

	/* Look for any timeline newer than the current target */
	newtarget = findNewestTimeLine(recoveryTargetTLI);
	if (newtarget == recoveryTargetTLI)
	{
		/* No new timelines found */
		return false;
	}

	/*
	 * Determine the list of expected TLIs for the new TLI
	 */

	newExpectedTLEs = readTimeLineHistory(newtarget);

	/*
	 * If the current timeline is not part of the history of the new timeline,
	 * we cannot proceed to it.
	 */
	found = false;
	foreach(cell, newExpectedTLEs)
	{
		currentTle = (TimeLineHistoryEntry *) lfirst(cell);

		if (currentTle->tli == recoveryTargetTLI)
		{
			found = true;
			break;
		}
	}
	if (!found)
	{
		ereport(LOG,
				(errmsg("new timeline %u is not a child of database system timeline %u",
						newtarget,
						ThisTimeLineID)));
		return false;
	}

	/*
	 * The current timeline was found in the history file, but check that the
	 * next timeline was forked off from it *after* the current recovery
	 * location.  (currentTle still points at our timeline's entry here.)
	 */
	if (currentTle->end < EndRecPtr)
	{
		ereport(LOG,
				(errmsg("new timeline %u forked off current database system timeline %u before current recovery point %X/%X",
						newtarget,
						ThisTimeLineID,
						(uint32) (EndRecPtr >> 32), (uint32) EndRecPtr)));
		return false;
	}

	/* The new timeline history seems valid. Switch target */
	recoveryTargetTLI = newtarget;
	list_free_deep(expectedTLEs);
	expectedTLEs = newExpectedTLEs;

	/*
	 * As in StartupXLOG(), try to ensure we have all the history files
	 * between the old target and new target in pg_wal.
	 */
	restoreTimeLineHistoryFiles(oldtarget + 1, newtarget);

	ereport(LOG,
			(errmsg("new target timeline is %u",
					recoveryTargetTLI)));

	return true;
}
4494
4495 /*
4496 * I/O routines for pg_control
4497 *
4498 * *ControlFile is a buffer in shared memory that holds an image of the
4499 * contents of pg_control. WriteControlFile() initializes pg_control
4500 * given a preloaded buffer, ReadControlFile() loads the buffer from
4501 * the pg_control file (during postmaster or standalone-backend startup),
4502 * and UpdateControlFile() rewrites pg_control after we modify xlog state.
4503 *
4504 * For simplicity, WriteControlFile() initializes the fields of pg_control
4505 * that are related to checking backend/database compatibility, and
4506 * ReadControlFile() verifies they are correct. We could split out the
4507 * I/O and compatibility-check functions, but there seems no need currently.
4508 */
static void
WriteControlFile(void)
{
	int			fd;
	char		buffer[PG_CONTROL_FILE_SIZE]; /* need not be aligned */

	/*
	 * Ensure that the size of the pg_control data structure is sane.  See the
	 * comments for these symbols in pg_control.h.
	 */
	StaticAssertStmt(sizeof(ControlFileData) <= PG_CONTROL_MAX_SAFE_SIZE,
					 "pg_control is too large for atomic disk writes");
	StaticAssertStmt(sizeof(ControlFileData) <= PG_CONTROL_FILE_SIZE,
					 "sizeof(ControlFileData) exceeds PG_CONTROL_FILE_SIZE");

	/*
	 * Initialize version and compatibility-check fields
	 */
	ControlFile->pg_control_version = PG_CONTROL_VERSION;
	ControlFile->catalog_version_no = CATALOG_VERSION_NO;

	ControlFile->maxAlign = MAXIMUM_ALIGNOF;
	ControlFile->floatFormat = FLOATFORMAT_VALUE;

	ControlFile->blcksz = BLCKSZ;
	ControlFile->relseg_size = RELSEG_SIZE;
	ControlFile->xlog_blcksz = XLOG_BLCKSZ;
	ControlFile->xlog_seg_size = wal_segment_size;

	ControlFile->nameDataLen = NAMEDATALEN;
	ControlFile->indexMaxKeys = INDEX_MAX_KEYS;

	ControlFile->toast_max_chunk_size = TOAST_MAX_CHUNK_SIZE;
	ControlFile->loblksize = LOBLKSIZE;

	ControlFile->float4ByVal = FLOAT4PASSBYVAL;
	ControlFile->float8ByVal = FLOAT8PASSBYVAL;

	/* Contents are protected with a CRC (over everything before the crc field) */
	INIT_CRC32C(ControlFile->crc);
	COMP_CRC32C(ControlFile->crc,
				(char *) ControlFile,
				offsetof(ControlFileData, crc));
	FIN_CRC32C(ControlFile->crc);

	/*
	 * We write out PG_CONTROL_FILE_SIZE bytes into pg_control, zero-padding
	 * the excess over sizeof(ControlFileData).  This reduces the odds of
	 * premature-EOF errors when reading pg_control.  We'll still fail when we
	 * check the contents of the file, but hopefully with a more specific
	 * error than "couldn't read pg_control".
	 */
	memset(buffer, 0, PG_CONTROL_FILE_SIZE);
	memcpy(buffer, ControlFile, sizeof(ControlFileData));

	/* O_EXCL: this function only ever creates a brand-new pg_control */
	fd = BasicOpenFile(XLOG_CONTROL_FILE,
					   O_RDWR | O_CREAT | O_EXCL | PG_BINARY);
	if (fd < 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not create control file \"%s\": %m",
						XLOG_CONTROL_FILE)));

	errno = 0;
	pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_WRITE);
	if (write(fd, buffer, PG_CONTROL_FILE_SIZE) != PG_CONTROL_FILE_SIZE)
	{
		/* if write didn't set errno, assume problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not write to control file: %m")));
	}
	pgstat_report_wait_end();

	pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_SYNC);
	if (pg_fsync(fd) != 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not fsync control file: %m")));
	pgstat_report_wait_end();

	if (close(fd))
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not close control file: %m")));
}
4597
/*
 * Read pg_control into the ControlFile struct and validate it.
 *
 * Checks, in order: the pg_control format version (including a heuristic
 * for byte-swapped contents), the CRC over the file's fields, and every
 * compile-time option recorded at initdb time that must match this server
 * binary.  On success, sets the GUCs that depend on control-file contents
 * (wal_segment_size, data_checksums) and recomputes derived variables.
 *
 * Any mismatch is FATAL: running against an incompatible cluster could
 * corrupt it.
 */
static void
ReadControlFile(void)
{
	pg_crc32c	crc;
	int			fd;
	static char wal_segsz_str[20];
	int			r;

	/*
	 * Read data...
	 */
	fd = BasicOpenFile(XLOG_CONTROL_FILE,
					   O_RDWR | PG_BINARY);
	if (fd < 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not open control file \"%s\": %m",
						XLOG_CONTROL_FILE)));

	pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_READ);
	r = read(fd, ControlFile, sizeof(ControlFileData));
	if (r != sizeof(ControlFileData))
	{
		/* distinguish an outright read error from a short read */
		if (r < 0)
			ereport(PANIC,
					(errcode_for_file_access(),
					 errmsg("could not read from control file: %m")));
		else
			ereport(PANIC,
					(errmsg("could not read from control file: read %d bytes, expected %d", r, (int) sizeof(ControlFileData))));
	}
	pgstat_report_wait_end();

	close(fd);

	/*
	 * Check for expected pg_control format version. If this is wrong, the
	 * CRC check will likely fail because we'll be checking the wrong number
	 * of bytes. Complaining about wrong version will probably be more
	 * enlightening than complaining about wrong CRC.
	 */

	/*
	 * A stored version that is an exact multiple of 65536 with a nonzero
	 * high half looks like a byte-swapped PG_CONTROL_VERSION, i.e. a
	 * pg_control written on a machine of the opposite endianness.  Report
	 * that case with a more specific hint before the generic version check.
	 */
	if (ControlFile->pg_control_version != PG_CONTROL_VERSION && ControlFile->pg_control_version % 65536 == 0 && ControlFile->pg_control_version / 65536 != 0)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x),"
						   " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).",
						   ControlFile->pg_control_version, ControlFile->pg_control_version,
						   PG_CONTROL_VERSION, PG_CONTROL_VERSION),
				 errhint("This could be a problem of mismatched byte ordering. It looks like you need to initdb.")));

	if (ControlFile->pg_control_version != PG_CONTROL_VERSION)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
						   " but the server was compiled with PG_CONTROL_VERSION %d.",
						   ControlFile->pg_control_version, PG_CONTROL_VERSION),
				 errhint("It looks like you need to initdb.")));

	/* Now check the CRC.  It covers everything before the 'crc' field. */
	INIT_CRC32C(crc);
	COMP_CRC32C(crc,
				(char *) ControlFile,
				offsetof(ControlFileData, crc));
	FIN_CRC32C(crc);

	if (!EQ_CRC32C(crc, ControlFile->crc))
		ereport(FATAL,
				(errmsg("incorrect checksum in control file")));

	/*
	 * Do compatibility checking immediately. If the database isn't
	 * compatible with the backend executable, we want to abort before we can
	 * possibly do any damage.
	 */
	if (ControlFile->catalog_version_no != CATALOG_VERSION_NO)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with CATALOG_VERSION_NO %d,"
						   " but the server was compiled with CATALOG_VERSION_NO %d.",
						   ControlFile->catalog_version_no, CATALOG_VERSION_NO),
				 errhint("It looks like you need to initdb.")));
	if (ControlFile->maxAlign != MAXIMUM_ALIGNOF)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with MAXALIGN %d,"
						   " but the server was compiled with MAXALIGN %d.",
						   ControlFile->maxAlign, MAXIMUM_ALIGNOF),
				 errhint("It looks like you need to initdb.")));
	if (ControlFile->floatFormat != FLOATFORMAT_VALUE)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster appears to use a different floating-point number format than the server executable."),
				 errhint("It looks like you need to initdb.")));
	if (ControlFile->blcksz != BLCKSZ)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with BLCKSZ %d,"
						   " but the server was compiled with BLCKSZ %d.",
						   ControlFile->blcksz, BLCKSZ),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->relseg_size != RELSEG_SIZE)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with RELSEG_SIZE %d,"
						   " but the server was compiled with RELSEG_SIZE %d.",
						   ControlFile->relseg_size, RELSEG_SIZE),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->xlog_blcksz != XLOG_BLCKSZ)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with XLOG_BLCKSZ %d,"
						   " but the server was compiled with XLOG_BLCKSZ %d.",
						   ControlFile->xlog_blcksz, XLOG_BLCKSZ),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->nameDataLen != NAMEDATALEN)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with NAMEDATALEN %d,"
						   " but the server was compiled with NAMEDATALEN %d.",
						   ControlFile->nameDataLen, NAMEDATALEN),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->indexMaxKeys != INDEX_MAX_KEYS)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with INDEX_MAX_KEYS %d,"
						   " but the server was compiled with INDEX_MAX_KEYS %d.",
						   ControlFile->indexMaxKeys, INDEX_MAX_KEYS),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->toast_max_chunk_size != TOAST_MAX_CHUNK_SIZE)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with TOAST_MAX_CHUNK_SIZE %d,"
						   " but the server was compiled with TOAST_MAX_CHUNK_SIZE %d.",
						   ControlFile->toast_max_chunk_size, (int) TOAST_MAX_CHUNK_SIZE),
				 errhint("It looks like you need to recompile or initdb.")));
	if (ControlFile->loblksize != LOBLKSIZE)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with LOBLKSIZE %d,"
						   " but the server was compiled with LOBLKSIZE %d.",
						   ControlFile->loblksize, (int) LOBLKSIZE),
				 errhint("It looks like you need to recompile or initdb.")));

#ifdef USE_FLOAT4_BYVAL
	if (ControlFile->float4ByVal != true)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized without USE_FLOAT4_BYVAL"
						   " but the server was compiled with USE_FLOAT4_BYVAL."),
				 errhint("It looks like you need to recompile or initdb.")));
#else
	if (ControlFile->float4ByVal != false)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with USE_FLOAT4_BYVAL"
						   " but the server was compiled without USE_FLOAT4_BYVAL."),
				 errhint("It looks like you need to recompile or initdb.")));
#endif

#ifdef USE_FLOAT8_BYVAL
	if (ControlFile->float8ByVal != true)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized without USE_FLOAT8_BYVAL"
						   " but the server was compiled with USE_FLOAT8_BYVAL."),
				 errhint("It looks like you need to recompile or initdb.")));
#else
	if (ControlFile->float8ByVal != false)
		ereport(FATAL,
				(errmsg("database files are incompatible with server"),
				 errdetail("The database cluster was initialized with USE_FLOAT8_BYVAL"
						   " but the server was compiled without USE_FLOAT8_BYVAL."),
				 errhint("It looks like you need to recompile or initdb.")));
#endif

	/*
	 * The WAL segment size is a cluster property, not a compile-time one:
	 * adopt the value recorded in pg_control, after validating it.
	 */
	wal_segment_size = ControlFile->xlog_seg_size;

	if (!IsValidWalSegSize(wal_segment_size))
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg_plural("WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d byte",
									  "WAL segment size must be a power of two between 1 MB and 1 GB, but the control file specifies %d bytes",
									  wal_segment_size,
									  wal_segment_size)));

	/* expose the segment size as a read-only GUC */
	snprintf(wal_segsz_str, sizeof(wal_segsz_str), "%d", wal_segment_size);
	SetConfigOption("wal_segment_size", wal_segsz_str, PGC_INTERNAL,
					PGC_S_OVERRIDE);

	/* check and update variables dependent on wal_segment_size */
	if (ConvertToXSegs(min_wal_size_mb, wal_segment_size) < 2)
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("\"min_wal_size\" must be at least twice \"wal_segment_size\"")));

	if (ConvertToXSegs(max_wal_size_mb, wal_segment_size) < 2)
		ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
						errmsg("\"max_wal_size\" must be at least twice \"wal_segment_size\"")));

	/*
	 * Usable payload bytes in a whole segment: per-page usable bytes for
	 * every page, minus the extra size of the long header carried by the
	 * segment's first page.
	 */
	UsableBytesInSegment =
		(wal_segment_size / XLOG_BLCKSZ * UsableBytesInPage) -
		(SizeOfXLogLongPHD - SizeOfXLogShortPHD);

	CalculateCheckpointSegments();

	/* Make the initdb settings visible as GUC variables, too */
	SetConfigOption("data_checksums", DataChecksumsEnabled() ? "yes" : "no",
					PGC_INTERNAL, PGC_S_OVERRIDE);
}
4806
/*
 * Rewrite pg_control in place with the current contents of the shared
 * ControlFile struct.
 *
 * Recomputes the CRC (which covers all fields preceding 'crc'), then
 * writes, fsyncs, and closes the file.  Only sizeof(ControlFileData)
 * bytes are rewritten; the zero padding laid down when the file was
 * created is left untouched.  Any failure is PANIC, since losing a
 * control-file update could leave the cluster unrecoverable.
 */
void
UpdateControlFile(void)
{
	int			fd;

	/* refresh the CRC before writing */
	INIT_CRC32C(ControlFile->crc);
	COMP_CRC32C(ControlFile->crc,
				(char *) ControlFile,
				offsetof(ControlFileData, crc));
	FIN_CRC32C(ControlFile->crc);

	fd = BasicOpenFile(XLOG_CONTROL_FILE,
					   O_RDWR | PG_BINARY);
	if (fd < 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not open control file \"%s\": %m",
						XLOG_CONTROL_FILE)));

	errno = 0;
	pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_WRITE_UPDATE);
	if (write(fd, ControlFile, sizeof(ControlFileData)) != sizeof(ControlFileData))
	{
		/* if write didn't set errno, assume problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not write to control file: %m")));
	}
	pgstat_report_wait_end();

	pgstat_report_wait_start(WAIT_EVENT_CONTROL_FILE_SYNC_UPDATE);
	if (pg_fsync(fd) != 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not fsync control file: %m")));
	pgstat_report_wait_end();

	if (close(fd))
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not close control file: %m")));
}
4851
4852 /*
4853 * Returns the unique system identifier from control file.
4854 */
4855 uint64
4856 GetSystemIdentifier(void)
4857 {
4858 Assert(ControlFile != NULL);
4859 return ControlFile->system_identifier;
4860 }
4861
4862 /*
4863 * Returns the random nonce from control file.
4864 */
4865 char *
4866 GetMockAuthenticationNonce(void)
4867 {
4868 Assert(ControlFile != NULL);
4869 return ControlFile->mock_authentication_nonce;
4870 }
4871
4872 /*
4873 * Are checksums enabled for data pages?
4874 */
4875 bool
4876 DataChecksumsEnabled(void)
4877 {
4878 Assert(ControlFile != NULL);
4879 return (ControlFile->data_checksum_version > 0);
4880 }
4881
4882 /*
4883 * Returns a fake LSN for unlogged relations.
4884 *
4885 * Each call generates an LSN that is greater than any previous value
4886 * returned. The current counter value is saved and restored across clean
4887 * shutdowns, but like unlogged relations, does not survive a crash. This can
4888 * be used in lieu of real LSN values returned by XLogInsert, if you need an
4889 * LSN-like increasing sequence of numbers without writing any WAL.
4890 */
4891 XLogRecPtr
4892 GetFakeLSNForUnloggedRel(void)
4893 {
4894 XLogRecPtr nextUnloggedLSN;
4895
4896 /* increment the unloggedLSN counter, need SpinLock */
4897 SpinLockAcquire(&XLogCtl->ulsn_lck);
4898 nextUnloggedLSN = XLogCtl->unloggedLSN++;
4899 SpinLockRelease(&XLogCtl->ulsn_lck);
4900
4901 return nextUnloggedLSN;
4902 }
4903
4904 /*
4905 * Auto-tune the number of XLOG buffers.
4906 *
4907 * The preferred setting for wal_buffers is about 3% of shared_buffers, with
4908 * a maximum of one XLOG segment (there is little reason to think that more
4909 * is helpful, at least so long as we force an fsync when switching log files)
4910 * and a minimum of 8 blocks (which was the default value prior to PostgreSQL
4911 * 9.1, when auto-tuning was added).
4912 *
4913 * This should not be called until NBuffers has received its final value.
4914 */
4915 static int
4916 XLOGChooseNumBuffers(void)
4917 {
4918 int xbuffers;
4919
4920 xbuffers = NBuffers / 32;
4921 if (xbuffers > (wal_segment_size / XLOG_BLCKSZ))
4922 xbuffers = (wal_segment_size / XLOG_BLCKSZ);
4923 if (xbuffers < 8)
4924 xbuffers = 8;
4925 return xbuffers;
4926 }
4927
4928 /*
4929 * GUC check_hook for wal_buffers
4930 */
4931 bool
4932 check_wal_buffers(int *newval, void **extra, GucSource source)
4933 {
4934 /*
4935 * -1 indicates a request for auto-tune.
4936 */
4937 if (*newval == -1)
4938 {
4939 /*
4940 * If we haven't yet changed the boot_val default of -1, just let it
4941 * be. We'll fix it when XLOGShmemSize is called.
4942 */
4943 if (XLOGbuffers == -1)
4944 return true;
4945
4946 /* Otherwise, substitute the auto-tune value */
4947 *newval = XLOGChooseNumBuffers();
4948 }
4949
4950 /*
4951 * We clamp manually-set values to at least 4 blocks. Prior to PostgreSQL
4952 * 9.1, a minimum of 4 was enforced by guc.c, but since that is no longer
4953 * the case, we just silently treat such values as a request for the
4954 * minimum. (We could throw an error instead, but that doesn't seem very
4955 * helpful.)
4956 */
4957 if (*newval < 4)
4958 *newval = 4;
4959
4960 return true;
4961 }
4962
4963 /*
4964 * Read the control file, set respective GUCs.
4965 *
4966 * This is to be called during startup, including a crash recovery cycle,
4967 * unless in bootstrap mode, where no control file yet exists. As there's no
4968 * usable shared memory yet (its sizing can depend on the contents of the
4969 * control file!), first store the contents in local memory. XLOGShmemInit()
4970 * will then copy it to shared memory later.
4971 *
4972 * reset just controls whether previous contents are to be expected (in the
4973 * reset case, there's a dangling pointer into old shared memory), or not.
4974 */
4975 void
4976 LocalProcessControlFile(bool reset)
4977 {
4978 Assert(reset || ControlFile == NULL);
4979 ControlFile = palloc(sizeof(ControlFileData));
4980 ReadControlFile();
4981 }
4982
4983 /*
4984 * Initialization of shared memory for XLOG
4985 */
4986 Size
4987 XLOGShmemSize(void)
4988 {
4989 Size size;
4990
4991 /*
4992 * If the value of wal_buffers is -1, use the preferred auto-tune value.
4993 * This isn't an amazingly clean place to do this, but we must wait till
4994 * NBuffers has received its final value, and must do it before using the
4995 * value of XLOGbuffers to do anything important.
4996 */
4997 if (XLOGbuffers == -1)
4998 {
4999 char buf[32];
5000
5001 snprintf(buf, sizeof(buf), "%d", XLOGChooseNumBuffers());
5002 SetConfigOption("wal_buffers", buf, PGC_POSTMASTER, PGC_S_OVERRIDE);
5003 }
5004 Assert(XLOGbuffers > 0);
5005
5006 /* XLogCtl */
5007 size = sizeof(XLogCtlData);
5008
5009 /* WAL insertion locks, plus alignment */
5010 size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
5011 /* xlblocks array */
5012 size = add_size(size, mul_size(sizeof(XLogRecPtr), XLOGbuffers));
5013 /* extra alignment padding for XLOG I/O buffers */
5014 size = add_size(size, XLOG_BLCKSZ);
5015 /* and the buffers themselves */
5016 size = add_size(size, mul_size(XLOG_BLCKSZ, XLOGbuffers));
5017
5018 /*
5019 * Note: we don't count ControlFileData, it comes out of the "slop factor"
5020 * added by CreateSharedMemoryAndSemaphores. This lets us use this
5021 * routine again below to compute the actual allocation size.
5022 */
5023
5024 return size;
5025 }
5026
/*
 * Initialize XLOG-related shared memory: the XLogCtl struct, the xlblocks
 * array, the WAL insertion locks, and the WAL page buffers, all carved out
 * of the single allocation sized by XLOGShmemSize().  Also installs any
 * locally-read pg_control contents into shared memory.
 */
void
XLOGShmemInit(void)
{
	bool		foundCFile,
				foundXLog;
	char	   *allocptr;
	int			i;
	ControlFileData *localControlFile;

#ifdef WAL_DEBUG

	/*
	 * Create a memory context for WAL debugging that's exempt from the normal
	 * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
	 * an allocation fails, but wal_debug is not for production use anyway.
	 */
	if (walDebugCxt == NULL)
	{
		walDebugCxt = AllocSetContextCreate(TopMemoryContext,
											"WAL Debug",
											ALLOCSET_DEFAULT_SIZES);
		MemoryContextAllowInCriticalSection(walDebugCxt, true);
	}
#endif


	XLogCtl = (XLogCtlData *)
		ShmemInitStruct("XLOG Ctl", XLOGShmemSize(), &foundXLog);

	/*
	 * Stash any local control-file copy before ShmemInitStruct overwrites
	 * the ControlFile pointer; its contents are migrated below.
	 */
	localControlFile = ControlFile;
	ControlFile = (ControlFileData *)
		ShmemInitStruct("Control File", sizeof(ControlFileData), &foundCFile);

	if (foundCFile || foundXLog)
	{
		/* both should be present or neither */
		Assert(foundCFile && foundXLog);

		/*
		 * The structures already exist in shared memory (presumably another
		 * process created them); just set up this process's local pointers.
		 */
		/* Initialize local copy of WALInsertLocks and register the tranche */
		WALInsertLocks = XLogCtl->Insert.WALInsertLocks;
		LWLockRegisterTranche(LWTRANCHE_WAL_INSERT,
							  "wal_insert");

		/* the backend-local copy of pg_control is no longer needed */
		if (localControlFile)
			pfree(localControlFile);
		return;
	}
	memset(XLogCtl, 0, sizeof(XLogCtlData));

	/*
	 * Already have read control file locally, unless in bootstrap mode. Move
	 * contents into shared memory.
	 */
	if (localControlFile)
	{
		memcpy(ControlFile, localControlFile, sizeof(ControlFileData));
		pfree(localControlFile);
	}

	/*
	 * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a
	 * multiple of the alignment for same, so no extra alignment padding is
	 * needed here.
	 */
	allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData);
	XLogCtl->xlblocks = (XLogRecPtr *) allocptr;
	memset(XLogCtl->xlblocks, 0, sizeof(XLogRecPtr) * XLOGbuffers);
	allocptr += sizeof(XLogRecPtr) * XLOGbuffers;


	/* WAL insertion locks. Ensure they're aligned to the full padded size */
	allocptr += sizeof(WALInsertLockPadded) -
		((uintptr_t) allocptr) % sizeof(WALInsertLockPadded);
	WALInsertLocks = XLogCtl->Insert.WALInsertLocks =
		(WALInsertLockPadded *) allocptr;
	allocptr += sizeof(WALInsertLockPadded) * NUM_XLOGINSERT_LOCKS;

	LWLockRegisterTranche(LWTRANCHE_WAL_INSERT, "wal_insert");
	for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
	{
		LWLockInitialize(&WALInsertLocks[i].l.lock, LWTRANCHE_WAL_INSERT);
		WALInsertLocks[i].l.insertingAt = InvalidXLogRecPtr;
		WALInsertLocks[i].l.lastImportantAt = InvalidXLogRecPtr;
	}

	/*
	 * Align the start of the page buffers to a full xlog block size boundary.
	 * This simplifies some calculations in XLOG insertion. It is also
	 * required for O_DIRECT.
	 */
	allocptr = (char *) TYPEALIGN(XLOG_BLCKSZ, allocptr);
	XLogCtl->pages = allocptr;
	memset(XLogCtl->pages, 0, (Size) XLOG_BLCKSZ * XLOGbuffers);

	/*
	 * Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
	 * in additional info.)
	 */
	XLogCtl->XLogCacheBlck = XLOGbuffers - 1;	/* index of highest buffer */
	XLogCtl->SharedRecoveryState = RECOVERY_STATE_CRASH;
	XLogCtl->SharedHotStandbyActive = false;
	XLogCtl->WalWriterSleeping = false;

	SpinLockInit(&XLogCtl->Insert.insertpos_lck);
	SpinLockInit(&XLogCtl->info_lck);
	SpinLockInit(&XLogCtl->ulsn_lck);
	InitSharedLatch(&XLogCtl->recoveryWakeupLatch);
}
5135
/*
 * This func must be called ONCE on system install. It creates pg_control
 * and the initial XLOG segment.
 *
 * The initial segment contains a single hand-assembled shutdown-checkpoint
 * record; pg_control is then written to point at it.  Everything here is
 * PANIC-on-failure, since a half-bootstrapped cluster is useless.
 */
void
BootStrapXLOG(void)
{
	CheckPoint	checkPoint;
	char	   *buffer;
	XLogPageHeader page;
	XLogLongPageHeader longpage;
	XLogRecord *record;
	char	   *recptr;
	bool		use_existent;
	uint64		sysidentifier;
	char		mock_auth_nonce[MOCK_AUTH_NONCE_LEN];
	struct timeval tv;
	pg_crc32c	crc;

	/*
	 * Select a hopefully-unique system identifier code for this installation.
	 * We use the result of gettimeofday(), including the fractional seconds
	 * field, as being about as unique as we can easily get. (Think not to
	 * use random(), since it hasn't been seeded and there's no portable way
	 * to seed it other than the system clock value...) The upper half of the
	 * uint64 value is just the tv_sec part, while the lower half contains the
	 * tv_usec part (which must fit in 20 bits), plus 12 bits from our current
	 * PID for a little extra uniqueness. A person knowing this encoding can
	 * determine the initialization time of the installation, which could
	 * perhaps be useful sometimes.
	 */
	gettimeofday(&tv, NULL);
	sysidentifier = ((uint64) tv.tv_sec) << 32;
	sysidentifier |= ((uint64) tv.tv_usec) << 12;
	sysidentifier |= getpid() & 0xFFF;

	/*
	 * Generate a random nonce. This is used for authentication requests that
	 * will fail because the user does not exist. The nonce is used to create
	 * a genuine-looking password challenge for the non-existent user, in lieu
	 * of an actual stored password.
	 */
	if (!pg_backend_random(mock_auth_nonce, MOCK_AUTH_NONCE_LEN))
		ereport(PANIC,
				(errcode(ERRCODE_INTERNAL_ERROR),
				 errmsg("could not generate secret authorization token")));

	/* First timeline ID is always 1 */
	ThisTimeLineID = 1;

	/* page buffer must be aligned suitably for O_DIRECT */
	buffer = (char *) palloc(XLOG_BLCKSZ + XLOG_BLCKSZ);
	page = (XLogPageHeader) TYPEALIGN(XLOG_BLCKSZ, buffer);
	memset(page, 0, XLOG_BLCKSZ);

	/*
	 * Set up information for the initial checkpoint record
	 *
	 * The initial checkpoint record is written to the beginning of the WAL
	 * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
	 * used, so that we can use 0/0 to mean "before any valid WAL segment".
	 */
	/* redo points just past the long page header of segment 1 */
	checkPoint.redo = wal_segment_size + SizeOfXLogLongPHD;
	checkPoint.ThisTimeLineID = ThisTimeLineID;
	checkPoint.PrevTimeLineID = ThisTimeLineID;
	checkPoint.fullPageWrites = fullPageWrites;
	checkPoint.nextXidEpoch = 0;
	checkPoint.nextXid = FirstNormalTransactionId;
	checkPoint.nextOid = FirstBootstrapObjectId;
	checkPoint.nextMulti = FirstMultiXactId;
	checkPoint.nextMultiOffset = 0;
	checkPoint.oldestXid = FirstNormalTransactionId;
	checkPoint.oldestXidDB = TemplateDbOid;
	checkPoint.oldestMulti = FirstMultiXactId;
	checkPoint.oldestMultiDB = TemplateDbOid;
	checkPoint.oldestCommitTsXid = InvalidTransactionId;
	checkPoint.newestCommitTsXid = InvalidTransactionId;
	checkPoint.time = (pg_time_t) time(NULL);
	checkPoint.oldestActiveXid = InvalidTransactionId;

	/* seed the shared counters and limits from the checkpoint values */
	ShmemVariableCache->nextXid = checkPoint.nextXid;
	ShmemVariableCache->nextOid = checkPoint.nextOid;
	ShmemVariableCache->oidCount = 0;
	MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
	AdvanceOldestClogXid(checkPoint.oldestXid);
	SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
	SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB, true);
	SetCommitTsLimit(InvalidTransactionId, InvalidTransactionId);

	/* Set up the XLOG page header */
	page->xlp_magic = XLOG_PAGE_MAGIC;
	page->xlp_info = XLP_LONG_HEADER;
	page->xlp_tli = ThisTimeLineID;
	page->xlp_pageaddr = wal_segment_size;	/* first page of segment 1 */
	longpage = (XLogLongPageHeader) page;
	longpage->xlp_sysid = sysidentifier;
	longpage->xlp_seg_size = wal_segment_size;
	longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;

	/* Insert the initial checkpoint record */
	recptr = ((char *) page + SizeOfXLogLongPHD);
	record = (XLogRecord *) recptr;
	record->xl_prev = 0;		/* no previous record */
	record->xl_xid = InvalidTransactionId;
	record->xl_tot_len = SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(checkPoint);
	record->xl_info = XLOG_CHECKPOINT_SHUTDOWN;
	record->xl_rmid = RM_XLOG_ID;
	recptr += SizeOfXLogRecord;
	/* fill the XLogRecordDataHeaderShort struct */
	*(recptr++) = (char) XLR_BLOCK_ID_DATA_SHORT;
	*(recptr++) = sizeof(checkPoint);
	memcpy(recptr, &checkPoint, sizeof(checkPoint));
	recptr += sizeof(checkPoint);
	Assert(recptr - (char *) record == record->xl_tot_len);

	/* record CRC covers the payload first, then the header up to xl_crc */
	INIT_CRC32C(crc);
	COMP_CRC32C(crc, ((char *) record) + SizeOfXLogRecord, record->xl_tot_len - SizeOfXLogRecord);
	COMP_CRC32C(crc, (char *) record, offsetof(XLogRecord, xl_crc));
	FIN_CRC32C(crc);
	record->xl_crc = crc;

	/* Create first XLOG segment file */
	use_existent = false;
	openLogFile = XLogFileInit(1, &use_existent, false);

	/* Write the first page with the initial record */
	errno = 0;
	pgstat_report_wait_start(WAIT_EVENT_WAL_BOOTSTRAP_WRITE);
	if (write(openLogFile, page, XLOG_BLCKSZ) != XLOG_BLCKSZ)
	{
		/* if write didn't set errno, assume problem is no disk space */
		if (errno == 0)
			errno = ENOSPC;
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not write bootstrap write-ahead log file: %m")));
	}
	pgstat_report_wait_end();

	pgstat_report_wait_start(WAIT_EVENT_WAL_BOOTSTRAP_SYNC);
	if (pg_fsync(openLogFile) != 0)
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not fsync bootstrap write-ahead log file: %m")));
	pgstat_report_wait_end();

	if (close(openLogFile))
		ereport(PANIC,
				(errcode_for_file_access(),
				 errmsg("could not close bootstrap write-ahead log file: %m")));

	openLogFile = -1;

	/* Now create pg_control */

	memset(ControlFile, 0, sizeof(ControlFileData));
	/* Initialize pg_control status fields */
	ControlFile->system_identifier = sysidentifier;
	memcpy(ControlFile->mock_authentication_nonce, mock_auth_nonce, MOCK_AUTH_NONCE_LEN);
	ControlFile->state = DB_SHUTDOWNED;
	ControlFile->time = checkPoint.time;
	ControlFile->checkPoint = checkPoint.redo;
	ControlFile->checkPointCopy = checkPoint;

	/*
	 * Start the fake-LSN counter (see GetFakeLSNForUnloggedRel) at 1;
	 * presumably 0 is avoided because it would read as an invalid LSN —
	 * confirm against InvalidXLogRecPtr.
	 */
	ControlFile->unloggedLSN = 1;

	/* Set important parameter values for use when replaying WAL */
	ControlFile->MaxConnections = MaxConnections;
	ControlFile->max_worker_processes = max_worker_processes;
	ControlFile->max_prepared_xacts = max_prepared_xacts;
	ControlFile->max_locks_per_xact = max_locks_per_xact;
	ControlFile->wal_level = wal_level;
	ControlFile->wal_log_hints = wal_log_hints;
	ControlFile->track_commit_timestamp = track_commit_timestamp;
	ControlFile->data_checksum_version = bootstrap_data_checksum_version;

	/* some additional ControlFile fields are set in WriteControlFile() */

	WriteControlFile();

	/* Bootstrap the commit log, too */
	BootStrapCLOG();
	BootStrapCommitTs();
	BootStrapSUBTRANS();
	BootStrapMultiXact();

	pfree(buffer);

	/*
	 * Force control file to be read - in contrast to normal processing we'd
	 * otherwise never run the checks and GUC related initializations therein.
	 */
	ReadControlFile();
}
5329
5330 static char *
5331 str_time(pg_time_t tnow)
5332 {
5333 static char buf[128];
5334
5335 pg_strftime(buf, sizeof(buf),
5336 "%Y-%m-%d %H:%M:%S %Z",
5337 pg_localtime(&tnow, log_timezone));
5338
5339 return buf;
5340 }
5341
5342 /*
5343 * See if there is a recovery command file (recovery.conf), and if so
5344 * read in parameters for archive recovery and XLOG streaming.
5345 *
5346 * The file is parsed using the main configuration parser.
5347 */
5348 static void
5349 readRecoveryCommandFile(void)
5350 {
5351 FILE *fd;
5352 TimeLineID rtli = 0;
5353 bool rtliGiven = false;
5354 ConfigVariable *item,
5355 *head = NULL,
5356 *tail = NULL;
5357 bool recoveryTargetActionSet = false;
5358
5359
5360 fd = AllocateFile(RECOVERY_COMMAND_FILE, "r");
5361 if (fd == NULL)
5362 {
5363 if (errno == ENOENT)
5364 return; /* not there, so no archive recovery */
5365 ereport(FATAL,
5366 (errcode_for_file_access(),
5367 errmsg("could not open recovery command file \"%s\": %m",
5368 RECOVERY_COMMAND_FILE)));
5369 }
5370
5371 /*
5372 * Since we're asking ParseConfigFp() to report errors as FATAL, there's
5373 * no need to check the return value.
5374 */
5375 (void) ParseConfigFp(fd, RECOVERY_COMMAND_FILE, 0, FATAL, &head, &tail);
5376
5377 FreeFile(fd);
5378
5379 for (item = head; item; item = item->next)
5380 {
5381 if (strcmp(item->name, "restore_command") == 0)
5382 {
5383 recoveryRestoreCommand = pstrdup(item->value);
5384 ereport(DEBUG2,
5385 (errmsg_internal("restore_command = '%s'",
5386 recoveryRestoreCommand)));
5387 }
5388 else if (strcmp(item->name, "recovery_end_command") == 0)
5389 {
5390 recoveryEndCommand = pstrdup(item->value);
5391 ereport(DEBUG2,
5392 (errmsg_internal("recovery_end_command = '%s'",
5393 recoveryEndCommand)));
5394 }
5395 else if (strcmp(item->name, "archive_cleanup_command") == 0)
5396 {
5397 archiveCleanupCommand = pstrdup(item->value);
5398 ereport(DEBUG2,
5399 (errmsg_internal("archive_cleanup_command = '%s'",
5400 archiveCleanupCommand)));
5401 }
5402 else if (strcmp(item->name, "recovery_target_action") == 0)
5403 {
5404 if (strcmp(item->value, "pause") == 0)
5405 recoveryTargetAction = RECOVERY_TARGET_ACTION_PAUSE;
5406 else if (strcmp(item->value, "promote") == 0)
5407 recoveryTargetAction = RECOVERY_TARGET_ACTION_PROMOTE;
5408 else if (strcmp(item->value, "shutdown") == 0)
5409 recoveryTargetAction = RECOVERY_TARGET_ACTION_SHUTDOWN;
5410 else
5411 ereport(ERROR,
5412 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5413 errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
5414 "recovery_target_action",
5415 item->value),
5416 errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
5417
5418 ereport(DEBUG2,
5419 (errmsg_internal("recovery_target_action = '%s'",
5420 item->value)));
5421
5422 recoveryTargetActionSet = true;
5423 }
5424 else if (strcmp(item->name, "recovery_target_timeline") == 0)
5425 {
5426 rtliGiven = true;
5427 if (strcmp(item->value, "latest") == 0)
5428 rtli = 0;
5429 else
5430 {
5431 errno = 0;
5432 rtli = (TimeLineID) strtoul(item->value, NULL, 0);
5433 if (errno == EINVAL || errno == ERANGE)
5434 ereport(FATAL,
5435 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5436 errmsg("recovery_target_timeline is not a valid number: \"%s\"",
5437 item->value)));
5438 }
5439 if (rtli)
5440 ereport(DEBUG2,
5441 (errmsg_internal("recovery_target_timeline = %u", rtli)));
5442 else
5443 ereport(DEBUG2,
5444 (errmsg_internal("recovery_target_timeline = latest")));
5445 }
5446 else if (strcmp(item->name, "recovery_target_xid") == 0)
5447 {
5448 errno = 0;
5449 recoveryTargetXid = (TransactionId) pg_strtouint64(item->value, NULL, 0);
5450 if (errno == EINVAL || errno == ERANGE)
5451 ereport(FATAL,
5452 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5453 errmsg("recovery_target_xid is not a valid number: \"%s\"",
5454 item->value)));
5455 ereport(DEBUG2,
5456 (errmsg_internal("recovery_target_xid = %u",
5457 recoveryTargetXid)));
5458 recoveryTarget = RECOVERY_TARGET_XID;
5459 }
5460 else if (strcmp(item->name, "recovery_target_time") == 0)
5461 {
5462 recoveryTarget = RECOVERY_TARGET_TIME;
5463
5464 if (strcmp(item->value, "epoch") == 0 ||
5465 strcmp(item->value, "infinity") == 0 ||
5466 strcmp(item->value, "-infinity") == 0 ||
5467 strcmp(item->value, "now") == 0 ||
5468 strcmp(item->value, "today") == 0 ||
5469 strcmp(item->value, "tomorrow") == 0 ||
5470 strcmp(item->value, "yesterday") == 0)
5471 ereport(FATAL,
5472 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5473 errmsg("recovery_target_time is not a valid timestamp: \"%s\"",
5474 item->value)));
5475
5476 /*
5477 * Convert the time string given by the user to TimestampTz form.
5478 */
5479 recoveryTargetTime =
5480 DatumGetTimestampTz(DirectFunctionCall3(timestamptz_in,
5481 CStringGetDatum(item->value),
5482 ObjectIdGetDatum(InvalidOid),
5483 Int32GetDatum(-1)));
5484 ereport(DEBUG2,
5485 (errmsg_internal("recovery_target_time = '%s'",
5486 timestamptz_to_str(recoveryTargetTime))));
5487 }
5488 else if (strcmp(item->name, "recovery_target_name") == 0)
5489 {
5490 recoveryTarget = RECOVERY_TARGET_NAME;
5491
5492 recoveryTargetName = pstrdup(item->value);
5493 if (strlen(recoveryTargetName) >= MAXFNAMELEN)
5494 ereport(FATAL,
5495 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5496 errmsg("recovery_target_name is too long (maximum %d characters)",
5497 MAXFNAMELEN - 1)));
5498
5499 ereport(DEBUG2,
5500 (errmsg_internal("recovery_target_name = '%s'",
5501 recoveryTargetName)));
5502 }
5503 else if (strcmp(item->name, "recovery_target_lsn") == 0)
5504 {
5505 recoveryTarget = RECOVERY_TARGET_LSN;
5506
5507 /*
5508 * Convert the LSN string given by the user to XLogRecPtr form.
5509 */
5510 recoveryTargetLSN =
5511 DatumGetLSN(DirectFunctionCall3(pg_lsn_in,
5512 CStringGetDatum(item->value),
5513 ObjectIdGetDatum(InvalidOid),
5514 Int32GetDatum(-1)));
5515 ereport(DEBUG2,
5516 (errmsg_internal("recovery_target_lsn = '%X/%X'",
5517 (uint32) (recoveryTargetLSN >> 32),
5518 (uint32) recoveryTargetLSN)));
5519 }
5520 else if (strcmp(item->name, "recovery_target") == 0)
5521 {
5522 if (strcmp(item->value, "immediate") == 0)
5523 recoveryTarget = RECOVERY_TARGET_IMMEDIATE;
5524 else
5525 ereport(ERROR,
5526 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5527 errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
5528 "recovery_target",
5529 item->value),
5530 errhint("The only allowed value is \"immediate\".")));
5531 ereport(DEBUG2,
5532 (errmsg_internal("recovery_target = '%s'",
5533 item->value)));
5534 }
5535 else if (strcmp(item->name, "recovery_target_inclusive") == 0)
5536 {
5537 /*
5538 * does nothing if a recovery_target is not also set
5539 */
5540 if (!parse_bool(item->value, &recoveryTargetInclusive))
5541 ereport(ERROR,
5542 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5543 errmsg("parameter \"%s\" requires a Boolean value",
5544 "recovery_target_inclusive")));
5545 ereport(DEBUG2,
5546 (errmsg_internal("recovery_target_inclusive = %s",
5547 item->value)));
5548 }
5549 else if (strcmp(item->name, "standby_mode") == 0)
5550 {
5551 if (!parse_bool(item->value, &StandbyModeRequested))
5552 ereport(ERROR,
5553 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5554 errmsg("parameter \"%s\" requires a Boolean value",
5555 "standby_mode")));
5556 ereport(DEBUG2,
5557 (errmsg_internal("standby_mode = '%s'", item->value)));
5558 }
5559 else if (strcmp(item->name, "primary_conninfo") == 0)
5560 {
5561 PrimaryConnInfo = pstrdup(item->value);
5562 ereport(DEBUG2,
5563 (errmsg_internal("primary_conninfo = '%s'",
5564 PrimaryConnInfo)));
5565 }
5566 else if (strcmp(item->name, "primary_slot_name") == 0)
5567 {
5568 ReplicationSlotValidateName(item->value, ERROR);
5569 PrimarySlotName = pstrdup(item->value);
5570 ereport(DEBUG2,
5571 (errmsg_internal("primary_slot_name = '%s'",
5572 PrimarySlotName)));
5573 }
5574 else if (strcmp(item->name, "trigger_file") == 0)
5575 {
5576 TriggerFile = pstrdup(item->value);
5577 ereport(DEBUG2,
5578 (errmsg_internal("trigger_file = '%s'",
5579 TriggerFile)));
5580 }
5581 else if (strcmp(item->name, "recovery_min_apply_delay") == 0)
5582 {
5583 const char *hintmsg;
5584
5585 if (!parse_int(item->value, &recovery_min_apply_delay, GUC_UNIT_MS,
5586 &hintmsg))
5587 ereport(ERROR,
5588 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5589 errmsg("parameter \"%s\" requires a temporal value",
5590 "recovery_min_apply_delay"),
5591 hintmsg ? errhint("%s", _(hintmsg)) : 0));
5592 ereport(DEBUG2,
5593 (errmsg_internal("recovery_min_apply_delay = '%s'", item->value)));
5594 }
5595 else
5596 ereport(FATAL,
5597 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5598 errmsg("unrecognized recovery parameter \"%s\"",
5599 item->name)));
5600 }
5601
5602 /*
5603 * Check for compulsory parameters
5604 */
5605 if (StandbyModeRequested)
5606 {
5607 if (PrimaryConnInfo == NULL && recoveryRestoreCommand == NULL)
5608 ereport(WARNING,
5609 (errmsg("recovery command file \"%s\" specified neither primary_conninfo nor restore_command",
5610 RECOVERY_COMMAND_FILE),
5611 errhint("The database server will regularly poll the pg_wal subdirectory to check for files placed there.")));
5612 }
5613 else
5614 {
5615 if (recoveryRestoreCommand == NULL)
5616 ereport(FATAL,
5617 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5618 errmsg("recovery command file \"%s\" must specify restore_command when standby mode is not enabled",
5619 RECOVERY_COMMAND_FILE)));
5620 }
5621
5622 /*
5623 * Override any inconsistent requests. Not that this is a change of
5624 * behaviour in 9.5; prior to this we simply ignored a request to pause if
5625 * hot_standby = off, which was surprising behaviour.
5626 */
5627 if (recoveryTargetAction == RECOVERY_TARGET_ACTION_PAUSE &&
5628 recoveryTargetActionSet &&
5629 !EnableHotStandby)
5630 recoveryTargetAction = RECOVERY_TARGET_ACTION_SHUTDOWN;
5631
5632 /*
5633 * We don't support standby_mode in standalone backends; that requires
5634 * other processes such as the WAL receiver to be alive.
5635 */
5636 if (StandbyModeRequested && !IsUnderPostmaster)
5637 ereport(FATAL,
5638 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
5639 errmsg("standby mode is not supported by single-user servers")));
5640
5641 /* Enable fetching from archive recovery area */
5642 ArchiveRecoveryRequested = true;
5643
5644 /*
5645 * If user specified recovery_target_timeline, validate it or compute the
5646 * "latest" value. We can't do this until after we've gotten the restore
5647 * command and set InArchiveRecovery, because we need to fetch timeline
5648 * history files from the archive.
5649 */
5650 if (rtliGiven)
5651 {
5652 if (rtli)
5653 {
5654 /* Timeline 1 does not have a history file, all else should */
5655 if (rtli != 1 && !existsTimeLineHistory(rtli))
5656 ereport(FATAL,
5657 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
5658 errmsg("recovery target timeline %u does not exist",
5659 rtli)));
5660 recoveryTargetTLI = rtli;
5661 recoveryTargetIsLatest = false;
5662 }
5663 else
5664 {
5665 /* We start the "latest" search from pg_control's timeline */
5666 recoveryTargetTLI = findNewestTimeLine(recoveryTargetTLI);
5667 recoveryTargetIsLatest = true;
5668 }
5669 }
5670
5671 FreeConfigVariables(head);
5672 }
5673
5674 /*
5675 * Exit archive-recovery state
5676 */
5677 static void
5678 exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog)
5679 {
5680 char xlogfname[MAXFNAMELEN];
5681 XLogSegNo endLogSegNo;
5682 XLogSegNo startLogSegNo;
5683
5684 /* we always switch to a new timeline after archive recovery */
5685 Assert(endTLI != ThisTimeLineID);
5686
5687 /*
5688 * We are no longer in archive recovery state.
5689 */
5690 InArchiveRecovery = false;
5691
5692 /*
5693 * Update min recovery point one last time.
5694 */
5695 UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
5696
5697 /*
5698 * If the ending log segment is still open, close it (to avoid problems on
5699 * Windows with trying to rename or delete an open file).
5700 */
5701 if (readFile >= 0)
5702 {
5703 close(readFile);
5704 readFile = -1;
5705 }
5706
5707 /*
5708 * Calculate the last segment on the old timeline, and the first segment
5709 * on the new timeline. If the switch happens in the middle of a segment,
5710 * they are the same, but if the switch happens exactly at a segment
5711 * boundary, startLogSegNo will be endLogSegNo + 1.
5712 */
5713 XLByteToPrevSeg(endOfLog, endLogSegNo, wal_segment_size);
5714 XLByteToSeg(endOfLog, startLogSegNo, wal_segment_size);
5715
5716 /*
5717 * Initialize the starting WAL segment for the new timeline. If the switch
5718 * happens in the middle of a segment, copy data from the last WAL segment
5719 * of the old timeline up to the switch point, to the starting WAL segment
5720 * on the new timeline.
5721 */
5722 if (endLogSegNo == startLogSegNo)
5723 {
5724 /*
5725 * Make a copy of the file on the new timeline.
5726 *
5727 * Writing WAL isn't allowed yet, so there are no locking
5728 * considerations. But we should be just as tense as XLogFileInit to
5729 * avoid emplacing a bogus file.
5730 */
5731 XLogFileCopy(endLogSegNo, endTLI, endLogSegNo,
5732 XLogSegmentOffset(endOfLog, wal_segment_size));
5733 }
5734 else
5735 {
5736 /*
5737 * The switch happened at a segment boundary, so just create the next
5738 * segment on the new timeline.
5739 */
5740 bool use_existent = true;
5741 int fd;
5742
5743 fd = XLogFileInit(startLogSegNo, &use_existent, true);
5744
5745 if (close(fd))
5746 ereport(ERROR,
5747 (errcode_for_file_access(),
5748 errmsg("could not close log file %s: %m",
5749 XLogFileNameP(ThisTimeLineID, startLogSegNo))));
5750 }
5751
5752 /*
5753 * Let's just make real sure there are not .ready or .done flags posted
5754 * for the new segment.
5755 */
5756 XLogFileName(xlogfname, ThisTimeLineID, startLogSegNo, wal_segment_size);
5757 XLogArchiveCleanup(xlogfname);
5758
5759 /*
5760 * Rename the config file out of the way, so that we don't accidentally
5761 * re-enter archive recovery mode in a subsequent crash.
5762 */
5763 unlink(RECOVERY_COMMAND_DONE);
5764 durable_rename(RECOVERY_COMMAND_FILE, RECOVERY_COMMAND_DONE, FATAL);
5765
5766 ereport(LOG,
5767 (errmsg("archive recovery complete")));
5768 }
5769
5770 /*
5771 * Extract timestamp from WAL record.
5772 *
5773 * If the record contains a timestamp, returns true, and saves the timestamp
5774 * in *recordXtime. If the record type has no timestamp, returns false.
5775 * Currently, only transaction commit/abort records and restore points contain
5776 * timestamps.
5777 */
5778 static bool
5779 getRecordTimestamp(XLogReaderState *record, TimestampTz *recordXtime)
5780 {
5781 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
5782 uint8 xact_info = info & XLOG_XACT_OPMASK;
5783 uint8 rmid = XLogRecGetRmid(record);
5784
5785 if (rmid == RM_XLOG_ID && info == XLOG_RESTORE_POINT)
5786 {
5787 *recordXtime = ((xl_restore_point *) XLogRecGetData(record))->rp_time;
5788 return true;
5789 }
5790 if (rmid == RM_XACT_ID && (xact_info == XLOG_XACT_COMMIT ||
5791 xact_info == XLOG_XACT_COMMIT_PREPARED))
5792 {
5793 *recordXtime = ((xl_xact_commit *) XLogRecGetData(record))->xact_time;
5794 return true;
5795 }
5796 if (rmid == RM_XACT_ID && (xact_info == XLOG_XACT_ABORT ||
5797 xact_info == XLOG_XACT_ABORT_PREPARED))
5798 {
5799 *recordXtime = ((xl_xact_abort *) XLogRecGetData(record))->xact_time;
5800 return true;
5801 }
5802 return false;
5803 }
5804
5805 /*
5806 * For point-in-time recovery, this function decides whether we want to
5807 * stop applying the XLOG before the current record.
5808 *
5809 * Returns true if we are stopping, false otherwise. If stopping, some
5810 * information is saved in recoveryStopXid et al for use in annotating the
5811 * new timeline's history file.
5812 */
5813 static bool
5814 recoveryStopsBefore(XLogReaderState *record)
5815 {
5816 bool stopsHere = false;
5817 uint8 xact_info;
5818 bool isCommit;
5819 TimestampTz recordXtime = 0;
5820 TransactionId recordXid;
5821
5822 /* Check if we should stop as soon as reaching consistency */
5823 if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE && reachedConsistency)
5824 {
5825 ereport(LOG,
5826 (errmsg("recovery stopping after reaching consistency")));
5827
5828 recoveryStopAfter = false;
5829 recoveryStopXid = InvalidTransactionId;
5830 recoveryStopLSN = InvalidXLogRecPtr;
5831 recoveryStopTime = 0;
5832 recoveryStopName[0] = '\0';
5833 return true;
5834 }
5835
5836 /* Check if target LSN has been reached */
5837 if (recoveryTarget == RECOVERY_TARGET_LSN &&
5838 !recoveryTargetInclusive &&
5839 record->ReadRecPtr >= recoveryTargetLSN)
5840 {
5841 recoveryStopAfter = false;
5842 recoveryStopXid = InvalidTransactionId;
5843 recoveryStopLSN = record->ReadRecPtr;
5844 recoveryStopTime = 0;
5845 recoveryStopName[0] = '\0';
5846 ereport(LOG,
5847 (errmsg("recovery stopping before WAL location (LSN) \"%X/%X\"",
5848 (uint32) (recoveryStopLSN >> 32),
5849 (uint32) recoveryStopLSN)));
5850 return true;
5851 }
5852
5853 /* Otherwise we only consider stopping before COMMIT or ABORT records. */
5854 if (XLogRecGetRmid(record) != RM_XACT_ID)
5855 return false;
5856
5857 xact_info = XLogRecGetInfo(record) & XLOG_XACT_OPMASK;
5858
5859 if (xact_info == XLOG_XACT_COMMIT)
5860 {
5861 isCommit = true;
5862 recordXid = XLogRecGetXid(record);
5863 }
5864 else if (xact_info == XLOG_XACT_COMMIT_PREPARED)
5865 {
5866 xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);
5867 xl_xact_parsed_commit parsed;
5868
5869 isCommit = true;
5870 ParseCommitRecord(XLogRecGetInfo(record),
5871 xlrec,
5872 &parsed);
5873 recordXid = parsed.twophase_xid;
5874 }
5875 else if (xact_info == XLOG_XACT_ABORT)
5876 {
5877 isCommit = false;
5878 recordXid = XLogRecGetXid(record);
5879 }
5880 else if (xact_info == XLOG_XACT_ABORT_PREPARED)
5881 {
5882 xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record);
5883 xl_xact_parsed_abort parsed;
5884
5885 isCommit = false;
5886 ParseAbortRecord(XLogRecGetInfo(record),
5887 xlrec,
5888 &parsed);
5889 recordXid = parsed.twophase_xid;
5890 }
5891 else
5892 return false;
5893
5894 if (recoveryTarget == RECOVERY_TARGET_XID && !recoveryTargetInclusive)
5895 {
5896 /*
5897 * There can be only one transaction end record with this exact
5898 * transactionid
5899 *
5900 * when testing for an xid, we MUST test for equality only, since
5901 * transactions are numbered in the order they start, not the order
5902 * they complete. A higher numbered xid will complete before you about
5903 * 50% of the time...
5904 */
5905 stopsHere = (recordXid == recoveryTargetXid);
5906 }
5907
5908 if (recoveryTarget == RECOVERY_TARGET_TIME &&
5909 getRecordTimestamp(record, &recordXtime))
5910 {
5911 /*
5912 * There can be many transactions that share the same commit time, so
5913 * we stop after the last one, if we are inclusive, or stop at the
5914 * first one if we are exclusive
5915 */
5916 if (recoveryTargetInclusive)
5917 stopsHere = (recordXtime > recoveryTargetTime);
5918 else
5919 stopsHere = (recordXtime >= recoveryTargetTime);
5920 }
5921
5922 if (stopsHere)
5923 {
5924 recoveryStopAfter = false;
5925 recoveryStopXid = recordXid;
5926 recoveryStopTime = recordXtime;
5927 recoveryStopLSN = InvalidXLogRecPtr;
5928 recoveryStopName[0] = '\0';
5929
5930 if (isCommit)
5931 {
5932 ereport(LOG,
5933 (errmsg("recovery stopping before commit of transaction %u, time %s",
5934 recoveryStopXid,
5935 timestamptz_to_str(recoveryStopTime))));
5936 }
5937 else
5938 {
5939 ereport(LOG,
5940 (errmsg("recovery stopping before abort of transaction %u, time %s",
5941 recoveryStopXid,
5942 timestamptz_to_str(recoveryStopTime))));
5943 }
5944 }
5945
5946 return stopsHere;
5947 }
5948
5949 /*
5950 * Same as recoveryStopsBefore, but called after applying the record.
5951 *
5952 * We also track the timestamp of the latest applied COMMIT/ABORT
5953 * record in XLogCtl->recoveryLastXTime.
5954 */
5955 static bool
5956 recoveryStopsAfter(XLogReaderState *record)
5957 {
5958 uint8 info;
5959 uint8 xact_info;
5960 uint8 rmid;
5961 TimestampTz recordXtime;
5962
5963 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
5964 rmid = XLogRecGetRmid(record);
5965
5966 /*
5967 * There can be many restore points that share the same name; we stop at
5968 * the first one.
5969 */
5970 if (recoveryTarget == RECOVERY_TARGET_NAME &&
5971 rmid == RM_XLOG_ID && info == XLOG_RESTORE_POINT)
5972 {
5973 xl_restore_point *recordRestorePointData;
5974
5975 recordRestorePointData = (xl_restore_point *) XLogRecGetData(record);
5976
5977 if (strcmp(recordRestorePointData->rp_name, recoveryTargetName) == 0)
5978 {
5979 recoveryStopAfter = true;
5980 recoveryStopXid = InvalidTransactionId;
5981 recoveryStopLSN = InvalidXLogRecPtr;
5982 (void) getRecordTimestamp(record, &recoveryStopTime);
5983 strlcpy(recoveryStopName, recordRestorePointData->rp_name, MAXFNAMELEN);
5984
5985 ereport(LOG,
5986 (errmsg("recovery stopping at restore point \"%s\", time %s",
5987 recoveryStopName,
5988 timestamptz_to_str(recoveryStopTime))));
5989 return true;
5990 }
5991 }
5992
5993 /* Check if the target LSN has been reached */
5994 if (recoveryTarget == RECOVERY_TARGET_LSN &&
5995 recoveryTargetInclusive &&
5996 record->ReadRecPtr >= recoveryTargetLSN)
5997 {
5998 recoveryStopAfter = true;
5999 recoveryStopXid = InvalidTransactionId;
6000 recoveryStopLSN = record->ReadRecPtr;
6001 recoveryStopTime = 0;
6002 recoveryStopName[0] = '\0';
6003 ereport(LOG,
6004 (errmsg("recovery stopping after WAL location (LSN) \"%X/%X\"",
6005 (uint32) (recoveryStopLSN >> 32),
6006 (uint32) recoveryStopLSN)));
6007 return true;
6008 }
6009
6010 if (rmid != RM_XACT_ID)
6011 return false;
6012
6013 xact_info = info & XLOG_XACT_OPMASK;
6014
6015 if (xact_info == XLOG_XACT_COMMIT ||
6016 xact_info == XLOG_XACT_COMMIT_PREPARED ||
6017 xact_info == XLOG_XACT_ABORT ||
6018 xact_info == XLOG_XACT_ABORT_PREPARED)
6019 {
6020 TransactionId recordXid;
6021
6022 /* Update the last applied transaction timestamp */
6023 if (getRecordTimestamp(record, &recordXtime))
6024 SetLatestXTime(recordXtime);
6025
6026 /* Extract the XID of the committed/aborted transaction */
6027 if (xact_info == XLOG_XACT_COMMIT_PREPARED)
6028 {
6029 xl_xact_commit *xlrec = (xl_xact_commit *) XLogRecGetData(record);
6030 xl_xact_parsed_commit parsed;
6031
6032 ParseCommitRecord(XLogRecGetInfo(record),
6033 xlrec,
6034 &parsed);
6035 recordXid = parsed.twophase_xid;
6036 }
6037 else if (xact_info == XLOG_XACT_ABORT_PREPARED)
6038 {
6039 xl_xact_abort *xlrec = (xl_xact_abort *) XLogRecGetData(record);
6040 xl_xact_parsed_abort parsed;
6041
6042 ParseAbortRecord(XLogRecGetInfo(record),
6043 xlrec,
6044 &parsed);
6045 recordXid = parsed.twophase_xid;
6046 }
6047 else
6048 recordXid = XLogRecGetXid(record);
6049
6050 /*
6051 * There can be only one transaction end record with this exact
6052 * transactionid
6053 *
6054 * when testing for an xid, we MUST test for equality only, since
6055 * transactions are numbered in the order they start, not the order
6056 * they complete. A higher numbered xid will complete before you about
6057 * 50% of the time...
6058 */
6059 if (recoveryTarget == RECOVERY_TARGET_XID && recoveryTargetInclusive &&
6060 recordXid == recoveryTargetXid)
6061 {
6062 recoveryStopAfter = true;
6063 recoveryStopXid = recordXid;
6064 recoveryStopTime = recordXtime;
6065 recoveryStopLSN = InvalidXLogRecPtr;
6066 recoveryStopName[0] = '\0';
6067
6068 if (xact_info == XLOG_XACT_COMMIT ||
6069 xact_info == XLOG_XACT_COMMIT_PREPARED)
6070 {
6071 ereport(LOG,
6072 (errmsg("recovery stopping after commit of transaction %u, time %s",
6073 recoveryStopXid,
6074 timestamptz_to_str(recoveryStopTime))));
6075 }
6076 else if (xact_info == XLOG_XACT_ABORT ||
6077 xact_info == XLOG_XACT_ABORT_PREPARED)
6078 {
6079 ereport(LOG,
6080 (errmsg("recovery stopping after abort of transaction %u, time %s",
6081 recoveryStopXid,
6082 timestamptz_to_str(recoveryStopTime))));
6083 }
6084 return true;
6085 }
6086 }
6087
6088 /* Check if we should stop as soon as reaching consistency */
6089 if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE && reachedConsistency)
6090 {
6091 ereport(LOG,
6092 (errmsg("recovery stopping after reaching consistency")));
6093
6094 recoveryStopAfter = true;
6095 recoveryStopXid = InvalidTransactionId;
6096 recoveryStopTime = 0;
6097 recoveryStopLSN = InvalidXLogRecPtr;
6098 recoveryStopName[0] = '\0';
6099 return true;
6100 }
6101
6102 return false;
6103 }
6104
6105 /*
6106 * Wait until shared recoveryPause flag is cleared.
6107 *
6108 * XXX Could also be done with shared latch, avoiding the pg_usleep loop.
6109 * Probably not worth the trouble though. This state shouldn't be one that
6110 * anyone cares about server power consumption in.
6111 */
6112 static void
6113 recoveryPausesHere(void)
6114 {
6115 /* Don't pause unless users can connect! */
6116 if (!LocalHotStandbyActive)
6117 return;
6118
6119 ereport(LOG,
6120 (errmsg("recovery has paused"),
6121 errhint("Execute pg_wal_replay_resume() to continue.")));
6122
6123 while (RecoveryIsPaused())
6124 {
6125 pg_usleep(1000000L); /* 1000 ms */
6126 HandleStartupProcInterrupts();
6127 }
6128 }
6129
6130 bool
6131 RecoveryIsPaused(void)
6132 {
6133 bool recoveryPause;
6134
6135 SpinLockAcquire(&XLogCtl->info_lck);
6136 recoveryPause = XLogCtl->recoveryPause;
6137 SpinLockRelease(&XLogCtl->info_lck);
6138
6139 return recoveryPause;
6140 }
6141
6142 void
6143 SetRecoveryPause(bool recoveryPause)
6144 {
6145 SpinLockAcquire(&XLogCtl->info_lck);
6146 XLogCtl->recoveryPause = recoveryPause;
6147 SpinLockRelease(&XLogCtl->info_lck);
6148 }
6149
6150 /*
6151 * When recovery_min_apply_delay is set, we wait long enough to make sure
6152 * certain record types are applied at least that interval behind the master.
6153 *
6154 * Returns true if we waited.
6155 *
6156 * Note that the delay is calculated between the WAL record log time and
6157 * the current time on standby. We would prefer to keep track of when this
6158 * standby received each WAL record, which would allow a more consistent
6159 * approach and one not affected by time synchronisation issues, but that
6160 * is significantly more effort and complexity for little actual gain in
6161 * usability.
6162 */
6163 static bool
6164 recoveryApplyDelay(XLogReaderState *record)
6165 {
6166 uint8 xact_info;
6167 TimestampTz xtime;
6168 long msecs;
6169
6170 /* nothing to do if no delay configured */
6171 if (recovery_min_apply_delay <= 0)
6172 return false;
6173
6174 /* no delay is applied on a database not yet consistent */
6175 if (!reachedConsistency)
6176 return false;
6177
6178 /*
6179 * Is it a COMMIT record?
6180 *
6181 * We deliberately choose not to delay aborts since they have no effect on
6182 * MVCC. We already allow replay of records that don't have a timestamp,
6183 * so there is already opportunity for issues caused by early conflicts on
6184 * standbys.
6185 */
6186 if (XLogRecGetRmid(record) != RM_XACT_ID)
6187 return false;
6188
6189 xact_info = XLogRecGetInfo(record) & XLOG_XACT_OPMASK;
6190
6191 if (xact_info != XLOG_XACT_COMMIT &&
6192 xact_info != XLOG_XACT_COMMIT_PREPARED)
6193 return false;
6194
6195 if (!getRecordTimestamp(record, &xtime))
6196 return false;
6197
6198 recoveryDelayUntilTime =
6199 TimestampTzPlusMilliseconds(xtime, recovery_min_apply_delay);
6200
6201 /*
6202 * Exit without arming the latch if it's already past time to apply this
6203 * record
6204 */
6205 msecs = TimestampDifferenceMilliseconds(GetCurrentTimestamp(),
6206 recoveryDelayUntilTime);
6207 if (msecs <= 0)
6208 return false;
6209
6210 while (true)
6211 {
6212 ResetLatch(&XLogCtl->recoveryWakeupLatch);
6213
6214 /*
6215 * This might change recovery_min_apply_delay or the trigger file's
6216 * location.
6217 */
6218 HandleStartupProcInterrupts();
6219
6220 if (CheckForStandbyTrigger())
6221 break;
6222
6223 /*
6224 * Recalculate recoveryDelayUntilTime as recovery_min_apply_delay
6225 * could have changed while waiting in this loop.
6226 */
6227 recoveryDelayUntilTime =
6228 TimestampTzPlusMilliseconds(xtime, recovery_min_apply_delay);
6229
6230 /*
6231 * Wait for difference between GetCurrentTimestamp() and
6232 * recoveryDelayUntilTime
6233 */
6234 msecs = TimestampDifferenceMilliseconds(GetCurrentTimestamp(),
6235 recoveryDelayUntilTime);
6236
6237 if (msecs <= 0)
6238 break;
6239
6240 elog(DEBUG2, "recovery apply delay %ld milliseconds", msecs);
6241
6242 (void) WaitLatch(&XLogCtl->recoveryWakeupLatch,
6243 WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
6244 msecs,
6245 WAIT_EVENT_RECOVERY_APPLY_DELAY);
6246 }
6247 return true;
6248 }
6249
6250 /*
6251 * Save timestamp of latest processed commit/abort record.
6252 *
6253 * We keep this in XLogCtl, not a simple static variable, so that it can be
6254 * seen by processes other than the startup process. Note in particular
6255 * that CreateRestartPoint is executed in the checkpointer.
6256 */
6257 static void
6258 SetLatestXTime(TimestampTz xtime)
6259 {
6260 SpinLockAcquire(&XLogCtl->info_lck);
6261 XLogCtl->recoveryLastXTime = xtime;
6262 SpinLockRelease(&XLogCtl->info_lck);
6263 }
6264
6265 /*
6266 * Fetch timestamp of latest processed commit/abort record.
6267 */
6268 TimestampTz
6269 GetLatestXTime(void)
6270 {
6271 TimestampTz xtime;
6272
6273 SpinLockAcquire(&XLogCtl->info_lck);
6274 xtime = XLogCtl->recoveryLastXTime;
6275 SpinLockRelease(&XLogCtl->info_lck);
6276
6277 return xtime;
6278 }
6279
6280 /*
6281 * Save timestamp of the next chunk of WAL records to apply.
6282 *
6283 * We keep this in XLogCtl, not a simple static variable, so that it can be
6284 * seen by all backends.
6285 */
6286 static void
6287 SetCurrentChunkStartTime(TimestampTz xtime)
6288 {
6289 SpinLockAcquire(&XLogCtl->info_lck);
6290 XLogCtl->currentChunkStartTime = xtime;
6291 SpinLockRelease(&XLogCtl->info_lck);
6292 }
6293
6294 /*
6295 * Fetch timestamp of latest processed commit/abort record.
6296 * Startup process maintains an accurate local copy in XLogReceiptTime
6297 */
6298 TimestampTz
6299 GetCurrentChunkReplayStartTime(void)
6300 {
6301 TimestampTz xtime;
6302
6303 SpinLockAcquire(&XLogCtl->info_lck);
6304 xtime = XLogCtl->currentChunkStartTime;
6305 SpinLockRelease(&XLogCtl->info_lck);
6306
6307 return xtime;
6308 }
6309
6310 /*
6311 * Returns time of receipt of current chunk of XLOG data, as well as
6312 * whether it was received from streaming replication or from archives.
6313 */
6314 void
6315 GetXLogReceiptTime(TimestampTz *rtime, bool *fromStream)
6316 {
6317 /*
6318 * This must be executed in the startup process, since we don't export the
6319 * relevant state to shared memory.
6320 */
6321 Assert(InRecovery);
6322
6323 *rtime = XLogReceiptTime;
6324 *fromStream = (XLogReceiptSource == XLOG_FROM_STREAM);
6325 }
6326
6327 /*
6328 * Note that text field supplied is a parameter name and does not require
6329 * translation
6330 */
6331 #define RecoveryRequiresIntParameter(param_name, currValue, minValue) \
6332 do { \
6333 if ((currValue) < (minValue)) \
6334 ereport(ERROR, \
6335 (errcode(ERRCODE_INVALID_PARAMETER_VALUE), \
6336 errmsg("hot standby is not possible because " \
6337 "%s = %d is a lower setting than on the master server " \
6338 "(its value was %d)", \
6339 param_name, \
6340 currValue, \
6341 minValue))); \
6342 } while(0)
6343
6344 /*
6345 * Check to see if required parameters are set high enough on this server
6346 * for various aspects of recovery operation.
6347 *
6348 * Note that all the parameters which this function tests need to be
6349 * listed in Administrator's Overview section in high-availability.sgml.
6350 * If you change them, don't forget to update the list.
6351 */
6352 static void
6353 CheckRequiredParameterValues(void)
6354 {
6355 /*
6356 * For archive recovery, the WAL must be generated with at least 'replica'
6357 * wal_level.
6358 */
6359 if (ArchiveRecoveryRequested && ControlFile->wal_level == WAL_LEVEL_MINIMAL)
6360 {
6361 ereport(WARNING,
6362 (errmsg("WAL was generated with wal_level=minimal, data may be missing"),
6363 errhint("This happens if you temporarily set wal_level=minimal without taking a new base backup.")));
6364 }
6365
6366 /*
6367 * For Hot Standby, the WAL must be generated with 'replica' mode, and we
6368 * must have at least as many backend slots as the primary.
6369 */
6370 if (ArchiveRecoveryRequested && EnableHotStandby)
6371 {
6372 if (ControlFile->wal_level < WAL_LEVEL_REPLICA)
6373 ereport(ERROR,
6374 (errmsg("hot standby is not possible because wal_level was not set to \"replica\" or higher on the master server"),
6375 errhint("Either set wal_level to \"replica\" on the master, or turn off hot_standby here.")));
6376
6377 /* We ignore autovacuum_max_workers when we make this test. */
6378 RecoveryRequiresIntParameter("max_connections",
6379 MaxConnections,
6380 ControlFile->MaxConnections);
6381 RecoveryRequiresIntParameter("max_worker_processes",
6382 max_worker_processes,
6383 ControlFile->max_worker_processes);
6384 RecoveryRequiresIntParameter("max_prepared_transactions",
6385 max_prepared_xacts,
6386 ControlFile->max_prepared_xacts);
6387 RecoveryRequiresIntParameter("max_locks_per_transaction",
6388 max_locks_per_xact,
6389 ControlFile->max_locks_per_xact);
6390 }
6391 }
6392
6393 /*
6394 * This must be called ONCE during postmaster or standalone-backend startup
6395 */
6396 void
6397 StartupXLOG(void)
6398 {
6399 XLogCtlInsert *Insert;
6400 CheckPoint checkPoint;
6401 bool wasShutdown;
6402 bool reachedStopPoint = false;
6403 bool haveBackupLabel = false;
6404 bool haveTblspcMap = false;
6405 XLogRecPtr RecPtr,
6406 checkPointLoc,
6407 EndOfLog;
6408 TimeLineID EndOfLogTLI;
6409 TimeLineID PrevTimeLineID;
6410 XLogRecord *record;
6411 TransactionId oldestActiveXID;
6412 bool backupEndRequired = false;
6413 bool backupFromStandby = false;
6414 DBState dbstate_at_startup;
6415 XLogReaderState *xlogreader;
6416 XLogPageReadPrivate private;
6417 bool fast_promoted = false;
6418 struct stat st;
6419
6420 /*
6421 * Verify XLOG status looks valid.
6422 */
6423 if (ControlFile->state < DB_SHUTDOWNED ||
6424 ControlFile->state > DB_IN_PRODUCTION ||
6425 !XRecOffIsValid(ControlFile->checkPoint))
6426 ereport(FATAL,
6427 (errmsg("control file contains invalid data")));
6428
6429 if (ControlFile->state == DB_SHUTDOWNED)
6430 {
6431 /* This is the expected case, so don't be chatty in standalone mode */
6432 ereport(IsPostmasterEnvironment ? LOG : NOTICE,
6433 (errmsg("database system was shut down at %s",
6434 str_time(ControlFile->time))));
6435 }
6436 else if (ControlFile->state == DB_SHUTDOWNED_IN_RECOVERY)
6437 ereport(LOG,
6438 (errmsg("database system was shut down in recovery at %s",
6439 str_time(ControlFile->time))));
6440 else if (ControlFile->state == DB_SHUTDOWNING)
6441 ereport(LOG,
6442 (errmsg("database system shutdown was interrupted; last known up at %s",
6443 str_time(ControlFile->time))));
6444 else if (ControlFile->state == DB_IN_CRASH_RECOVERY)
6445 ereport(LOG,
6446 (errmsg("database system was interrupted while in recovery at %s",
6447 str_time(ControlFile->time)),
6448 errhint("This probably means that some data is corrupted and"
6449 " you will have to use the last backup for recovery.")));
6450 else if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY)
6451 ereport(LOG,
6452 (errmsg("database system was interrupted while in recovery at log time %s",
6453 str_time(ControlFile->checkPointCopy.time)),
6454 errhint("If this has occurred more than once some data might be corrupted"
6455 " and you might need to choose an earlier recovery target.")));
6456 else if (ControlFile->state == DB_IN_PRODUCTION)
6457 ereport(LOG,
6458 (errmsg("database system was interrupted; last known up at %s",
6459 str_time(ControlFile->time))));
6460
6461 /* This is just to allow attaching to startup process with a debugger */
6462 #ifdef XLOG_REPLAY_DELAY
6463 if (ControlFile->state != DB_SHUTDOWNED)
6464 pg_usleep(60000000L);
6465 #endif
6466
6467 /*
6468 * Verify that pg_wal and pg_wal/archive_status exist. In cases where
6469 * someone has performed a copy for PITR, these directories may have been
6470 * excluded and need to be re-created.
6471 */
6472 ValidateXLOGDirectoryStructure();
6473
6474 /*
6475 * If we previously crashed, there might be data which we had written,
6476 * intending to fsync it, but which we had not actually fsync'd yet.
6477 * Therefore, a power failure in the near future might cause earlier
6478 * unflushed writes to be lost, even though more recent data written to
6479 * disk from here on would be persisted. To avoid that, fsync the entire
6480 * data directory.
6481 */
6482 if (ControlFile->state != DB_SHUTDOWNED &&
6483 ControlFile->state != DB_SHUTDOWNED_IN_RECOVERY)
6484 SyncDataDirectory();
6485
6486 /*
6487 * Initialize on the assumption we want to recover to the latest timeline
6488 * that's active according to pg_control.
6489 */
6490 if (ControlFile->minRecoveryPointTLI >
6491 ControlFile->checkPointCopy.ThisTimeLineID)
6492 recoveryTargetTLI = ControlFile->minRecoveryPointTLI;
6493 else
6494 recoveryTargetTLI = ControlFile->checkPointCopy.ThisTimeLineID;
6495
6496 /*
6497 * Check for recovery control file, and if so set up state for offline
6498 * recovery
6499 */
6500 readRecoveryCommandFile();
6501
6502 /*
6503 * Save archive_cleanup_command in shared memory so that other processes
6504 * can see it.
6505 */
6506 strlcpy(XLogCtl->archiveCleanupCommand,
6507 archiveCleanupCommand ? archiveCleanupCommand : "",
6508 sizeof(XLogCtl->archiveCleanupCommand));
6509
6510 if (ArchiveRecoveryRequested)
6511 {
6512 if (StandbyModeRequested)
6513 ereport(LOG,
6514 (errmsg("entering standby mode")));
6515 else if (recoveryTarget == RECOVERY_TARGET_XID)
6516 ereport(LOG,
6517 (errmsg("starting point-in-time recovery to XID %u",
6518 recoveryTargetXid)));
6519 else if (recoveryTarget == RECOVERY_TARGET_TIME)
6520 ereport(LOG,
6521 (errmsg("starting point-in-time recovery to %s",
6522 timestamptz_to_str(recoveryTargetTime))));
6523 else if (recoveryTarget == RECOVERY_TARGET_NAME)
6524 ereport(LOG,
6525 (errmsg("starting point-in-time recovery to \"%s\"",
6526 recoveryTargetName)));
6527 else if (recoveryTarget == RECOVERY_TARGET_LSN)
6528 ereport(LOG,
6529 (errmsg("starting point-in-time recovery to WAL location (LSN) \"%X/%X\"",
6530 (uint32) (recoveryTargetLSN >> 32),
6531 (uint32) recoveryTargetLSN)));
6532 else if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE)
6533 ereport(LOG,
6534 (errmsg("starting point-in-time recovery to earliest consistent point")));
6535 else
6536 ereport(LOG,
6537 (errmsg("starting archive recovery")));
6538 }
6539
6540 /*
6541 * Take ownership of the wakeup latch if we're going to sleep during
6542 * recovery.
6543 */
6544 if (ArchiveRecoveryRequested)
6545 OwnLatch(&XLogCtl->recoveryWakeupLatch);
6546
6547 /* Set up XLOG reader facility */
6548 MemSet(&private, 0, sizeof(XLogPageReadPrivate));
6549 xlogreader = XLogReaderAllocate(wal_segment_size, &XLogPageRead, &private);
6550 if (!xlogreader)
6551 ereport(ERROR,
6552 (errcode(ERRCODE_OUT_OF_MEMORY),
6553 errmsg("out of memory"),
6554 errdetail("Failed while allocating a WAL reading processor.")));
6555 xlogreader->system_identifier = ControlFile->system_identifier;
6556
6557 /*
6558 * Allocate two page buffers dedicated to WAL consistency checks. We do
6559 * it this way, rather than just making static arrays, for two reasons:
6560 * (1) no need to waste the storage in most instantiations of the backend;
6561 * (2) a static char array isn't guaranteed to have any particular
6562 * alignment, whereas palloc() will provide MAXALIGN'd storage.
6563 */
6564 replay_image_masked = (char *) palloc(BLCKSZ);
6565 master_image_masked = (char *) palloc(BLCKSZ);
6566
6567 if (read_backup_label(&checkPointLoc, &backupEndRequired,
6568 &backupFromStandby))
6569 {
6570 List *tablespaces = NIL;
6571
6572 /*
6573 * Archive recovery was requested, and thanks to the backup label
6574 * file, we know how far we need to replay to reach consistency. Enter
6575 * archive recovery directly.
6576 */
6577 InArchiveRecovery = true;
6578 if (StandbyModeRequested)
6579 StandbyMode = true;
6580
6581 /*
6582 * When a backup_label file is present, we want to roll forward from
6583 * the checkpoint it identifies, rather than using pg_control.
6584 */
6585 record = ReadCheckpointRecord(xlogreader, checkPointLoc, 0, true);
6586 if (record != NULL)
6587 {
6588 memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
6589 wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN);
6590 ereport(DEBUG1,
6591 (errmsg("checkpoint record is at %X/%X",
6592 (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
6593 InRecovery = true; /* force recovery even if SHUTDOWNED */
6594
6595 /*
6596 * Make sure that REDO location exists. This may not be the case
6597 * if there was a crash during an online backup, which left a
6598 * backup_label around that references a WAL segment that's
6599 * already been archived.
6600 */
6601 if (checkPoint.redo < checkPointLoc)
6602 {
6603 if (!ReadRecord(xlogreader, checkPoint.redo, LOG, false))
6604 ereport(FATAL,
6605 (errmsg("could not find redo location referenced by checkpoint record"),
6606 errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
6607 }
6608 }
6609 else
6610 {
6611 ereport(FATAL,
6612 (errmsg("could not locate required checkpoint record"),
6613 errhint("If you are not restoring from a backup, try removing the file \"%s/backup_label\".", DataDir)));
6614 wasShutdown = false; /* keep compiler quiet */
6615 }
6616
6617 /* read the tablespace_map file if present and create symlinks. */
6618 if (read_tablespace_map(&tablespaces))
6619 {
6620 ListCell *lc;
6621
6622 foreach(lc, tablespaces)
6623 {
6624 tablespaceinfo *ti = lfirst(lc);
6625 char *linkloc;
6626
6627 linkloc = psprintf("pg_tblspc/%s", ti->oid);
6628
6629 /*
6630 * Remove the existing symlink if any and Create the symlink
6631 * under PGDATA.
6632 */
6633 remove_tablespace_symlink(linkloc);
6634
6635 if (symlink(ti->path, linkloc) < 0)
6636 ereport(ERROR,
6637 (errcode_for_file_access(),
6638 errmsg("could not create symbolic link \"%s\": %m",
6639 linkloc)));
6640
6641 pfree(ti->oid);
6642 pfree(ti->path);
6643 pfree(ti);
6644 }
6645
6646 /* set flag to delete it later */
6647 haveTblspcMap = true;
6648 }
6649
6650 /* set flag to delete it later */
6651 haveBackupLabel = true;
6652 }
6653 else
6654 {
6655 /*
6656 * If tablespace_map file is present without backup_label file, there
6657 * is no use of such file. There is no harm in retaining it, but it
6658 * is better to get rid of the map file so that we don't have any
6659 * redundant file in data directory and it will avoid any sort of
6660 * confusion. It seems prudent though to just rename the file out of
6661 * the way rather than delete it completely, also we ignore any error
6662 * that occurs in rename operation as even if map file is present
6663 * without backup_label file, it is harmless.
6664 */
6665 if (stat(TABLESPACE_MAP, &st) == 0)
6666 {
6667 unlink(TABLESPACE_MAP_OLD);
6668 if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
6669 ereport(LOG,
6670 (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
6671 TABLESPACE_MAP, BACKUP_LABEL_FILE),
6672 errdetail("File \"%s\" was renamed to \"%s\".",
6673 TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
6674 else
6675 ereport(LOG,
6676 (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
6677 TABLESPACE_MAP, BACKUP_LABEL_FILE),
6678 errdetail("Could not rename file \"%s\" to \"%s\": %m.",
6679 TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
6680 }
6681
6682 /*
6683 * It's possible that archive recovery was requested, but we don't
6684 * know how far we need to replay the WAL before we reach consistency.
6685 * This can happen for example if a base backup is taken from a
6686 * running server using an atomic filesystem snapshot, without calling
6687 * pg_start/stop_backup. Or if you just kill a running master server
6688 * and put it into archive recovery by creating a recovery.conf file.
6689 *
6690 * Our strategy in that case is to perform crash recovery first,
6691 * replaying all the WAL present in pg_wal, and only enter archive
6692 * recovery after that.
6693 *
6694 * But usually we already know how far we need to replay the WAL (up
6695 * to minRecoveryPoint, up to backupEndPoint, or until we see an
6696 * end-of-backup record), and we can enter archive recovery directly.
6697 */
6698 if (ArchiveRecoveryRequested &&
6699 (ControlFile->minRecoveryPoint != InvalidXLogRecPtr ||
6700 ControlFile->backupEndRequired ||
6701 ControlFile->backupEndPoint != InvalidXLogRecPtr ||
6702 ControlFile->state == DB_SHUTDOWNED))
6703 {
6704 InArchiveRecovery = true;
6705 if (StandbyModeRequested)
6706 StandbyMode = true;
6707 }
6708
6709 /* Get the last valid checkpoint record. */
6710 checkPointLoc = ControlFile->checkPoint;
6711 RedoStartLSN = ControlFile->checkPointCopy.redo;
6712 record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, true);
6713 if (record != NULL)
6714 {
6715 ereport(DEBUG1,
6716 (errmsg("checkpoint record is at %X/%X",
6717 (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
6718 }
6719 else
6720 {
6721 /*
6722 * We used to attempt to go back to a secondary checkpoint record
6723 * here, but only when not in standby_mode. We now just fail if we
6724 * can't read the last checkpoint because this allows us to
6725 * simplify processing around checkpoints.
6726 */
6727 ereport(PANIC,
6728 (errmsg("could not locate a valid checkpoint record")));
6729 }
6730 memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
6731 wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN);
6732 }
6733
6734 /*
6735 * Clear out any old relcache cache files. This is *necessary* if we do
6736 * any WAL replay, since that would probably result in the cache files
6737 * being out of sync with database reality. In theory we could leave them
6738 * in place if the database had been cleanly shut down, but it seems
6739 * safest to just remove them always and let them be rebuilt during the
6740 * first backend startup. These files needs to be removed from all
6741 * directories including pg_tblspc, however the symlinks are created only
6742 * after reading tablespace_map file in case of archive recovery from
6743 * backup, so needs to clear old relcache files here after creating
6744 * symlinks.
6745 */
6746 RelationCacheInitFileRemove();
6747
6748 /*
6749 * If the location of the checkpoint record is not on the expected
6750 * timeline in the history of the requested timeline, we cannot proceed:
6751 * the backup is not part of the history of the requested timeline.
6752 */
6753 Assert(expectedTLEs); /* was initialized by reading checkpoint
6754 * record */
6755 if (tliOfPointInHistory(checkPointLoc, expectedTLEs) !=
6756 checkPoint.ThisTimeLineID)
6757 {
6758 XLogRecPtr switchpoint;
6759
6760 /*
6761 * tliSwitchPoint will throw an error if the checkpoint's timeline is
6762 * not in expectedTLEs at all.
6763 */
6764 switchpoint = tliSwitchPoint(ControlFile->checkPointCopy.ThisTimeLineID, expectedTLEs, NULL);
6765 ereport(FATAL,
6766 (errmsg("requested timeline %u is not a child of this server's history",
6767 recoveryTargetTLI),
6768 errdetail("Latest checkpoint is at %X/%X on timeline %u, but in the history of the requested timeline, the server forked off from that timeline at %X/%X.",
6769 (uint32) (ControlFile->checkPoint >> 32),
6770 (uint32) ControlFile->checkPoint,
6771 ControlFile->checkPointCopy.ThisTimeLineID,
6772 (uint32) (switchpoint >> 32),
6773 (uint32) switchpoint)));
6774 }
6775
6776 /*
6777 * The min recovery point should be part of the requested timeline's
6778 * history, too.
6779 */
6780 if (!XLogRecPtrIsInvalid(ControlFile->minRecoveryPoint) &&
6781 tliOfPointInHistory(ControlFile->minRecoveryPoint - 1, expectedTLEs) !=
6782 ControlFile->minRecoveryPointTLI)
6783 ereport(FATAL,
6784 (errmsg("requested timeline %u does not contain minimum recovery point %X/%X on timeline %u",
6785 recoveryTargetTLI,
6786 (uint32) (ControlFile->minRecoveryPoint >> 32),
6787 (uint32) ControlFile->minRecoveryPoint,
6788 ControlFile->minRecoveryPointTLI)));
6789
6790 LastRec = RecPtr = checkPointLoc;
6791
6792 ereport(DEBUG1,
6793 (errmsg_internal("redo record is at %X/%X; shutdown %s",
6794 (uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
6795 wasShutdown ? "true" : "false")));
6796 ereport(DEBUG1,
6797 (errmsg_internal("next transaction ID: %u:%u; next OID: %u",
6798 checkPoint.nextXidEpoch, checkPoint.nextXid,
6799 checkPoint.nextOid)));
6800 ereport(DEBUG1,
6801 (errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
6802 checkPoint.nextMulti, checkPoint.nextMultiOffset)));
6803 ereport(DEBUG1,
6804 (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
6805 checkPoint.oldestXid, checkPoint.oldestXidDB)));
6806 ereport(DEBUG1,
6807 (errmsg_internal("oldest MultiXactId: %u, in database %u",
6808 checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
6809 ereport(DEBUG1,
6810 (errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
6811 checkPoint.oldestCommitTsXid,
6812 checkPoint.newestCommitTsXid)));
6813 if (!TransactionIdIsNormal(checkPoint.nextXid))
6814 ereport(PANIC,
6815 (errmsg("invalid next transaction ID")));
6816
6817 /* initialize shared memory variables from the checkpoint record */
6818 ShmemVariableCache->nextXid = checkPoint.nextXid;
6819 ShmemVariableCache->nextOid = checkPoint.nextOid;
6820 ShmemVariableCache->oidCount = 0;
6821 MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
6822 AdvanceOldestClogXid(checkPoint.oldestXid);
6823 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
6824 SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB, true);
6825 SetCommitTsLimit(checkPoint.oldestCommitTsXid,
6826 checkPoint.newestCommitTsXid);
6827 XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
6828 XLogCtl->ckptXid = checkPoint.nextXid;
6829
6830 /*
6831 * Initialize replication slots, before there's a chance to remove
6832 * required resources.
6833 */
6834 StartupReplicationSlots();
6835
6836 /*
6837 * Startup logical state, needs to be setup now so we have proper data
6838 * during crash recovery.
6839 */
6840 StartupReorderBuffer();
6841
6842 /*
6843 * Startup MultiXact. We need to do this early to be able to replay
6844 * truncations.
6845 */
6846 StartupMultiXact();
6847
6848 /*
6849 * Ditto for commit timestamps. Activate the facility if the setting is
6850 * enabled in the control file, as there should be no tracking of commit
6851 * timestamps done when the setting was disabled. This facility can be
6852 * started or stopped when replaying a XLOG_PARAMETER_CHANGE record.
6853 */
6854 if (ControlFile->track_commit_timestamp)
6855 StartupCommitTs();
6856
6857 /*
6858 * Recover knowledge about replay progress of known replication partners.
6859 */
6860 StartupReplicationOrigin();
6861
6862 /*
6863 * Initialize unlogged LSN. On a clean shutdown, it's restored from the
6864 * control file. On recovery, all unlogged relations are blown away, so
6865 * the unlogged LSN counter can be reset too.
6866 */
6867 if (ControlFile->state == DB_SHUTDOWNED)
6868 XLogCtl->unloggedLSN = ControlFile->unloggedLSN;
6869 else
6870 XLogCtl->unloggedLSN = 1;
6871
6872 /*
6873 * We must replay WAL entries using the same TimeLineID they were created
6874 * under, so temporarily adopt the TLI indicated by the checkpoint (see
6875 * also xlog_redo()).
6876 */
6877 ThisTimeLineID = checkPoint.ThisTimeLineID;
6878
6879 /*
6880 * Copy any missing timeline history files between 'now' and the recovery
6881 * target timeline from archive to pg_wal. While we don't need those files
6882 * ourselves - the history file of the recovery target timeline covers all
6883 * the previous timelines in the history too - a cascading standby server
6884 * might be interested in them. Or, if you archive the WAL from this
6885 * server to a different archive than the master, it'd be good for all the
6886 * history files to get archived there after failover, so that you can use
6887 * one of the old timelines as a PITR target. Timeline history files are
6888 * small, so it's better to copy them unnecessarily than not copy them and
6889 * regret later.
6890 */
6891 restoreTimeLineHistoryFiles(ThisTimeLineID, recoveryTargetTLI);
6892
6893 /*
6894 * Before running in recovery, scan pg_twophase and fill in its status to
6895 * be able to work on entries generated by redo. Doing a scan before
6896 * taking any recovery action has the merit to discard any 2PC files that
6897 * are newer than the first record to replay, saving from any conflicts at
6898 * replay. This avoids as well any subsequent scans when doing recovery
6899 * of the on-disk two-phase data.
6900 */
6901 restoreTwoPhaseData();
6902
6903 lastFullPageWrites = checkPoint.fullPageWrites;
6904
6905 RedoRecPtr = XLogCtl->RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
6906 doPageWrites = lastFullPageWrites;
6907
6908 if (RecPtr < checkPoint.redo)
6909 ereport(PANIC,
6910 (errmsg("invalid redo in checkpoint record")));
6911
6912 /*
6913 * Check whether we need to force recovery from WAL. If it appears to
6914 * have been a clean shutdown and we did not have a recovery.conf file,
6915 * then assume no recovery needed.
6916 */
6917 if (checkPoint.redo < RecPtr)
6918 {
6919 if (wasShutdown)
6920 ereport(PANIC,
6921 (errmsg("invalid redo record in shutdown checkpoint")));
6922 InRecovery = true;
6923 }
6924 else if (ControlFile->state != DB_SHUTDOWNED)
6925 InRecovery = true;
6926 else if (ArchiveRecoveryRequested)
6927 {
6928 /* force recovery due to presence of recovery.conf */
6929 InRecovery = true;
6930 }
6931
6932 /*
6933 * Start recovery assuming that the final record isn't lost.
6934 */
6935 abortedRecPtr = InvalidXLogRecPtr;
6936 missingContrecPtr = InvalidXLogRecPtr;
6937
6938 /* REDO */
6939 if (InRecovery)
6940 {
6941 int rmid;
6942
6943 /*
6944 * Update pg_control to show that we are recovering and to show the
6945 * selected checkpoint as the place we are starting from. We also mark
6946 * pg_control with any minimum recovery stop point obtained from a
6947 * backup history file.
6948 */
6949 dbstate_at_startup = ControlFile->state;
6950 if (InArchiveRecovery)
6951 {
6952 ControlFile->state = DB_IN_ARCHIVE_RECOVERY;
6953
6954 SpinLockAcquire(&XLogCtl->info_lck);
6955 XLogCtl->SharedRecoveryState = RECOVERY_STATE_ARCHIVE;
6956 SpinLockRelease(&XLogCtl->info_lck);
6957 }
6958 else
6959 {
6960 ereport(LOG,
6961 (errmsg("database system was not properly shut down; "
6962 "automatic recovery in progress")));
6963 if (recoveryTargetTLI > ControlFile->checkPointCopy.ThisTimeLineID)
6964 ereport(LOG,
6965 (errmsg("crash recovery starts in timeline %u "
6966 "and has target timeline %u",
6967 ControlFile->checkPointCopy.ThisTimeLineID,
6968 recoveryTargetTLI)));
6969 ControlFile->state = DB_IN_CRASH_RECOVERY;
6970
6971 SpinLockAcquire(&XLogCtl->info_lck);
6972 XLogCtl->SharedRecoveryState = RECOVERY_STATE_CRASH;
6973 SpinLockRelease(&XLogCtl->info_lck);
6974 }
6975 ControlFile->checkPoint = checkPointLoc;
6976 ControlFile->checkPointCopy = checkPoint;
6977 if (InArchiveRecovery)
6978 {
6979 /* initialize minRecoveryPoint if not set yet */
6980 if (ControlFile->minRecoveryPoint < checkPoint.redo)
6981 {
6982 ControlFile->minRecoveryPoint = checkPoint.redo;
6983 ControlFile->minRecoveryPointTLI = checkPoint.ThisTimeLineID;
6984 }
6985 }
6986
6987 /*
6988 * Set backupStartPoint if we're starting recovery from a base backup.
6989 *
6990 * Also set backupEndPoint and use minRecoveryPoint as the backup end
6991 * location if we're starting recovery from a base backup which was
6992 * taken from a standby. In this case, the database system status in
6993 * pg_control must indicate that the database was already in recovery.
6994 * Usually that will be DB_IN_ARCHIVE_RECOVERY but also can be
6995 * DB_SHUTDOWNED_IN_RECOVERY if recovery previously was interrupted
6996 * before reaching this point; e.g. because restore_command or
6997 * primary_conninfo were faulty.
6998 *
6999 * Any other state indicates that the backup somehow became corrupted
7000 * and we can't sensibly continue with recovery.
7001 */
7002 if (haveBackupLabel)
7003 {
7004 ControlFile->backupStartPoint = checkPoint.redo;
7005 ControlFile->backupEndRequired = backupEndRequired;
7006
7007 if (backupFromStandby)
7008 {
7009 if (dbstate_at_startup != DB_IN_ARCHIVE_RECOVERY &&
7010 dbstate_at_startup != DB_SHUTDOWNED_IN_RECOVERY)
7011 ereport(FATAL,
7012 (errmsg("backup_label contains data inconsistent with control file"),
7013 errhint("This means that the backup is corrupted and you will "
7014 "have to use another backup for recovery.")));
7015 ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
7016 }
7017 }
7018 ControlFile->time = (pg_time_t) time(NULL);
7019 /* No need to hold ControlFileLock yet, we aren't up far enough */
7020 UpdateControlFile();
7021
7022 /*
7023 * Initialize our local copy of minRecoveryPoint. When doing crash
7024 * recovery we want to replay up to the end of WAL. Particularly, in
7025 * the case of a promoted standby minRecoveryPoint value in the
7026 * control file is only updated after the first checkpoint. However,
7027 * if the instance crashes before the first post-recovery checkpoint
7028 * is completed then recovery will use a stale location causing the
7029 * startup process to think that there are still invalid page
7030 * references when checking for data consistency.
7031 */
7032 if (InArchiveRecovery)
7033 {
7034 minRecoveryPoint = ControlFile->minRecoveryPoint;
7035 minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
7036 }
7037 else
7038 {
7039 minRecoveryPoint = InvalidXLogRecPtr;
7040 minRecoveryPointTLI = 0;
7041 }
7042
7043 /*
7044 * Reset pgstat data, because it may be invalid after recovery.
7045 */
7046 pgstat_reset_all();
7047
7048 /*
7049 * If there was a backup label file, it's done its job and the info
7050 * has now been propagated into pg_control. We must get rid of the
7051 * label file so that if we crash during recovery, we'll pick up at
7052 * the latest recovery restartpoint instead of going all the way back
7053 * to the backup start point. It seems prudent though to just rename
7054 * the file out of the way rather than delete it completely.
7055 */
7056 if (haveBackupLabel)
7057 {
7058 unlink(BACKUP_LABEL_OLD);
7059 durable_rename(BACKUP_LABEL_FILE, BACKUP_LABEL_OLD, FATAL);
7060 }
7061
7062 /*
7063 * If there was a tablespace_map file, it's done its job and the
7064 * symlinks have been created. We must get rid of the map file so
7065 * that if we crash during recovery, we don't create symlinks again.
7066 * It seems prudent though to just rename the file out of the way
7067 * rather than delete it completely.
7068 */
7069 if (haveTblspcMap)
7070 {
7071 unlink(TABLESPACE_MAP_OLD);
7072 durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, FATAL);
7073 }
7074
7075 /* Check that the GUCs used to generate the WAL allow recovery */
7076 CheckRequiredParameterValues();
7077
7078 /*
7079 * We're in recovery, so unlogged relations may be trashed and must be
7080 * reset. This should be done BEFORE allowing Hot Standby
7081 * connections, so that read-only backends don't try to read whatever
7082 * garbage is left over from before.
7083 */
7084 ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
7085
7086 /*
7087 * Likewise, delete any saved transaction snapshot files that got left
7088 * behind by crashed backends.
7089 */
7090 DeleteAllExportedSnapshotFiles();
7091
7092 /*
7093 * Initialize for Hot Standby, if enabled. We won't let backends in
7094 * yet, not until we've reached the min recovery point specified in
7095 * control file and we've established a recovery snapshot from a
7096 * running-xacts WAL record.
7097 */
7098 if (ArchiveRecoveryRequested && EnableHotStandby)
7099 {
7100 TransactionId *xids;
7101 int nxids;
7102
7103 ereport(DEBUG1,
7104 (errmsg("initializing for hot standby")));
7105
7106 InitRecoveryTransactionEnvironment();
7107
7108 if (wasShutdown)
7109 oldestActiveXID = PrescanPreparedTransactions(&xids, &nxids);
7110 else
7111 oldestActiveXID = checkPoint.oldestActiveXid;
7112 Assert(TransactionIdIsValid(oldestActiveXID));
7113
7114 /* Tell procarray about the range of xids it has to deal with */
7115 ProcArrayInitRecovery(ShmemVariableCache->nextXid);
7116
7117 /*
7118 * Startup commit log and subtrans only. MultiXact and commit
7119 * timestamp have already been started up and other SLRUs are not
7120 * maintained during recovery and need not be started yet.
7121 */
7122 StartupCLOG();
7123 StartupSUBTRANS(oldestActiveXID);
7124
7125 /*
7126 * If we're beginning at a shutdown checkpoint, we know that
7127 * nothing was running on the master at this point. So fake-up an
7128 * empty running-xacts record and use that here and now. Recover
7129 * additional standby state for prepared transactions.
7130 */
7131 if (wasShutdown)
7132 {
7133 RunningTransactionsData running;
7134 TransactionId latestCompletedXid;
7135
7136 /*
7137 * Construct a RunningTransactions snapshot representing a
7138 * shut down server, with only prepared transactions still
7139 * alive. We're never overflowed at this point because all
7140 * subxids are listed with their parent prepared transactions.
7141 */
7142 running.xcnt = nxids;
7143 running.subxcnt = 0;
7144 running.subxid_overflow = false;
7145 running.nextXid = checkPoint.nextXid;
7146 running.oldestRunningXid = oldestActiveXID;
7147 latestCompletedXid = checkPoint.nextXid;
7148 TransactionIdRetreat(latestCompletedXid);
7149 Assert(TransactionIdIsNormal(latestCompletedXid));
7150 running.latestCompletedXid = latestCompletedXid;
7151 running.xids = xids;
7152
7153 ProcArrayApplyRecoveryInfo(&running);
7154
7155 StandbyRecoverPreparedTransactions();
7156 }
7157 }
7158
7159 /* Initialize resource managers */
7160 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
7161 {
7162 if (RmgrTable[rmid].rm_startup != NULL)
7163 RmgrTable[rmid].rm_startup();
7164 }
7165
7166 /*
7167 * Initialize shared variables for tracking progress of WAL replay, as
7168 * if we had just replayed the record before the REDO location (or the
7169 * checkpoint record itself, if it's a shutdown checkpoint).
7170 */
7171 SpinLockAcquire(&XLogCtl->info_lck);
7172 if (checkPoint.redo < RecPtr)
7173 XLogCtl->replayEndRecPtr = checkPoint.redo;
7174 else
7175 XLogCtl->replayEndRecPtr = EndRecPtr;
7176 XLogCtl->replayEndTLI = ThisTimeLineID;
7177 XLogCtl->lastReplayedEndRecPtr = XLogCtl->replayEndRecPtr;
7178 XLogCtl->lastReplayedTLI = XLogCtl->replayEndTLI;
7179 XLogCtl->recoveryLastXTime = 0;
7180 XLogCtl->currentChunkStartTime = 0;
7181 XLogCtl->recoveryPause = false;
7182 SpinLockRelease(&XLogCtl->info_lck);
7183
7184 /* Also ensure XLogReceiptTime has a sane value */
7185 XLogReceiptTime = GetCurrentTimestamp();
7186
7187 /*
7188 * Let postmaster know we've started redo now, so that it can launch
7189 * checkpointer to perform restartpoints. We don't bother during
7190 * crash recovery as restartpoints can only be performed during
7191 * archive recovery. And we'd like to keep crash recovery simple, to
7192 * avoid introducing bugs that could affect you when recovering after
7193 * crash.
7194 *
7195 * After this point, we can no longer assume that we're the only
7196 * process in addition to postmaster! Also, fsync requests are
7197 * subsequently to be handled by the checkpointer, not locally.
7198 */
7199 if (ArchiveRecoveryRequested && IsUnderPostmaster)
7200 {
7201 PublishStartupProcessInformation();
7202 SetForwardFsyncRequests();
7203 SendPostmasterSignal(PMSIGNAL_RECOVERY_STARTED);
7204 bgwriterLaunched = true;
7205 }
7206
7207 /*
7208 * Allow read-only connections immediately if we're consistent
7209 * already.
7210 */
7211 CheckRecoveryConsistency();
7212
7213 /*
7214 * Find the first record that logically follows the checkpoint --- it
7215 * might physically precede it, though.
7216 */
7217 if (checkPoint.redo < RecPtr)
7218 {
7219 /* back up to find the record */
7220 record = ReadRecord(xlogreader, checkPoint.redo, PANIC, false);
7221 }
7222 else
7223 {
7224 /* just have to read next record after CheckPoint */
7225 record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false);
7226 }
7227
7228 if (record != NULL)
7229 {
7230 ErrorContextCallback errcallback;
7231 TimestampTz xtime;
7232
7233 InRedo = true;
7234
7235 ereport(LOG,
7236 (errmsg("redo starts at %X/%X",
7237 (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
7238
7239 /*
7240 * main redo apply loop
7241 */
7242 do
7243 {
7244 bool switchedTLI = false;
7245
7246 #ifdef WAL_DEBUG
7247 if (XLOG_DEBUG ||
7248 (rmid == RM_XACT_ID && trace_recovery_messages <= DEBUG2) ||
7249 (rmid != RM_XACT_ID && trace_recovery_messages <= DEBUG3))
7250 {
7251 StringInfoData buf;
7252
7253 initStringInfo(&buf);
7254 appendStringInfo(&buf, "REDO @ %X/%X; LSN %X/%X: ",
7255 (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr,
7256 (uint32) (EndRecPtr >> 32), (uint32) EndRecPtr);
7257 xlog_outrec(&buf, xlogreader);
7258 appendStringInfoString(&buf, " - ");
7259 xlog_outdesc(&buf, xlogreader);
7260 elog(LOG, "%s", buf.data);
7261 pfree(buf.data);
7262 }
7263 #endif
7264
7265 /* Handle interrupt signals of startup process */
7266 HandleStartupProcInterrupts();
7267
7268 /*
7269 * Pause WAL replay, if requested by a hot-standby session via
7270 * SetRecoveryPause().
7271 *
7272 * Note that we intentionally don't take the info_lck spinlock
7273 * here. We might therefore read a slightly stale value of
7274 * the recoveryPause flag, but it can't be very stale (no
7275 * worse than the last spinlock we did acquire). Since a
7276 * pause request is a pretty asynchronous thing anyway,
7277 * possibly responding to it one WAL record later than we
7278 * otherwise would is a minor issue, so it doesn't seem worth
7279 * adding another spinlock cycle to prevent that.
7280 */
7281 if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
7282 recoveryPausesHere();
7283
7284 /*
7285 * Have we reached our recovery target?
7286 */
7287 if (recoveryStopsBefore(xlogreader))
7288 {
7289 reachedStopPoint = true; /* see below */
7290 break;
7291 }
7292
7293 /*
7294 * If we've been asked to lag the master, wait on latch until
7295 * enough time has passed.
7296 */
7297 if (recoveryApplyDelay(xlogreader))
7298 {
7299 /*
7300 * We test for paused recovery again here. If user sets
7301 * delayed apply, it may be because they expect to pause
7302 * recovery in case of problems, so we must test again
7303 * here otherwise pausing during the delay-wait wouldn't
7304 * work.
7305 */
7306 if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
7307 recoveryPausesHere();
7308 }
7309
7310 /* Setup error traceback support for ereport() */
7311 errcallback.callback = rm_redo_error_callback;
7312 errcallback.arg = (void *) xlogreader;
7313 errcallback.previous = error_context_stack;
7314 error_context_stack = &errcallback;
7315
7316 /*
7317 * ShmemVariableCache->nextXid must be beyond record's xid.
7318 *
7319 * We don't expect anyone else to modify nextXid, hence we
7320 * don't need to hold a lock while examining it. We still
7321 * acquire the lock to modify it, though.
7322 */
7323 if (TransactionIdFollowsOrEquals(record->xl_xid,
7324 ShmemVariableCache->nextXid))
7325 {
7326 LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
7327 ShmemVariableCache->nextXid = record->xl_xid;
7328 TransactionIdAdvance(ShmemVariableCache->nextXid);
7329 LWLockRelease(XidGenLock);
7330 }
7331
7332 /*
7333 * Before replaying this record, check if this record causes
7334 * the current timeline to change. The record is already
7335 * considered to be part of the new timeline, so we update
7336 * ThisTimeLineID before replaying it. That's important so
7337 * that replayEndTLI, which is recorded as the minimum
7338 * recovery point's TLI if recovery stops after this record,
7339 * is set correctly.
7340 */
7341 if (record->xl_rmid == RM_XLOG_ID)
7342 {
7343 TimeLineID newTLI = ThisTimeLineID;
7344 TimeLineID prevTLI = ThisTimeLineID;
7345 uint8 info = record->xl_info & ~XLR_INFO_MASK;
7346
7347 if (info == XLOG_CHECKPOINT_SHUTDOWN)
7348 {
7349 CheckPoint checkPoint;
7350
7351 memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
7352 newTLI = checkPoint.ThisTimeLineID;
7353 prevTLI = checkPoint.PrevTimeLineID;
7354 }
7355 else if (info == XLOG_END_OF_RECOVERY)
7356 {
7357 xl_end_of_recovery xlrec;
7358
7359 memcpy(&xlrec, XLogRecGetData(xlogreader), sizeof(xl_end_of_recovery));
7360 newTLI = xlrec.ThisTimeLineID;
7361 prevTLI = xlrec.PrevTimeLineID;
7362 }
7363
7364 if (newTLI != ThisTimeLineID)
7365 {
7366 /* Check that it's OK to switch to this TLI */
7367 checkTimeLineSwitch(EndRecPtr, newTLI, prevTLI);
7368
7369 /* Following WAL records should be run with new TLI */
7370 ThisTimeLineID = newTLI;
7371 switchedTLI = true;
7372 }
7373 }
7374
7375 /*
7376 * Update shared replayEndRecPtr before replaying this record,
7377 * so that XLogFlush will update minRecoveryPoint correctly.
7378 */
7379 SpinLockAcquire(&XLogCtl->info_lck);
7380 XLogCtl->replayEndRecPtr = EndRecPtr;
7381 XLogCtl->replayEndTLI = ThisTimeLineID;
7382 SpinLockRelease(&XLogCtl->info_lck);
7383
7384 /*
7385 * If we are attempting to enter Hot Standby mode, process
7386 * XIDs we see
7387 */
7388 if (standbyState >= STANDBY_INITIALIZED &&
7389 TransactionIdIsValid(record->xl_xid))
7390 RecordKnownAssignedTransactionIds(record->xl_xid);
7391
7392 /* Now apply the WAL record itself */
7393 RmgrTable[record->xl_rmid].rm_redo(xlogreader);
7394
7395 /*
7396 * After redo, check whether the backup pages associated with
7397 * the WAL record are consistent with the existing pages. This
7398 * check is done only if consistency check is enabled for this
7399 * record.
7400 */
7401 if ((record->xl_info & XLR_CHECK_CONSISTENCY) != 0)
7402 checkXLogConsistency(xlogreader);
7403
7404 /* Pop the error context stack */
7405 error_context_stack = errcallback.previous;
7406
7407 /*
7408 * Update lastReplayedEndRecPtr after this record has been
7409 * successfully replayed.
7410 */
7411 SpinLockAcquire(&XLogCtl->info_lck);
7412 XLogCtl->lastReplayedEndRecPtr = EndRecPtr;
7413 XLogCtl->lastReplayedTLI = ThisTimeLineID;
7414 SpinLockRelease(&XLogCtl->info_lck);
7415
7416 /*
7417 * If rm_redo called XLogRequestWalReceiverReply, then we wake
7418 * up the receiver so that it notices the updated
7419 * lastReplayedEndRecPtr and sends a reply to the master.
7420 */
7421 if (doRequestWalReceiverReply)
7422 {
7423 doRequestWalReceiverReply = false;
7424 WalRcvForceReply();
7425 }
7426
7427 /* Remember this record as the last-applied one */
7428 LastRec = ReadRecPtr;
7429
7430 /* Allow read-only connections if we're consistent now */
7431 CheckRecoveryConsistency();
7432
7433 /* Is this a timeline switch? */
7434 if (switchedTLI)
7435 {
7436 /*
7437 * Before we continue on the new timeline, clean up any
7438 * (possibly bogus) future WAL segments on the old
7439 * timeline.
7440 */
7441 RemoveNonParentXlogFiles(EndRecPtr, ThisTimeLineID);
7442
7443 /*
7444 * Wake up any walsenders to notice that we are on a new
7445 * timeline.
7446 */
7447 if (switchedTLI && AllowCascadeReplication())
7448 WalSndWakeup();
7449 }
7450
7451 /* Exit loop if we reached inclusive recovery target */
7452 if (recoveryStopsAfter(xlogreader))
7453 {
7454 reachedStopPoint = true;
7455 break;
7456 }
7457
7458 /* Else, try to fetch the next WAL record */
7459 record = ReadRecord(xlogreader, InvalidXLogRecPtr, LOG, false);
7460 } while (record != NULL);
7461
7462 /*
7463 * end of main redo apply loop
7464 */
7465
7466 if (reachedStopPoint)
7467 {
7468 if (!reachedConsistency)
7469 ereport(FATAL,
7470 (errmsg("requested recovery stop point is before consistent recovery point")));
7471
7472 /*
7473 * This is the last point where we can restart recovery with a
7474 * new recovery target, if we shutdown and begin again. After
7475 * this, Resource Managers may choose to do permanent
7476 * corrective actions at end of recovery.
7477 */
7478 switch (recoveryTargetAction)
7479 {
7480 case RECOVERY_TARGET_ACTION_SHUTDOWN:
7481
7482 /*
7483 * exit with special return code to request shutdown
7484 * of postmaster. Log messages issued from
7485 * postmaster.
7486 */
7487 proc_exit(3);
7488
7489 case RECOVERY_TARGET_ACTION_PAUSE:
7490 SetRecoveryPause(true);
7491 recoveryPausesHere();
7492
7493 /* drop into promote */
7494
7495 case RECOVERY_TARGET_ACTION_PROMOTE:
7496 break;
7497 }
7498 }
7499
7500 /* Allow resource managers to do any required cleanup. */
7501 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
7502 {
7503 if (RmgrTable[rmid].rm_cleanup != NULL)
7504 RmgrTable[rmid].rm_cleanup();
7505 }
7506
7507 ereport(LOG,
7508 (errmsg("redo done at %X/%X",
7509 (uint32) (ReadRecPtr >> 32), (uint32) ReadRecPtr)));
7510 xtime = GetLatestXTime();
7511 if (xtime)
7512 ereport(LOG,
7513 (errmsg("last completed transaction was at log time %s",
7514 timestamptz_to_str(xtime))));
7515
7516 InRedo = false;
7517 }
7518 else
7519 {
7520 /* there are no WAL records following the checkpoint */
7521 ereport(LOG,
7522 (errmsg("redo is not required")));
7523 }
7524 }
7525
7526 /*
7527 * Kill WAL receiver, if it's still running, before we continue to write
7528 * the startup checkpoint and aborted-contrecord records. It will trump
7529 * over these records and subsequent ones if it's still alive when we
7530 * start writing WAL.
7531 */
7532 ShutdownWalRcv();
7533
7534 /*
7535 * Reset unlogged relations to the contents of their INIT fork. This is
7536 * done AFTER recovery is complete so as to include any unlogged relations
7537 * created during recovery, but BEFORE recovery is marked as having
7538 * completed successfully. Otherwise we'd not retry if any of the post
7539 * end-of-recovery steps fail.
7540 */
7541 if (InRecovery)
7542 ResetUnloggedRelations(UNLOGGED_RELATION_INIT);
7543
7544 /*
7545 * We don't need the latch anymore. It's not strictly necessary to disown
7546 * it, but let's do it for the sake of tidiness.
7547 */
7548 if (ArchiveRecoveryRequested)
7549 DisownLatch(&XLogCtl->recoveryWakeupLatch);
7550
7551 /*
7552 * We are now done reading the xlog from stream. Turn off streaming
7553 * recovery to force fetching the files (which would be required at end of
7554 * recovery, e.g., timeline history file) from archive or pg_wal.
7555 *
7556 * Note that standby mode must be turned off after killing WAL receiver,
7557 * i.e., calling ShutdownWalRcv().
7558 */
7559 Assert(!WalRcvStreaming());
7560 StandbyMode = false;
7561
7562 /*
7563 * Determine where to start writing WAL next.
7564 *
7565 * When recovery ended in an incomplete record, write a WAL record about
7566 * that and continue after it. In all other cases, re-fetch the last
7567 * valid or last applied record, so we can identify the exact endpoint of
7568 * what we consider the valid portion of WAL.
7569 */
7570 record = ReadRecord(xlogreader, LastRec, PANIC, false);
7571 EndOfLog = EndRecPtr;
7572
7573 /*
7574 * EndOfLogTLI is the TLI in the filename of the XLOG segment containing
7575 * the end-of-log. It could be different from the timeline that EndOfLog
7576 * nominally belongs to, if there was a timeline switch in that segment,
7577 * and we were reading the old WAL from a segment belonging to a higher
7578 * timeline.
7579 */
7580 EndOfLogTLI = xlogreader->readPageTLI;
7581
7582 /*
7583 * Complain if we did not roll forward far enough to render the backup
7584 * dump consistent. Note: it is indeed okay to look at the local variable
7585 * minRecoveryPoint here, even though ControlFile->minRecoveryPoint might
7586 * be further ahead --- ControlFile->minRecoveryPoint cannot have been
7587 * advanced beyond the WAL we processed.
7588 */
7589 if (InRecovery &&
7590 (EndOfLog < minRecoveryPoint ||
7591 !XLogRecPtrIsInvalid(ControlFile->backupStartPoint)))
7592 {
7593 /*
7594 * Ran off end of WAL before reaching end-of-backup WAL record, or
7595 * minRecoveryPoint. That's usually a bad sign, indicating that you
7596 * tried to recover from an online backup but never called
7597 * pg_stop_backup(), or you didn't archive all the WAL up to that
7598 * point. However, this also happens in crash recovery, if the system
7599 * crashes while an online backup is in progress. We must not treat
7600 * that as an error, or the database will refuse to start up.
7601 */
7602 if (ArchiveRecoveryRequested || ControlFile->backupEndRequired)
7603 {
7604 if (ControlFile->backupEndRequired)
7605 ereport(FATAL,
7606 (errmsg("WAL ends before end of online backup"),
7607 errhint("All WAL generated while online backup was taken must be available at recovery.")));
7608 else if (!XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
7609 ereport(FATAL,
7610 (errmsg("WAL ends before end of online backup"),
7611 errhint("Online backup started with pg_start_backup() must be ended with pg_stop_backup(), and all WAL up to that point must be available at recovery.")));
7612 else
7613 ereport(FATAL,
7614 (errmsg("WAL ends before consistent recovery point")));
7615 }
7616 }
7617
7618 /*
7619 * Pre-scan prepared transactions to find out the range of XIDs present.
7620 * This information is not quite needed yet, but it is positioned here so
7621 * as potential problems are detected before any on-disk change is done.
7622 */
7623 oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
7624
7625 /*
7626 * Consider whether we need to assign a new timeline ID.
7627 *
7628 * If we are doing an archive recovery, we always assign a new ID. This
7629 * handles a couple of issues. If we stopped short of the end of WAL
7630 * during recovery, then we are clearly generating a new timeline and must
7631 * assign it a unique new ID. Even if we ran to the end, modifying the
7632 * current last segment is problematic because it may result in trying to
7633 * overwrite an already-archived copy of that segment, and we encourage
7634 * DBAs to make their archive_commands reject that. We can dodge the
7635 * problem by making the new active segment have a new timeline ID.
7636 *
7637 * In a normal crash recovery, we can just extend the timeline we were in.
7638 */
7639 PrevTimeLineID = ThisTimeLineID;
7640 if (ArchiveRecoveryRequested)
7641 {
7642 char reason[200];
7643 char recoveryPath[MAXPGPATH];
7644
7645 Assert(InArchiveRecovery);
7646
7647 ThisTimeLineID = findNewestTimeLine(recoveryTargetTLI) + 1;
7648 ereport(LOG,
7649 (errmsg("selected new timeline ID: %u", ThisTimeLineID)));
7650
7651 /*
7652 * Create a comment for the history file to explain why and where
7653 * timeline changed.
7654 */
7655 if (recoveryTarget == RECOVERY_TARGET_XID)
7656 snprintf(reason, sizeof(reason),
7657 "%s transaction %u",
7658 recoveryStopAfter ? "after" : "before",
7659 recoveryStopXid);
7660 else if (recoveryTarget == RECOVERY_TARGET_TIME)
7661 snprintf(reason, sizeof(reason),
7662 "%s %s\n",
7663 recoveryStopAfter ? "after" : "before",
7664 timestamptz_to_str(recoveryStopTime));
7665 else if (recoveryTarget == RECOVERY_TARGET_LSN)
7666 snprintf(reason, sizeof(reason),
7667 "%s LSN %X/%X\n",
7668 recoveryStopAfter ? "after" : "before",
7669 (uint32) (recoveryStopLSN >> 32),
7670 (uint32) recoveryStopLSN);
7671 else if (recoveryTarget == RECOVERY_TARGET_NAME)
7672 snprintf(reason, sizeof(reason),
7673 "at restore point \"%s\"",
7674 recoveryStopName);
7675 else if (recoveryTarget == RECOVERY_TARGET_IMMEDIATE)
7676 snprintf(reason, sizeof(reason), "reached consistency");
7677 else
7678 snprintf(reason, sizeof(reason), "no recovery target specified");
7679
7680 /*
7681 * We are now done reading the old WAL. Turn off archive fetching if
7682 * it was active, and make a writable copy of the last WAL segment.
7683 * (Note that we also have a copy of the last block of the old WAL in
7684 * readBuf; we will use that below.)
7685 */
7686 exitArchiveRecovery(EndOfLogTLI, EndOfLog);
7687
7688 /*
7689 * Write the timeline history file, and have it archived. After this
7690 * point (or rather, as soon as the file is archived), the timeline
7691 * will appear as "taken" in the WAL archive and to any standby
7692 * servers. If we crash before actually switching to the new
7693 * timeline, standby servers will nevertheless think that we switched
7694 * to the new timeline, and will try to connect to the new timeline.
7695 * To minimize the window for that, try to do as little as possible
7696 * between here and writing the end-of-recovery record.
7697 */
7698 writeTimeLineHistory(ThisTimeLineID, recoveryTargetTLI,
7699 EndRecPtr, reason);
7700
7701 /*
7702 * Since there might be a partial WAL segment named RECOVERYXLOG, get
7703 * rid of it.
7704 */
7705 snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
7706 unlink(recoveryPath); /* ignore any error */
7707
7708 /* Get rid of any remaining recovered timeline-history file, too */
7709 snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYHISTORY");
7710 unlink(recoveryPath); /* ignore any error */
7711 }
7712
7713 /* Save the selected TimeLineID in shared memory, too */
7714 XLogCtl->ThisTimeLineID = ThisTimeLineID;
7715 XLogCtl->PrevTimeLineID = PrevTimeLineID;
7716
7717 /*
7718 * Actually, if WAL ended in an incomplete record, skip the parts that
7719 * made it through and start writing after the portion that persisted.
7720 * (It's critical to first write an OVERWRITE_CONTRECORD message, which
7721 * we'll do as soon as we're open for writing new WAL.)
7722 */
7723 if (!XLogRecPtrIsInvalid(missingContrecPtr))
7724 {
7725 Assert(!XLogRecPtrIsInvalid(abortedRecPtr));
7726 EndOfLog = missingContrecPtr;
7727 }
7728
7729 /*
7730 * Prepare to write WAL starting at EndOfLog location, and init xlog
7731 * buffer cache using the block containing the last record from the
7732 * previous incarnation.
7733 */
7734 Insert = &XLogCtl->Insert;
7735 Insert->PrevBytePos = XLogRecPtrToBytePos(LastRec);
7736 Insert->CurrBytePos = XLogRecPtrToBytePos(EndOfLog);
7737
7738 /*
7739 * Tricky point here: readBuf contains the *last* block that the LastRec
7740 * record spans, not the one it starts in. The last block is indeed the
7741 * one we want to use.
7742 */
7743 if (EndOfLog % XLOG_BLCKSZ != 0)
7744 {
7745 char *page;
7746 int len;
7747 int firstIdx;
7748 XLogRecPtr pageBeginPtr;
7749
7750 pageBeginPtr = EndOfLog - (EndOfLog % XLOG_BLCKSZ);
7751 Assert(readOff == XLogSegmentOffset(pageBeginPtr, wal_segment_size));
7752
7753 firstIdx = XLogRecPtrToBufIdx(EndOfLog);
7754
7755 /* Copy the valid part of the last block, and zero the rest */
7756 page = &XLogCtl->pages[firstIdx * XLOG_BLCKSZ];
7757 len = EndOfLog % XLOG_BLCKSZ;
7758 memcpy(page, xlogreader->readBuf, len);
7759 memset(page + len, 0, XLOG_BLCKSZ - len);
7760
7761 XLogCtl->xlblocks[firstIdx] = pageBeginPtr + XLOG_BLCKSZ;
7762 XLogCtl->InitializedUpTo = pageBeginPtr + XLOG_BLCKSZ;
7763 }
7764 else
7765 {
7766 /*
7767 * There is no partial block to copy. Just set InitializedUpTo, and
7768 * let the first attempt to insert a log record to initialize the next
7769 * buffer.
7770 */
7771 XLogCtl->InitializedUpTo = EndOfLog;
7772 }
7773
7774 LogwrtResult.Write = LogwrtResult.Flush = EndOfLog;
7775
7776 XLogCtl->LogwrtResult = LogwrtResult;
7777
7778 XLogCtl->LogwrtRqst.Write = EndOfLog;
7779 XLogCtl->LogwrtRqst.Flush = EndOfLog;
7780
7781 LocalSetXLogInsertAllowed();
7782
7783 /* If necessary, write overwrite-contrecord before doing anything else */
7784 if (!XLogRecPtrIsInvalid(abortedRecPtr))
7785 {
7786 Assert(!XLogRecPtrIsInvalid(missingContrecPtr));
7787 CreateOverwriteContrecordRecord(abortedRecPtr);
7788 abortedRecPtr = InvalidXLogRecPtr;
7789 missingContrecPtr = InvalidXLogRecPtr;
7790 }
7791
7792 /*
7793 * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
7794 * record before resource manager writes cleanup WAL records or checkpoint
7795 * record is written.
7796 */
7797 Insert->fullPageWrites = lastFullPageWrites;
7798 UpdateFullPageWrites();
7799 LocalXLogInsertAllowed = -1;
7800
7801 if (InRecovery)
7802 {
7803 /*
7804 * Perform a checkpoint to update all our recovery activity to disk.
7805 *
7806 * Note that we write a shutdown checkpoint rather than an on-line
7807 * one. This is not particularly critical, but since we may be
7808 * assigning a new TLI, using a shutdown checkpoint allows us to have
7809 * the rule that TLI only changes in shutdown checkpoints, which
7810 * allows some extra error checking in xlog_redo.
7811 *
7812 * In fast promotion, only create a lightweight end-of-recovery record
7813 * instead of a full checkpoint. A checkpoint is requested later,
7814 * after we're fully out of recovery mode and already accepting
7815 * queries.
7816 */
7817 if (bgwriterLaunched)
7818 {
7819 if (fast_promote)
7820 {
7821 checkPointLoc = ControlFile->checkPoint;
7822
7823 /*
7824 * Confirm the last checkpoint is available for us to recover
7825 * from if we fail.
7826 */
7827 record = ReadCheckpointRecord(xlogreader, checkPointLoc, 1, false);
7828 if (record != NULL)
7829 {
7830 fast_promoted = true;
7831
7832 /*
7833 * Insert a special WAL record to mark the end of
7834 * recovery, since we aren't doing a checkpoint. That
7835 * means that the checkpointer process may likely be in
7836 * the middle of a time-smoothed restartpoint and could
7837 * continue to be for minutes after this. That sounds
7838 * strange, but the effect is roughly the same and it
7839 * would be stranger to try to come out of the
7840 * restartpoint and then checkpoint. We request a
7841 * checkpoint later anyway, just for safety.
7842 */
7843 CreateEndOfRecoveryRecord();
7844 }
7845 }
7846
7847 if (!fast_promoted)
7848 RequestCheckpoint(CHECKPOINT_END_OF_RECOVERY |
7849 CHECKPOINT_IMMEDIATE |
7850 CHECKPOINT_WAIT);
7851 }
7852 else
7853 CreateCheckPoint(CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_IMMEDIATE);
7854
7855 /*
7856 * And finally, execute the recovery_end_command, if any.
7857 */
7858 if (recoveryEndCommand)
7859 ExecuteRecoveryCommand(recoveryEndCommand,
7860 "recovery_end_command",
7861 true);
7862 }
7863
7864 if (ArchiveRecoveryRequested)
7865 {
7866 /*
7867 * We switched to a new timeline. Clean up segments on the old
7868 * timeline.
7869 *
7870 * If there are any higher-numbered segments on the old timeline,
7871 * remove them. They might contain valid WAL, but they might also be
7872 * pre-allocated files containing garbage. In any case, they are not
7873 * part of the new timeline's history so we don't need them.
7874 */
7875 RemoveNonParentXlogFiles(EndOfLog, ThisTimeLineID);
7876
7877 /*
7878 * If the switch happened in the middle of a segment, what to do with
7879 * the last, partial segment on the old timeline? If we don't archive
7880 * it, and the server that created the WAL never archives it either
7881 * (e.g. because it was hit by a meteor), it will never make it to the
7882 * archive. That's OK from our point of view, because the new segment
7883 * that we created with the new TLI contains all the WAL from the old
7884 * timeline up to the switch point. But if you later try to do PITR to
7885 * the "missing" WAL on the old timeline, recovery won't find it in
7886 * the archive. It's physically present in the new file with new TLI,
7887 * but recovery won't look there when it's recovering to the older
7888 * timeline. On the other hand, if we archive the partial segment, and
7889 * the original server on that timeline is still running and archives
7890 * the completed version of the same segment later, it will fail. (We
7891 * used to do that in 9.4 and below, and it caused such problems).
7892 *
7893 * As a compromise, we rename the last segment with the .partial
7894 * suffix, and archive it. Archive recovery will never try to read
7895 * .partial segments, so they will normally go unused. But in the odd
7896 * PITR case, the administrator can copy them manually to the pg_wal
7897 * directory (removing the suffix). They can be useful in debugging,
7898 * too.
7899 *
7900 * If a .done or .ready file already exists for the old timeline,
7901 * however, we had already determined that the segment is complete, so
7902 * we can let it be archived normally. (In particular, if it was
7903 * restored from the archive to begin with, it's expected to have a
7904 * .done file).
7905 */
7906 if (XLogSegmentOffset(EndOfLog, wal_segment_size) != 0 &&
7907 XLogArchivingActive())
7908 {
7909 char origfname[MAXFNAMELEN];
7910 XLogSegNo endLogSegNo;
7911
7912 XLByteToPrevSeg(EndOfLog, endLogSegNo, wal_segment_size);
7913 XLogFileName(origfname, EndOfLogTLI, endLogSegNo, wal_segment_size);
7914
7915 if (!XLogArchiveIsReadyOrDone(origfname))
7916 {
7917 char origpath[MAXPGPATH];
7918 char partialfname[MAXFNAMELEN];
7919 char partialpath[MAXPGPATH];
7920
7921 XLogFilePath(origpath, EndOfLogTLI, endLogSegNo, wal_segment_size);
7922 snprintf(partialfname, MAXFNAMELEN, "%s.partial", origfname);
7923 snprintf(partialpath, MAXPGPATH, "%s.partial", origpath);
7924
7925 /*
7926 * Make sure there's no .done or .ready file for the .partial
7927 * file.
7928 */
7929 XLogArchiveCleanup(partialfname);
7930
7931 durable_rename(origpath, partialpath, ERROR);
7932 XLogArchiveNotify(partialfname);
7933 }
7934 }
7935 }
7936
7937 /*
7938 * Preallocate additional log files, if wanted.
7939 */
7940 PreallocXlogFiles(EndOfLog);
7941
7942 /*
7943 * Okay, we're officially UP.
7944 */
7945 InRecovery = false;
7946
7947 /* start the archive_timeout timer and LSN running */
7948 XLogCtl->lastSegSwitchTime = (pg_time_t) time(NULL);
7949 XLogCtl->lastSegSwitchLSN = EndOfLog;
7950
7951 /* also initialize latestCompletedXid, to nextXid - 1 */
7952 LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
7953 ShmemVariableCache->latestCompletedXid = ShmemVariableCache->nextXid;
7954 TransactionIdRetreat(ShmemVariableCache->latestCompletedXid);
7955 LWLockRelease(ProcArrayLock);
7956
7957 /*
7958 * Start up the commit log and subtrans, if not already done for hot
7959 * standby. (commit timestamps are started below, if necessary.)
7960 */
7961 if (standbyState == STANDBY_DISABLED)
7962 {
7963 StartupCLOG();
7964 StartupSUBTRANS(oldestActiveXID);
7965 }
7966
7967 /*
7968 * Perform end of recovery actions for any SLRUs that need it.
7969 */
7970 TrimCLOG();
7971 TrimMultiXact();
7972
7973 /* Reload shared-memory state for prepared transactions */
7974 RecoverPreparedTransactions();
7975
7976 /* Shut down xlogreader */
7977 if (readFile >= 0)
7978 {
7979 close(readFile);
7980 readFile = -1;
7981 }
7982 XLogReaderFree(xlogreader);
7983
7984 /*
7985 * If any of the critical GUCs have changed, log them before we allow
7986 * backends to write WAL.
7987 */
7988 LocalSetXLogInsertAllowed();
7989 XLogReportParameters();
7990
7991 /*
7992 * Local WAL inserts enabled, so it's time to finish initialization of
7993 * commit timestamp.
7994 */
7995 CompleteCommitTsInitialization();
7996
7997 /*
7998 * All done with end-of-recovery actions.
7999 *
8000 * Now allow backends to write WAL and update the control file status in
8001 * consequence. The boolean flag allowing backends to write WAL is
8002 * updated while holding ControlFileLock to prevent other backends to look
8003 * at an inconsistent state of the control file in shared memory. There
8004 * is still a small window during which backends can write WAL and the
8005 * control file is still referring to a system not in DB_IN_PRODUCTION
8006 * state while looking at the on-disk control file.
8007 *
8008 * Also, although the boolean flag to allow WAL is probably atomic in
8009 * itself, we use the info_lck here to ensure that there are no race
8010 * conditions concerning visibility of other recent updates to shared
8011 * memory.
8012 */
8013 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
8014 ControlFile->state = DB_IN_PRODUCTION;
8015 ControlFile->time = (pg_time_t) time(NULL);
8016
8017 SpinLockAcquire(&XLogCtl->info_lck);
8018 XLogCtl->SharedRecoveryState = RECOVERY_STATE_DONE;
8019 SpinLockRelease(&XLogCtl->info_lck);
8020
8021 UpdateControlFile();
8022 LWLockRelease(ControlFileLock);
8023
8024 /*
8025 * Shutdown the recovery environment. This must occur after
8026 * RecoverPreparedTransactions() (see notes in lock_twophase_recover())
8027 * and after switching SharedRecoveryState to RECOVERY_STATE_DONE so as
8028 * any session building a snapshot will not rely on KnownAssignedXids as
8029 * RecoveryInProgress() would return false at this stage. This is
8030 * particularly critical for prepared 2PC transactions, that would still
8031 * need to be included in snapshots once recovery has ended.
8032 */
8033 if (standbyState != STANDBY_DISABLED)
8034 ShutdownRecoveryTransactionEnvironment();
8035
8036 /*
8037 * If there were cascading standby servers connected to us, nudge any wal
8038 * sender processes to notice that we've been promoted.
8039 */
8040 WalSndWakeup();
8041
8042 /*
8043 * If this was a fast promotion, request an (online) checkpoint now. This
8044 * isn't required for consistency, but the last restartpoint might be far
8045 * back, and in case of a crash, recovering from it might take a longer
8046 * than is appropriate now that we're not in standby mode anymore.
8047 */
8048 if (fast_promoted)
8049 RequestCheckpoint(CHECKPOINT_FORCE);
8050 }
8051
8052 /*
8053 * Checks if recovery has reached a consistent state. When consistency is
8054 * reached and we have a valid starting standby snapshot, tell postmaster
8055 * that it can start accepting read-only connections.
8056 */
static void
CheckRecoveryConsistency(void)
{
	XLogRecPtr	lastReplayedEndRecPtr;

	/*
	 * During crash recovery, we don't reach a consistent state until we've
	 * replayed all the WAL.
	 */
	if (XLogRecPtrIsInvalid(minRecoveryPoint))
		return;

	/* If minRecoveryPoint is valid, we must be in archive recovery. */
	Assert(InArchiveRecovery);

	/*
	 * assume that we are called in the startup process, and hence don't need
	 * a lock to read lastReplayedEndRecPtr
	 */
	lastReplayedEndRecPtr = XLogCtl->lastReplayedEndRecPtr;

	/*
	 * Have we reached the point where our base backup was completed?
	 */
	if (!XLogRecPtrIsInvalid(ControlFile->backupEndPoint) &&
		ControlFile->backupEndPoint <= lastReplayedEndRecPtr)
	{
		/*
		 * We have reached the end of base backup, as indicated by pg_control.
		 * The data on disk is now consistent. Reset backupStartPoint and
		 * backupEndPoint, and update minRecoveryPoint to make sure we don't
		 * allow starting up at an earlier point even if recovery is stopped
		 * and restarted soon after this.
		 */
		elog(DEBUG1, "end of backup reached");

		/* All pg_control updates below happen under ControlFileLock. */
		LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);

		/* Never move minRecoveryPoint backwards. */
		if (ControlFile->minRecoveryPoint < lastReplayedEndRecPtr)
			ControlFile->minRecoveryPoint = lastReplayedEndRecPtr;

		ControlFile->backupStartPoint = InvalidXLogRecPtr;
		ControlFile->backupEndPoint = InvalidXLogRecPtr;
		ControlFile->backupEndRequired = false;
		UpdateControlFile();

		LWLockRelease(ControlFileLock);
	}

	/*
	 * Have we passed our safe starting point? Note that minRecoveryPoint is
	 * known to be incorrectly set if ControlFile->backupEndRequired, until
	 * the XLOG_BACKUP_END arrives to advise us of the correct
	 * minRecoveryPoint. All we know prior to that is that we're not
	 * consistent yet.
	 */
	if (!reachedConsistency && !ControlFile->backupEndRequired &&
		minRecoveryPoint <= lastReplayedEndRecPtr &&
		XLogRecPtrIsInvalid(ControlFile->backupStartPoint))
	{
		/*
		 * Check to see if the XLOG sequence contained any unresolved
		 * references to uninitialized pages.
		 */
		XLogCheckInvalidPages();

		reachedConsistency = true;
		ereport(LOG,
				(errmsg("consistent recovery state reached at %X/%X",
						(uint32) (lastReplayedEndRecPtr >> 32),
						(uint32) lastReplayedEndRecPtr)));
	}

	/*
	 * Have we got a valid starting snapshot that will allow queries to be
	 * run? If so, we can tell postmaster that the database is consistent now,
	 * enabling connections.
	 */
	if (standbyState == STANDBY_SNAPSHOT_READY &&
		!LocalHotStandbyActive &&
		reachedConsistency &&
		IsUnderPostmaster)
	{
		/* Publish hot-standby-active in shared memory under the spinlock. */
		SpinLockAcquire(&XLogCtl->info_lck);
		XLogCtl->SharedHotStandbyActive = true;
		SpinLockRelease(&XLogCtl->info_lck);

		LocalHotStandbyActive = true;

		/* Let postmaster start accepting read-only connections. */
		SendPostmasterSignal(PMSIGNAL_BEGIN_HOT_STANDBY);
	}
}
8148
8149 /*
8150 * Is the system still in recovery?
8151 *
8152 * Unlike testing InRecovery, this works in any process that's connected to
8153 * shared memory.
8154 *
8155 * As a side-effect, we initialize the local TimeLineID and RedoRecPtr
8156 * variables the first time we see that recovery is finished.
8157 */
bool
RecoveryInProgress(void)
{
	/*
	 * We check shared state each time only until we leave recovery mode. We
	 * can't re-enter recovery, so there's no need to keep checking after the
	 * shared variable has once been seen false.
	 */
	if (!LocalRecoveryInProgress)
		return false;
	else
	{
		/*
		 * use volatile pointer to make sure we make a fresh read of the
		 * shared variable.
		 */
		volatile XLogCtlData *xlogctl = XLogCtl;

		/* Any state other than RECOVERY_STATE_DONE still counts as recovery. */
		LocalRecoveryInProgress = (xlogctl->SharedRecoveryState != RECOVERY_STATE_DONE);

		/*
		 * Initialize TimeLineID and RedoRecPtr when we discover that recovery
		 * is finished. InitPostgres() relies upon this behaviour to ensure
		 * that InitXLOGAccess() is called at backend startup. (If you change
		 * this, see also LocalSetXLogInsertAllowed.)
		 */
		if (!LocalRecoveryInProgress)
		{
			/*
			 * If we just exited recovery, make sure we read TimeLineID and
			 * RedoRecPtr after SharedRecoveryState (for machines with weak
			 * memory ordering).
			 */
			pg_memory_barrier();
			InitXLOGAccess();
		}

		/*
		 * Note: We don't need a memory barrier when we're still in recovery.
		 * We might exit recovery immediately after return, so the caller
		 * can't rely on 'true' meaning that we're still in recovery anyway.
		 */

		return LocalRecoveryInProgress;
	}
}
8204
8205 /*
8206 * Returns current recovery state from shared memory.
8207 *
8208 * This returned state is kept consistent with the contents of the control
8209 * file. See details about the possible values of RecoveryState in xlog.h.
8210 */
8211 RecoveryState
8212 GetRecoveryState(void)
8213 {
8214 RecoveryState retval;
8215
8216 SpinLockAcquire(&XLogCtl->info_lck);
8217 retval = XLogCtl->SharedRecoveryState;
8218 SpinLockRelease(&XLogCtl->info_lck);
8219
8220 return retval;
8221 }
8222
8223 /*
8224 * Is HotStandby active yet? This is only important in special backends
8225 * since normal backends won't ever be able to connect until this returns
8226 * true. Postmaster knows this by way of signal, not via shared memory.
8227 *
8228 * Unlike testing standbyState, this works in any process that's connected to
8229 * shared memory. (And note that standbyState alone doesn't tell the truth
8230 * anyway.)
8231 */
8232 bool
8233 HotStandbyActive(void)
8234 {
8235 /*
8236 * We check shared state each time only until Hot Standby is active. We
8237 * can't de-activate Hot Standby, so there's no need to keep checking
8238 * after the shared variable has once been seen true.
8239 */
8240 if (LocalHotStandbyActive)
8241 return true;
8242 else
8243 {
8244 /* spinlock is essential on machines with weak memory ordering! */
8245 SpinLockAcquire(&XLogCtl->info_lck);
8246 LocalHotStandbyActive = XLogCtl->SharedHotStandbyActive;
8247 SpinLockRelease(&XLogCtl->info_lck);
8248
8249 return LocalHotStandbyActive;
8250 }
8251 }
8252
8253 /*
8254 * Like HotStandbyActive(), but to be used only in WAL replay code,
8255 * where we don't need to ask any other process what the state is.
8256 */
8257 bool
8258 HotStandbyActiveInReplay(void)
8259 {
8260 Assert(AmStartupProcess() || !IsPostmasterEnvironment);
8261 return LocalHotStandbyActive;
8262 }
8263
8264 /*
8265 * Is this process allowed to insert new WAL records?
8266 *
8267 * Ordinarily this is essentially equivalent to !RecoveryInProgress().
8268 * But we also have provisions for forcing the result "true" or "false"
8269 * within specific processes regardless of the global state.
8270 */
8271 bool
8272 XLogInsertAllowed(void)
8273 {
8274 /*
8275 * If value is "unconditionally true" or "unconditionally false", just
8276 * return it. This provides the normal fast path once recovery is known
8277 * done.
8278 */
8279 if (LocalXLogInsertAllowed >= 0)
8280 return (bool) LocalXLogInsertAllowed;
8281
8282 /*
8283 * Else, must check to see if we're still in recovery.
8284 */
8285 if (RecoveryInProgress())
8286 return false;
8287
8288 /*
8289 * On exit from recovery, reset to "unconditionally true", since there is
8290 * no need to keep checking.
8291 */
8292 LocalXLogInsertAllowed = 1;
8293 return true;
8294 }
8295
8296 /*
8297 * Make XLogInsertAllowed() return true in the current process only.
8298 *
8299 * Note: it is allowed to switch LocalXLogInsertAllowed back to -1 later,
8300 * and even call LocalSetXLogInsertAllowed() again after that.
8301 */
8302 static void
8303 LocalSetXLogInsertAllowed(void)
8304 {
8305 Assert(LocalXLogInsertAllowed == -1);
8306 LocalXLogInsertAllowed = 1;
8307
8308 /* Initialize as RecoveryInProgress() would do when switching state */
8309 InitXLOGAccess();
8310 }
8311
8312 /*
8313 * Subroutine to try to fetch and validate a prior checkpoint record.
8314 *
8315 * whichChkpt identifies the checkpoint (merely for reporting purposes).
8316 * 1 for "primary", 0 for "other" (backup_label)
8317 */
8318 static XLogRecord *
8319 ReadCheckpointRecord(XLogReaderState *xlogreader, XLogRecPtr RecPtr,
8320 int whichChkpt, bool report)
8321 {
8322 XLogRecord *record;
8323 uint8 info;
8324
8325 if (!XRecOffIsValid(RecPtr))
8326 {
8327 if (!report)
8328 return NULL;
8329
8330 switch (whichChkpt)
8331 {
8332 case 1:
8333 ereport(LOG,
8334 (errmsg("invalid primary checkpoint link in control file")));
8335 break;
8336 default:
8337 ereport(LOG,
8338 (errmsg("invalid checkpoint link in backup_label file")));
8339 break;
8340 }
8341 return NULL;
8342 }
8343
8344 record = ReadRecord(xlogreader, RecPtr, LOG, true);
8345
8346 if (record == NULL)
8347 {
8348 if (!report)
8349 return NULL;
8350
8351 switch (whichChkpt)
8352 {
8353 case 1:
8354 ereport(LOG,
8355 (errmsg("invalid primary checkpoint record")));
8356 break;
8357 default:
8358 ereport(LOG,
8359 (errmsg("invalid checkpoint record")));
8360 break;
8361 }
8362 return NULL;
8363 }
8364 if (record->xl_rmid != RM_XLOG_ID)
8365 {
8366 switch (whichChkpt)
8367 {
8368 case 1:
8369 ereport(LOG,
8370 (errmsg("invalid resource manager ID in primary checkpoint record")));
8371 break;
8372 default:
8373 ereport(LOG,
8374 (errmsg("invalid resource manager ID in checkpoint record")));
8375 break;
8376 }
8377 return NULL;
8378 }
8379 info = record->xl_info & ~XLR_INFO_MASK;
8380 if (info != XLOG_CHECKPOINT_SHUTDOWN &&
8381 info != XLOG_CHECKPOINT_ONLINE)
8382 {
8383 switch (whichChkpt)
8384 {
8385 case 1:
8386 ereport(LOG,
8387 (errmsg("invalid xl_info in primary checkpoint record")));
8388 break;
8389 default:
8390 ereport(LOG,
8391 (errmsg("invalid xl_info in checkpoint record")));
8392 break;
8393 }
8394 return NULL;
8395 }
8396 if (record->xl_tot_len != SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(CheckPoint))
8397 {
8398 switch (whichChkpt)
8399 {
8400 case 1:
8401 ereport(LOG,
8402 (errmsg("invalid length of primary checkpoint record")));
8403 break;
8404 default:
8405 ereport(LOG,
8406 (errmsg("invalid length of checkpoint record")));
8407 break;
8408 }
8409 return NULL;
8410 }
8411 return record;
8412 }
8413
8414 /*
8415 * This must be called in a backend process before creating WAL records
8416 * (except in a standalone backend, which does StartupXLOG instead). We need
8417 * to initialize the local copies of ThisTimeLineID and RedoRecPtr.
8418 *
8419 * Note: before Postgres 8.0, we went to some effort to keep the postmaster
8420 * process's copies of ThisTimeLineID and RedoRecPtr valid too. This was
8421 * unnecessary however, since the postmaster itself never touches XLOG anyway.
8422 */
8423 void
8424 InitXLOGAccess(void)
8425 {
8426 XLogCtlInsert *Insert = &XLogCtl->Insert;
8427
8428 /* ThisTimeLineID doesn't change so we need no lock to copy it */
8429 ThisTimeLineID = XLogCtl->ThisTimeLineID;
8430 Assert(ThisTimeLineID != 0 || IsBootstrapProcessingMode());
8431
8432 /* set wal_segment_size */
8433 wal_segment_size = ControlFile->xlog_seg_size;
8434
8435 /* Use GetRedoRecPtr to copy the RedoRecPtr safely */
8436 (void) GetRedoRecPtr();
8437 /* Also update our copy of doPageWrites. */
8438 doPageWrites = (Insert->fullPageWrites || Insert->forcePageWrites);
8439
8440 /* Also initialize the working areas for constructing WAL records */
8441 InitXLogInsert();
8442 }
8443
8444 /*
8445 * Return the current Redo pointer from shared memory.
8446 *
8447 * As a side-effect, the local RedoRecPtr copy is updated.
8448 */
8449 XLogRecPtr
8450 GetRedoRecPtr(void)
8451 {
8452 XLogRecPtr ptr;
8453
8454 /*
8455 * The possibly not up-to-date copy in XlogCtl is enough. Even if we
8456 * grabbed a WAL insertion lock to read the master copy, someone might
8457 * update it just after we've released the lock.
8458 */
8459 SpinLockAcquire(&XLogCtl->info_lck);
8460 ptr = XLogCtl->RedoRecPtr;
8461 SpinLockRelease(&XLogCtl->info_lck);
8462
8463 if (RedoRecPtr < ptr)
8464 RedoRecPtr = ptr;
8465
8466 return RedoRecPtr;
8467 }
8468
8469 /*
8470 * Return information needed to decide whether a modified block needs a
8471 * full-page image to be included in the WAL record.
8472 *
8473 * The returned values are cached copies from backend-private memory, and
8474 * possibly out-of-date. XLogInsertRecord will re-check them against
8475 * up-to-date values, while holding the WAL insert lock.
8476 */
8477 void
8478 GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
8479 {
8480 *RedoRecPtr_p = RedoRecPtr;
8481 *doPageWrites_p = doPageWrites;
8482 }
8483
8484 /*
8485 * GetInsertRecPtr -- Returns the current insert position.
8486 *
8487 * NOTE: The value *actually* returned is the position of the last full
8488 * xlog page. It lags behind the real insert position by at most 1 page.
8489 * For that, we don't need to scan through WAL insertion locks, and an
8490 * approximation is enough for the current usage of this function.
8491 */
8492 XLogRecPtr
8493 GetInsertRecPtr(void)
8494 {
8495 XLogRecPtr recptr;
8496
8497 SpinLockAcquire(&XLogCtl->info_lck);
8498 recptr = XLogCtl->LogwrtRqst.Write;
8499 SpinLockRelease(&XLogCtl->info_lck);
8500
8501 return recptr;
8502 }
8503
8504 /*
8505 * GetFlushRecPtr -- Returns the current flush position, ie, the last WAL
8506 * position known to be fsync'd to disk.
8507 */
8508 XLogRecPtr
8509 GetFlushRecPtr(void)
8510 {
8511 SpinLockAcquire(&XLogCtl->info_lck);
8512 LogwrtResult = XLogCtl->LogwrtResult;
8513 SpinLockRelease(&XLogCtl->info_lck);
8514
8515 return LogwrtResult.Flush;
8516 }
8517
8518 /*
8519 * GetLastImportantRecPtr -- Returns the LSN of the last important record
8520 * inserted. All records not explicitly marked as unimportant are considered
8521 * important.
8522 *
8523 * The LSN is determined by computing the maximum of
8524 * WALInsertLocks[i].lastImportantAt.
8525 */
8526 XLogRecPtr
8527 GetLastImportantRecPtr(void)
8528 {
8529 XLogRecPtr res = InvalidXLogRecPtr;
8530 int i;
8531
8532 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
8533 {
8534 XLogRecPtr last_important;
8535
8536 /*
8537 * Need to take a lock to prevent torn reads of the LSN, which are
8538 * possible on some of the supported platforms. WAL insert locks only
8539 * support exclusive mode, so we have to use that.
8540 */
8541 LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE);
8542 last_important = WALInsertLocks[i].l.lastImportantAt;
8543 LWLockRelease(&WALInsertLocks[i].l.lock);
8544
8545 if (res < last_important)
8546 res = last_important;
8547 }
8548
8549 return res;
8550 }
8551
8552 /*
8553 * Get the time and LSN of the last xlog segment switch
8554 */
8555 pg_time_t
8556 GetLastSegSwitchData(XLogRecPtr *lastSwitchLSN)
8557 {
8558 pg_time_t result;
8559
8560 /* Need WALWriteLock, but shared lock is sufficient */
8561 LWLockAcquire(WALWriteLock, LW_SHARED);
8562 result = XLogCtl->lastSegSwitchTime;
8563 *lastSwitchLSN = XLogCtl->lastSegSwitchLSN;
8564 LWLockRelease(WALWriteLock);
8565
8566 return result;
8567 }
8568
8569 /*
8570 * GetNextXidAndEpoch - get the current nextXid value and associated epoch
8571 *
8572 * This is exported for use by code that would like to have 64-bit XIDs.
8573 * We don't really support such things, but all XIDs within the system
8574 * can be presumed "close to" the result, and thus the epoch associated
8575 * with them can be determined.
8576 */
8577 void
8578 GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch)
8579 {
8580 uint32 ckptXidEpoch;
8581 TransactionId ckptXid;
8582 TransactionId nextXid;
8583
8584 /* Must read checkpoint info first, else have race condition */
8585 SpinLockAcquire(&XLogCtl->info_lck);
8586 ckptXidEpoch = XLogCtl->ckptXidEpoch;
8587 ckptXid = XLogCtl->ckptXid;
8588 SpinLockRelease(&XLogCtl->info_lck);
8589
8590 /* Now fetch current nextXid */
8591 nextXid = ReadNewTransactionId();
8592
8593 /*
8594 * nextXid is certainly logically later than ckptXid. So if it's
8595 * numerically less, it must have wrapped into the next epoch.
8596 */
8597 if (nextXid < ckptXid)
8598 ckptXidEpoch++;
8599
8600 *xid = nextXid;
8601 *epoch = ckptXidEpoch;
8602 }
8603
8604 /*
8605 * This must be called ONCE during postmaster or standalone-backend shutdown
8606 */
8607 void
8608 ShutdownXLOG(int code, Datum arg)
8609 {
8610 /* Don't be chatty in standalone mode */
8611 ereport(IsPostmasterEnvironment ? LOG : NOTICE,
8612 (errmsg("shutting down")));
8613
8614 /*
8615 * Signal walsenders to move to stopping state.
8616 */
8617 WalSndInitStopping();
8618
8619 /*
8620 * Wait for WAL senders to be in stopping state. This prevents commands
8621 * from writing new WAL.
8622 */
8623 WalSndWaitStopping();
8624
8625 if (RecoveryInProgress())
8626 CreateRestartPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
8627 else
8628 {
8629 /*
8630 * If archiving is enabled, rotate the last XLOG file so that all the
8631 * remaining records are archived (postmaster wakes up the archiver
8632 * process one more time at the end of shutdown). The checkpoint
8633 * record will go to the next XLOG file and won't be archived (yet).
8634 */
8635 if (XLogArchivingActive() && XLogArchiveCommandSet())
8636 RequestXLogSwitch(false);
8637
8638 CreateCheckPoint(CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_IMMEDIATE);
8639 }
8640 ShutdownCLOG();
8641 ShutdownCommitTs();
8642 ShutdownSUBTRANS();
8643 ShutdownMultiXact();
8644 }
8645
8646 /*
8647 * Log start of a checkpoint.
8648 */
8649 static void
8650 LogCheckpointStart(int flags, bool restartpoint)
8651 {
8652 elog(LOG, "%s starting:%s%s%s%s%s%s%s%s",
8653 restartpoint ? "restartpoint" : "checkpoint",
8654 (flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
8655 (flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
8656 (flags & CHECKPOINT_IMMEDIATE) ? " immediate" : "",
8657 (flags & CHECKPOINT_FORCE) ? " force" : "",
8658 (flags & CHECKPOINT_WAIT) ? " wait" : "",
8659 (flags & CHECKPOINT_CAUSE_XLOG) ? " xlog" : "",
8660 (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
8661 (flags & CHECKPOINT_FLUSH_ALL) ? " flush-all" : "");
8662 }
8663
8664 /*
8665 * Log end of a checkpoint.
8666 */
8667 static void
8668 LogCheckpointEnd(bool restartpoint)
8669 {
8670 long write_msecs,
8671 sync_msecs,
8672 total_msecs,
8673 longest_msecs,
8674 average_msecs;
8675 uint64 average_sync_time;
8676
8677 CheckpointStats.ckpt_end_t = GetCurrentTimestamp();
8678
8679 write_msecs = TimestampDifferenceMilliseconds(CheckpointStats.ckpt_write_t,
8680 CheckpointStats.ckpt_sync_t);
8681
8682 sync_msecs = TimestampDifferenceMilliseconds(CheckpointStats.ckpt_sync_t,
8683 CheckpointStats.ckpt_sync_end_t);
8684
8685 /* Accumulate checkpoint timing summary data, in milliseconds. */
8686 BgWriterStats.m_checkpoint_write_time += write_msecs;
8687 BgWriterStats.m_checkpoint_sync_time += sync_msecs;
8688
8689 /*
8690 * All of the published timing statistics are accounted for. Only
8691 * continue if a log message is to be written.
8692 */
8693 if (!log_checkpoints)
8694 return;
8695
8696 total_msecs = TimestampDifferenceMilliseconds(CheckpointStats.ckpt_start_t,
8697 CheckpointStats.ckpt_end_t);
8698
8699 /*
8700 * Timing values returned from CheckpointStats are in microseconds.
8701 * Convert to milliseconds for consistent printing.
8702 */
8703 longest_msecs = (long) ((CheckpointStats.ckpt_longest_sync + 999) / 1000);
8704
8705 average_sync_time = 0;
8706 if (CheckpointStats.ckpt_sync_rels > 0)
8707 average_sync_time = CheckpointStats.ckpt_agg_sync_time /
8708 CheckpointStats.ckpt_sync_rels;
8709 average_msecs = (long) ((average_sync_time + 999) / 1000);
8710
8711 elog(LOG, "%s complete: wrote %d buffers (%.1f%%); "
8712 "%d WAL file(s) added, %d removed, %d recycled; "
8713 "write=%ld.%03d s, sync=%ld.%03d s, total=%ld.%03d s; "
8714 "sync files=%d, longest=%ld.%03d s, average=%ld.%03d s; "
8715 "distance=%d kB, estimate=%d kB",
8716 restartpoint ? "restartpoint" : "checkpoint",
8717 CheckpointStats.ckpt_bufs_written,
8718 (double) CheckpointStats.ckpt_bufs_written * 100 / NBuffers,
8719 CheckpointStats.ckpt_segs_added,
8720 CheckpointStats.ckpt_segs_removed,
8721 CheckpointStats.ckpt_segs_recycled,
8722 write_msecs / 1000, (int) (write_msecs % 1000),
8723 sync_msecs / 1000, (int) (sync_msecs % 1000),
8724 total_msecs / 1000, (int) (total_msecs % 1000),
8725 CheckpointStats.ckpt_sync_rels,
8726 longest_msecs / 1000, (int) (longest_msecs % 1000),
8727 average_msecs / 1000, (int) (average_msecs % 1000),
8728 (int) (PrevCheckPointDistance / 1024.0),
8729 (int) (CheckPointDistanceEstimate / 1024.0));
8730 }
8731
8732 /*
8733 * Update the estimate of distance between checkpoints.
8734 *
8735 * The estimate is used to calculate the number of WAL segments to keep
8736 * preallocated, see XLOGFileSlop().
8737 */
8738 static void
8739 UpdateCheckPointDistanceEstimate(uint64 nbytes)
8740 {
8741 /*
8742 * To estimate the number of segments consumed between checkpoints, keep a
8743 * moving average of the amount of WAL generated in previous checkpoint
8744 * cycles. However, if the load is bursty, with quiet periods and busy
8745 * periods, we want to cater for the peak load. So instead of a plain
8746 * moving average, let the average decline slowly if the previous cycle
8747 * used less WAL than estimated, but bump it up immediately if it used
8748 * more.
8749 *
8750 * When checkpoints are triggered by max_wal_size, this should converge to
8751 * CheckpointSegments * wal_segment_size,
8752 *
8753 * Note: This doesn't pay any attention to what caused the checkpoint.
8754 * Checkpoints triggered manually with CHECKPOINT command, or by e.g.
8755 * starting a base backup, are counted the same as those created
8756 * automatically. The slow-decline will largely mask them out, if they are
8757 * not frequent. If they are frequent, it seems reasonable to count them
8758 * in as any others; if you issue a manual checkpoint every 5 minutes and
8759 * never let a timed checkpoint happen, it makes sense to base the
8760 * preallocation on that 5 minute interval rather than whatever
8761 * checkpoint_timeout is set to.
8762 */
8763 PrevCheckPointDistance = nbytes;
8764 if (CheckPointDistanceEstimate < nbytes)
8765 CheckPointDistanceEstimate = nbytes;
8766 else
8767 CheckPointDistanceEstimate =
8768 (0.90 * CheckPointDistanceEstimate + 0.10 * (double) nbytes);
8769 }
8770
8771 /*
8772 * Perform a checkpoint --- either during shutdown, or on-the-fly
8773 *
8774 * flags is a bitwise OR of the following:
8775 * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
8776 * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
8777 * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
8778 * ignoring checkpoint_completion_target parameter.
8779 * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
8780 * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
8781 * CHECKPOINT_END_OF_RECOVERY).
8782 * CHECKPOINT_FLUSH_ALL: also flush buffers of unlogged tables.
8783 *
8784 * Note: flags contains other bits, of interest here only for logging purposes.
8785 * In particular note that this routine is synchronous and does not pay
8786 * attention to CHECKPOINT_WAIT.
8787 *
8788 * If !shutdown then we are writing an online checkpoint. This is a very special
8789 * kind of operation and WAL record because the checkpoint action occurs over
8790 * a period of time yet logically occurs at just a single LSN. The logical
8791 * position of the WAL record (redo ptr) is the same or earlier than the
8792 * physical position. When we replay WAL we locate the checkpoint via its
8793 * physical position then read the redo ptr and actually start replay at the
8794 * earlier logical position. Note that we don't write *anything* to WAL at
8795 * the logical position, so that location could be any other kind of WAL record.
8796 * All of this mechanism allows us to continue working while we checkpoint.
8797 * As a result, timing of actions is critical here and be careful to note that
8798 * this function will likely take minutes to execute on a busy system.
8799 */
8800 void
8801 CreateCheckPoint(int flags)
8802 {
8803 bool shutdown;
8804 CheckPoint checkPoint;
8805 XLogRecPtr recptr;
8806 XLogSegNo _logSegNo;
8807 XLogCtlInsert *Insert = &XLogCtl->Insert;
8808 uint32 freespace;
8809 XLogRecPtr PriorRedoPtr;
8810 XLogRecPtr curInsert;
8811 XLogRecPtr last_important_lsn;
8812 VirtualTransactionId *vxids;
8813 int nvxids;
8814
8815 /*
8816 * An end-of-recovery checkpoint is really a shutdown checkpoint, just
8817 * issued at a different time.
8818 */
8819 if (flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY))
8820 shutdown = true;
8821 else
8822 shutdown = false;
8823
8824 /* sanity check */
8825 if (RecoveryInProgress() && (flags & CHECKPOINT_END_OF_RECOVERY) == 0)
8826 elog(ERROR, "can't create a checkpoint during recovery");
8827
8828 /*
8829 * Initialize InitXLogInsert working areas before entering the critical
8830 * section. Normally, this is done by the first call to
8831 * RecoveryInProgress() or LocalSetXLogInsertAllowed(), but when creating
8832 * an end-of-recovery checkpoint, the LocalSetXLogInsertAllowed call is
8833 * done below in a critical section, and InitXLogInsert cannot be called
8834 * in a critical section.
8835 */
8836 InitXLogInsert();
8837
8838 /*
8839 * Acquire CheckpointLock to ensure only one checkpoint happens at a time.
8840 * (This is just pro forma, since in the present system structure there is
8841 * only one process that is allowed to issue checkpoints at any given
8842 * time.)
8843 */
8844 LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
8845
8846 /*
8847 * Prepare to accumulate statistics.
8848 *
8849 * Note: because it is possible for log_checkpoints to change while a
8850 * checkpoint proceeds, we always accumulate stats, even if
8851 * log_checkpoints is currently off.
8852 */
8853 MemSet(&CheckpointStats, 0, sizeof(CheckpointStats));
8854 CheckpointStats.ckpt_start_t = GetCurrentTimestamp();
8855
8856 /*
8857 * Use a critical section to force system panic if we have trouble.
8858 */
8859 START_CRIT_SECTION();
8860
8861 if (shutdown)
8862 {
8863 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
8864 ControlFile->state = DB_SHUTDOWNING;
8865 ControlFile->time = (pg_time_t) time(NULL);
8866 UpdateControlFile();
8867 LWLockRelease(ControlFileLock);
8868 }
8869
8870 /*
8871 * Let smgr prepare for checkpoint; this has to happen before we determine
8872 * the REDO pointer. Note that smgr must not do anything that'd have to
8873 * be undone if we decide no checkpoint is needed.
8874 */
8875 smgrpreckpt();
8876
8877 /* Begin filling in the checkpoint WAL record */
8878 MemSet(&checkPoint, 0, sizeof(checkPoint));
8879 checkPoint.time = (pg_time_t) time(NULL);
8880
8881 /*
8882 * For Hot Standby, derive the oldestActiveXid before we fix the redo
8883 * pointer. This allows us to begin accumulating changes to assemble our
8884 * starting snapshot of locks and transactions.
8885 */
8886 if (!shutdown && XLogStandbyInfoActive())
8887 checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
8888 else
8889 checkPoint.oldestActiveXid = InvalidTransactionId;
8890
8891 /*
8892 * Get location of last important record before acquiring insert locks (as
8893 * GetLastImportantRecPtr() also locks WAL locks).
8894 */
8895 last_important_lsn = GetLastImportantRecPtr();
8896
8897 /*
8898 * We must block concurrent insertions while examining insert state to
8899 * determine the checkpoint REDO pointer.
8900 */
8901 WALInsertLockAcquireExclusive();
8902 curInsert = XLogBytePosToRecPtr(Insert->CurrBytePos);
8903
8904 /*
8905 * If this isn't a shutdown or forced checkpoint, and if there has been no
8906 * WAL activity requiring a checkpoint, skip it. The idea here is to
8907 * avoid inserting duplicate checkpoints when the system is idle.
8908 */
8909 if ((flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY |
8910 CHECKPOINT_FORCE)) == 0)
8911 {
8912 if (last_important_lsn == ControlFile->checkPoint)
8913 {
8914 WALInsertLockRelease();
8915 LWLockRelease(CheckpointLock);
8916 END_CRIT_SECTION();
8917 ereport(DEBUG1,
8918 (errmsg("checkpoint skipped because system is idle")));
8919 return;
8920 }
8921 }
8922
8923 /*
8924 * An end-of-recovery checkpoint is created before anyone is allowed to
8925 * write WAL. To allow us to write the checkpoint record, temporarily
8926 * enable XLogInsertAllowed. (This also ensures ThisTimeLineID is
8927 * initialized, which we need here and in AdvanceXLInsertBuffer.)
8928 */
8929 if (flags & CHECKPOINT_END_OF_RECOVERY)
8930 LocalSetXLogInsertAllowed();
8931
8932 checkPoint.ThisTimeLineID = ThisTimeLineID;
8933 if (flags & CHECKPOINT_END_OF_RECOVERY)
8934 checkPoint.PrevTimeLineID = XLogCtl->PrevTimeLineID;
8935 else
8936 checkPoint.PrevTimeLineID = ThisTimeLineID;
8937
8938 checkPoint.fullPageWrites = Insert->fullPageWrites;
8939
8940 /*
8941 * Compute new REDO record ptr = location of next XLOG record.
8942 *
8943 * NB: this is NOT necessarily where the checkpoint record itself will be,
8944 * since other backends may insert more XLOG records while we're off doing
8945 * the buffer flush work. Those XLOG records are logically after the
8946 * checkpoint, even though physically before it. Got that?
8947 */
8948 freespace = INSERT_FREESPACE(curInsert);
8949 if (freespace == 0)
8950 {
8951 if (XLogSegmentOffset(curInsert, wal_segment_size) == 0)
8952 curInsert += SizeOfXLogLongPHD;
8953 else
8954 curInsert += SizeOfXLogShortPHD;
8955 }
8956 checkPoint.redo = curInsert;
8957
8958 /*
8959 * Here we update the shared RedoRecPtr for future XLogInsert calls; this
8960 * must be done while holding all the insertion locks.
8961 *
8962 * Note: if we fail to complete the checkpoint, RedoRecPtr will be left
8963 * pointing past where it really needs to point. This is okay; the only
8964 * consequence is that XLogInsert might back up whole buffers that it
8965 * didn't really need to. We can't postpone advancing RedoRecPtr because
8966 * XLogInserts that happen while we are dumping buffers must assume that
8967 * their buffer changes are not included in the checkpoint.
8968 */
8969 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
8970
8971 /*
8972 * Now we can release the WAL insertion locks, allowing other xacts to
8973 * proceed while we are flushing disk buffers.
8974 */
8975 WALInsertLockRelease();
8976
8977 /* Update the info_lck-protected copy of RedoRecPtr as well */
8978 SpinLockAcquire(&XLogCtl->info_lck);
8979 XLogCtl->RedoRecPtr = checkPoint.redo;
8980 SpinLockRelease(&XLogCtl->info_lck);
8981
8982 /*
8983 * If enabled, log checkpoint start. We postpone this until now so as not
8984 * to log anything if we decided to skip the checkpoint.
8985 */
8986 if (log_checkpoints)
8987 LogCheckpointStart(flags, false);
8988
8989 TRACE_POSTGRESQL_CHECKPOINT_START(flags);
8990
8991 /*
8992 * Get the other info we need for the checkpoint record.
8993 *
8994 * We don't need to save oldestClogXid in the checkpoint, it only matters
8995 * for the short period in which clog is being truncated, and if we crash
8996 * during that we'll redo the clog truncation and fix up oldestClogXid
8997 * there.
8998 */
8999 LWLockAcquire(XidGenLock, LW_SHARED);
9000 checkPoint.nextXid = ShmemVariableCache->nextXid;
9001 checkPoint.oldestXid = ShmemVariableCache->oldestXid;
9002 checkPoint.oldestXidDB = ShmemVariableCache->oldestXidDB;
9003 LWLockRelease(XidGenLock);
9004
9005 LWLockAcquire(CommitTsLock, LW_SHARED);
9006 checkPoint.oldestCommitTsXid = ShmemVariableCache->oldestCommitTsXid;
9007 checkPoint.newestCommitTsXid = ShmemVariableCache->newestCommitTsXid;
9008 LWLockRelease(CommitTsLock);
9009
9010 /* Increase XID epoch if we've wrapped around since last checkpoint */
9011 checkPoint.nextXidEpoch = ControlFile->checkPointCopy.nextXidEpoch;
9012 if (checkPoint.nextXid < ControlFile->checkPointCopy.nextXid)
9013 checkPoint.nextXidEpoch++;
9014
9015 LWLockAcquire(OidGenLock, LW_SHARED);
9016 checkPoint.nextOid = ShmemVariableCache->nextOid;
9017 if (!shutdown)
9018 checkPoint.nextOid += ShmemVariableCache->oidCount;
9019 LWLockRelease(OidGenLock);
9020
9021 MultiXactGetCheckptMulti(shutdown,
9022 &checkPoint.nextMulti,
9023 &checkPoint.nextMultiOffset,
9024 &checkPoint.oldestMulti,
9025 &checkPoint.oldestMultiDB);
9026
9027 /*
9028 * Having constructed the checkpoint record, ensure all shmem disk buffers
9029 * and commit-log buffers are flushed to disk.
9030 *
9031 * This I/O could fail for various reasons. If so, we will fail to
9032 * complete the checkpoint, but there is no reason to force a system
9033 * panic. Accordingly, exit critical section while doing it.
9034 */
9035 END_CRIT_SECTION();
9036
9037 /*
9038 * In some cases there are groups of actions that must all occur on one
9039 * side or the other of a checkpoint record. Before flushing the
9040 * checkpoint record we must explicitly wait for any backend currently
9041 * performing those groups of actions.
9042 *
9043 * One example is end of transaction, so we must wait for any transactions
9044 * that are currently in commit critical sections. If an xact inserted
9045 * its commit record into XLOG just before the REDO point, then a crash
9046 * restart from the REDO point would not replay that record, which means
9047 * that our flushing had better include the xact's update of pg_xact. So
9048 * we wait till he's out of his commit critical section before proceeding.
9049 * See notes in RecordTransactionCommit().
9050 *
9051 * Because we've already released the insertion locks, this test is a bit
9052 * fuzzy: it is possible that we will wait for xacts we didn't really need
9053 * to wait for. But the delay should be short and it seems better to make
9054 * checkpoint take a bit longer than to hold off insertions longer than
9055 * necessary. (In fact, the whole reason we have this issue is that xact.c
9056 * does commit record XLOG insertion and clog update as two separate steps
9057 * protected by different locks, but again that seems best on grounds of
9058 * minimizing lock contention.)
9059 *
9060 * A transaction that has not yet set delayChkpt when we look cannot be at
9061 * risk, since he's not inserted his commit record yet; and one that's
9062 * already cleared it is not at risk either, since he's done fixing clog
9063 * and we will correctly flush the update below. So we cannot miss any
9064 * xacts we need to wait for.
9065 */
9066 vxids = GetVirtualXIDsDelayingChkpt(&nvxids);
9067 if (nvxids > 0)
9068 {
9069 do
9070 {
9071 pg_usleep(10000L); /* wait for 10 msec */
9072 } while (HaveVirtualXIDsDelayingChkpt(vxids, nvxids));
9073 }
9074 pfree(vxids);
9075
9076 CheckPointGuts(checkPoint.redo, flags);
9077
9078 /*
9079 * Take a snapshot of running transactions and write this to WAL. This
9080 * allows us to reconstruct the state of running transactions during
9081 * archive recovery, if required. Skip, if this info disabled.
9082 *
9083 * If we are shutting down, or Startup process is completing crash
9084 * recovery we don't need to write running xact data.
9085 */
9086 if (!shutdown && XLogStandbyInfoActive())
9087 LogStandbySnapshot();
9088
9089 START_CRIT_SECTION();
9090
9091 /*
9092 * Now insert the checkpoint record into XLOG.
9093 */
9094 XLogBeginInsert();
9095 XLogRegisterData((char *) (&checkPoint), sizeof(checkPoint));
9096 recptr = XLogInsert(RM_XLOG_ID,
9097 shutdown ? XLOG_CHECKPOINT_SHUTDOWN :
9098 XLOG_CHECKPOINT_ONLINE);
9099
9100 XLogFlush(recptr);
9101
9102 /*
9103 * We mustn't write any new WAL after a shutdown checkpoint, or it will be
9104 * overwritten at next startup. No-one should even try, this just allows
9105 * sanity-checking. In the case of an end-of-recovery checkpoint, we want
9106 * to just temporarily disable writing until the system has exited
9107 * recovery.
9108 */
9109 if (shutdown)
9110 {
9111 if (flags & CHECKPOINT_END_OF_RECOVERY)
9112 LocalXLogInsertAllowed = -1; /* return to "check" state */
9113 else
9114 LocalXLogInsertAllowed = 0; /* never again write WAL */
9115 }
9116
9117 /*
9118 * We now have ProcLastRecPtr = start of actual checkpoint record, recptr
9119 * = end of actual checkpoint record.
9120 */
9121 if (shutdown && checkPoint.redo != ProcLastRecPtr)
9122 ereport(PANIC,
9123 (errmsg("concurrent write-ahead log activity while database system is shutting down")));
9124
9125 /*
9126 * Remember the prior checkpoint's redo ptr for
9127 * UpdateCheckPointDistanceEstimate()
9128 */
9129 PriorRedoPtr = ControlFile->checkPointCopy.redo;
9130
9131 /*
9132 * Update the control file.
9133 */
9134 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
9135 if (shutdown)
9136 ControlFile->state = DB_SHUTDOWNED;
9137 ControlFile->checkPoint = ProcLastRecPtr;
9138 ControlFile->checkPointCopy = checkPoint;
9139 ControlFile->time = (pg_time_t) time(NULL);
9140 /* crash recovery should always recover to the end of WAL */
9141 ControlFile->minRecoveryPoint = InvalidXLogRecPtr;
9142 ControlFile->minRecoveryPointTLI = 0;
9143
9144 /*
9145 * Persist unloggedLSN value. It's reset on crash recovery, so this goes
9146 * unused on non-shutdown checkpoints, but seems useful to store it always
9147 * for debugging purposes.
9148 */
9149 SpinLockAcquire(&XLogCtl->ulsn_lck);
9150 ControlFile->unloggedLSN = XLogCtl->unloggedLSN;
9151 SpinLockRelease(&XLogCtl->ulsn_lck);
9152
9153 UpdateControlFile();
9154 LWLockRelease(ControlFileLock);
9155
9156 /* Update shared-memory copy of checkpoint XID/epoch */
9157 SpinLockAcquire(&XLogCtl->info_lck);
9158 XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
9159 XLogCtl->ckptXid = checkPoint.nextXid;
9160 SpinLockRelease(&XLogCtl->info_lck);
9161
9162 /*
9163 * We are now done with critical updates; no need for system panic if we
9164 * have trouble while fooling with old log segments.
9165 */
9166 END_CRIT_SECTION();
9167
9168 /*
9169 * Let smgr do post-checkpoint cleanup (eg, deleting old files).
9170 */
9171 smgrpostckpt();
9172
9173 /*
9174 * Update the average distance between checkpoints if the prior checkpoint
9175 * exists.
9176 */
9177 if (PriorRedoPtr != InvalidXLogRecPtr)
9178 UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr);
9179
9180 /*
9181 * Delete old log files, those no longer needed for last checkpoint to
9182 * prevent the disk holding the xlog from growing full.
9183 */
9184 XLByteToSeg(RedoRecPtr, _logSegNo, wal_segment_size);
9185 KeepLogSeg(recptr, &_logSegNo);
9186 _logSegNo--;
9187 RemoveOldXlogFiles(_logSegNo, RedoRecPtr, recptr);
9188
9189 /*
9190 * Make more log segments if needed. (Do this after recycling old log
9191 * segments, since that may supply some of the needed files.)
9192 */
9193 if (!shutdown)
9194 PreallocXlogFiles(recptr);
9195
9196 /*
9197 * Truncate pg_subtrans if possible. We can throw away all data before
9198 * the oldest XMIN of any running transaction. No future transaction will
9199 * attempt to reference any pg_subtrans entry older than that (see Asserts
9200 * in subtrans.c). During recovery, though, we mustn't do this because
9201 * StartupSUBTRANS hasn't been called yet.
9202 */
9203 if (!RecoveryInProgress())
9204 TruncateSUBTRANS(GetOldestXmin(NULL, PROCARRAY_FLAGS_DEFAULT));
9205
9206 /* Real work is done, but log and update stats before releasing lock. */
9207 LogCheckpointEnd(false);
9208
9209 TRACE_POSTGRESQL_CHECKPOINT_DONE(CheckpointStats.ckpt_bufs_written,
9210 NBuffers,
9211 CheckpointStats.ckpt_segs_added,
9212 CheckpointStats.ckpt_segs_removed,
9213 CheckpointStats.ckpt_segs_recycled);
9214
9215 LWLockRelease(CheckpointLock);
9216 }
9217
9218 /*
9219 * Mark the end of recovery in WAL though without running a full checkpoint.
9220 * We can expect that a restartpoint is likely to be in progress as we
9221 * do this, though we are unwilling to wait for it to complete. So be
9222 * careful to avoid taking the CheckpointLock anywhere here.
9223 *
9224 * CreateRestartPoint() allows for the case where recovery may end before
9225 * the restartpoint completes so there is no concern of concurrent behaviour.
9226 */
9227 static void
9228 CreateEndOfRecoveryRecord(void)
9229 {
9230 xl_end_of_recovery xlrec;
9231 XLogRecPtr recptr;
9232
9233 /* sanity check */
9234 if (!RecoveryInProgress())
9235 elog(ERROR, "can only be used to end recovery");
9236
9237 xlrec.end_time = GetCurrentTimestamp();
9238
9239 WALInsertLockAcquireExclusive();
9240 xlrec.ThisTimeLineID = ThisTimeLineID;
9241 xlrec.PrevTimeLineID = XLogCtl->PrevTimeLineID;
9242 WALInsertLockRelease();
9243
9244 LocalSetXLogInsertAllowed();
9245
9246 START_CRIT_SECTION();
9247
9248 XLogBeginInsert();
9249 XLogRegisterData((char *) &xlrec, sizeof(xl_end_of_recovery));
9250 recptr = XLogInsert(RM_XLOG_ID, XLOG_END_OF_RECOVERY);
9251
9252 XLogFlush(recptr);
9253
9254 /*
9255 * Update the control file so that crash recovery can follow the timeline
9256 * changes to this point.
9257 */
9258 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
9259 ControlFile->time = (pg_time_t) time(NULL);
9260 ControlFile->minRecoveryPoint = recptr;
9261 ControlFile->minRecoveryPointTLI = ThisTimeLineID;
9262 UpdateControlFile();
9263 LWLockRelease(ControlFileLock);
9264
9265 END_CRIT_SECTION();
9266
9267 LocalXLogInsertAllowed = -1; /* return to "check" state */
9268 }
9269
9270 /*
9271 * Write an OVERWRITE_CONTRECORD message.
9272 *
9273 * When on WAL replay we expect a continuation record at the start of a page
9274 * that is not there, recovery ends and WAL writing resumes at that point.
9275 * But it's wrong to resume writing new WAL back at the start of the record
9276 * that was broken, because downstream consumers of that WAL (physical
9277 * replicas) are not prepared to "rewind". So the first action after
9278 * finishing replay of all valid WAL must be to write a record of this type
9279 * at the point where the contrecord was missing; to support xlogreader
9280 * detecting the special case, XLP_FIRST_IS_OVERWRITE_CONTRECORD is also added
9281 * to the page header where the record occurs. xlogreader has an ad-hoc
9282 * mechanism to report metadata about the broken record, which is what we
9283 * use here.
9284 *
9285 * At replay time, XLP_FIRST_IS_OVERWRITE_CONTRECORD instructs xlogreader to
9286 * skip the record it was reading, and pass back the LSN of the skipped
9287 * record, so that its caller can verify (on "replay" of that record) that the
9288 * XLOG_OVERWRITE_CONTRECORD matches what was effectively overwritten.
9289 */
9290 static XLogRecPtr
9291 CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn)
9292 {
9293 xl_overwrite_contrecord xlrec;
9294 XLogRecPtr recptr;
9295
9296 /* sanity check */
9297 if (!RecoveryInProgress())
9298 elog(ERROR, "can only be used at end of recovery");
9299
9300 xlrec.overwritten_lsn = aborted_lsn;
9301 xlrec.overwrite_time = GetCurrentTimestamp();
9302
9303 START_CRIT_SECTION();
9304
9305 XLogBeginInsert();
9306 XLogRegisterData((char *) &xlrec, sizeof(xl_overwrite_contrecord));
9307
9308 recptr = XLogInsert(RM_XLOG_ID, XLOG_OVERWRITE_CONTRECORD);
9309
9310 XLogFlush(recptr);
9311
9312 END_CRIT_SECTION();
9313
9314 return recptr;
9315 }
9316
9317 /*
9318 * Flush all data in shared memory to disk, and fsync
9319 *
9320 * This is the common code shared between regular checkpoints and
9321 * recovery restartpoints.
9322 */
9323 static void
9324 CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
9325 {
9326 CheckPointCLOG();
9327 CheckPointCommitTs();
9328 CheckPointSUBTRANS();
9329 CheckPointMultiXact();
9330 CheckPointPredicate();
9331 CheckPointRelationMap();
9332 CheckPointReplicationSlots();
9333 CheckPointSnapBuild();
9334 CheckPointLogicalRewriteHeap();
9335 CheckPointBuffers(flags); /* performs all required fsyncs */
9336 CheckPointReplicationOrigin();
9337 /* We deliberately delay 2PC checkpointing as long as possible */
9338 CheckPointTwoPhase(checkPointRedo);
9339 }
9340
9341 /*
9342 * Save a checkpoint for recovery restart if appropriate
9343 *
9344 * This function is called each time a checkpoint record is read from XLOG.
9345 * It must determine whether the checkpoint represents a safe restartpoint or
9346 * not. If so, the checkpoint record is stashed in shared memory so that
9347 * CreateRestartPoint can consult it. (Note that the latter function is
9348 * executed by the checkpointer, while this one will be executed by the
9349 * startup process.)
9350 */
9351 static void
9352 RecoveryRestartPoint(const CheckPoint *checkPoint)
9353 {
9354 /*
9355 * Also refrain from creating a restartpoint if we have seen any
9356 * references to non-existent pages. Restarting recovery from the
9357 * restartpoint would not see the references, so we would lose the
9358 * cross-check that the pages belonged to a relation that was dropped
9359 * later.
9360 */
9361 if (XLogHaveInvalidPages())
9362 {
9363 elog(trace_recovery(DEBUG2),
9364 "could not record restart point at %X/%X because there "
9365 "are unresolved references to invalid pages",
9366 (uint32) (checkPoint->redo >> 32),
9367 (uint32) checkPoint->redo);
9368 return;
9369 }
9370
9371 /*
9372 * Copy the checkpoint record to shared memory, so that checkpointer can
9373 * work out the next time it wants to perform a restartpoint.
9374 */
9375 SpinLockAcquire(&XLogCtl->info_lck);
9376 XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
9377 XLogCtl->lastCheckPointEndPtr = EndRecPtr;
9378 XLogCtl->lastCheckPoint = *checkPoint;
9379 SpinLockRelease(&XLogCtl->info_lck);
9380 }
9381
9382 /*
9383 * Establish a restartpoint if possible.
9384 *
9385 * This is similar to CreateCheckPoint, but is used during WAL recovery
9386 * to establish a point from which recovery can roll forward without
9387 * replaying the entire recovery log.
9388 *
9389 * Returns true if a new restartpoint was established. We can only establish
9390 * a restartpoint if we have replayed a safe checkpoint record since last
9391 * restartpoint.
9392 */
9393 bool
9394 CreateRestartPoint(int flags)
9395 {
9396 XLogRecPtr lastCheckPointRecPtr;
9397 XLogRecPtr lastCheckPointEndPtr;
9398 CheckPoint lastCheckPoint;
9399 XLogRecPtr PriorRedoPtr;
9400 XLogRecPtr receivePtr;
9401 XLogRecPtr replayPtr;
9402 TimeLineID replayTLI;
9403 XLogRecPtr endptr;
9404 XLogSegNo _logSegNo;
9405 TimestampTz xtime;
9406
9407 /*
9408 * Acquire CheckpointLock to ensure only one restartpoint or checkpoint
9409 * happens at a time.
9410 */
9411 LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
9412
9413 /* Get a local copy of the last safe checkpoint record. */
9414 SpinLockAcquire(&XLogCtl->info_lck);
9415 lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
9416 lastCheckPointEndPtr = XLogCtl->lastCheckPointEndPtr;
9417 lastCheckPoint = XLogCtl->lastCheckPoint;
9418 SpinLockRelease(&XLogCtl->info_lck);
9419
9420 /*
9421 * Check that we're still in recovery mode. It's ok if we exit recovery
9422 * mode after this check, the restart point is valid anyway.
9423 */
9424 if (!RecoveryInProgress())
9425 {
9426 ereport(DEBUG2,
9427 (errmsg("skipping restartpoint, recovery has already ended")));
9428 LWLockRelease(CheckpointLock);
9429 return false;
9430 }
9431
9432 /*
9433 * If the last checkpoint record we've replayed is already our last
9434 * restartpoint, we can't perform a new restart point. We still update
9435 * minRecoveryPoint in that case, so that if this is a shutdown restart
9436 * point, we won't start up earlier than before. That's not strictly
9437 * necessary, but when hot standby is enabled, it would be rather weird if
9438 * the database opened up for read-only connections at a point-in-time
9439 * before the last shutdown. Such time travel is still possible in case of
9440 * immediate shutdown, though.
9441 *
9442 * We don't explicitly advance minRecoveryPoint when we do create a
9443 * restartpoint. It's assumed that flushing the buffers will do that as a
9444 * side-effect.
9445 */
9446 if (XLogRecPtrIsInvalid(lastCheckPointRecPtr) ||
9447 lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
9448 {
9449 ereport(DEBUG2,
9450 (errmsg("skipping restartpoint, already performed at %X/%X",
9451 (uint32) (lastCheckPoint.redo >> 32),
9452 (uint32) lastCheckPoint.redo)));
9453
9454 UpdateMinRecoveryPoint(InvalidXLogRecPtr, true);
9455 if (flags & CHECKPOINT_IS_SHUTDOWN)
9456 {
9457 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
9458 ControlFile->state = DB_SHUTDOWNED_IN_RECOVERY;
9459 ControlFile->time = (pg_time_t) time(NULL);
9460 UpdateControlFile();
9461 LWLockRelease(ControlFileLock);
9462 }
9463 LWLockRelease(CheckpointLock);
9464 return false;
9465 }
9466
9467 /*
9468 * Update the shared RedoRecPtr so that the startup process can calculate
9469 * the number of segments replayed since last restartpoint, and request a
9470 * restartpoint if it exceeds CheckPointSegments.
9471 *
9472 * Like in CreateCheckPoint(), hold off insertions to update it, although
9473 * during recovery this is just pro forma, because no WAL insertions are
9474 * happening.
9475 */
9476 WALInsertLockAcquireExclusive();
9477 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
9478 WALInsertLockRelease();
9479
9480 /* Also update the info_lck-protected copy */
9481 SpinLockAcquire(&XLogCtl->info_lck);
9482 XLogCtl->RedoRecPtr = lastCheckPoint.redo;
9483 SpinLockRelease(&XLogCtl->info_lck);
9484
9485 /*
9486 * Prepare to accumulate statistics.
9487 *
9488 * Note: because it is possible for log_checkpoints to change while a
9489 * checkpoint proceeds, we always accumulate stats, even if
9490 * log_checkpoints is currently off.
9491 */
9492 MemSet(&CheckpointStats, 0, sizeof(CheckpointStats));
9493 CheckpointStats.ckpt_start_t = GetCurrentTimestamp();
9494
9495 if (log_checkpoints)
9496 LogCheckpointStart(flags, true);
9497
9498 CheckPointGuts(lastCheckPoint.redo, flags);
9499
9500 /*
9501 * Remember the prior checkpoint's redo ptr for
9502 * UpdateCheckPointDistanceEstimate()
9503 */
9504 PriorRedoPtr = ControlFile->checkPointCopy.redo;
9505
9506 /*
9507 * Update pg_control, using current time. Check that it still shows
9508 * IN_ARCHIVE_RECOVERY state and an older checkpoint, else do nothing;
9509 * this is a quick hack to make sure nothing really bad happens if somehow
9510 * we get here after the end-of-recovery checkpoint.
9511 */
9512 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
9513 if (ControlFile->state == DB_IN_ARCHIVE_RECOVERY &&
9514 ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
9515 {
9516 ControlFile->checkPoint = lastCheckPointRecPtr;
9517 ControlFile->checkPointCopy = lastCheckPoint;
9518 ControlFile->time = (pg_time_t) time(NULL);
9519
9520 /*
9521 * Ensure minRecoveryPoint is past the checkpoint record. Normally,
9522 * this will have happened already while writing out dirty buffers,
9523 * but not necessarily - e.g. because no buffers were dirtied. We do
9524 * this because a non-exclusive base backup uses minRecoveryPoint to
9525 * determine which WAL files must be included in the backup, and the
9526 * file (or files) containing the checkpoint record must be included,
9527 * at a minimum. Note that for an ordinary restart of recovery there's
9528 * no value in having the minimum recovery point any earlier than this
9529 * anyway, because redo will begin just after the checkpoint record.
9530 */
9531 if (ControlFile->minRecoveryPoint < lastCheckPointEndPtr)
9532 {
9533 ControlFile->minRecoveryPoint = lastCheckPointEndPtr;
9534 ControlFile->minRecoveryPointTLI = lastCheckPoint.ThisTimeLineID;
9535
9536 /* update local copy */
9537 minRecoveryPoint = ControlFile->minRecoveryPoint;
9538 minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
9539 }
9540 if (flags & CHECKPOINT_IS_SHUTDOWN)
9541 ControlFile->state = DB_SHUTDOWNED_IN_RECOVERY;
9542 UpdateControlFile();
9543 }
9544 LWLockRelease(ControlFileLock);
9545
9546 /*
9547 * Update the average distance between checkpoints/restartpoints if the
9548 * prior checkpoint exists.
9549 */
9550 if (PriorRedoPtr != InvalidXLogRecPtr)
9551 UpdateCheckPointDistanceEstimate(RedoRecPtr - PriorRedoPtr);
9552
9553 /*
9554 * Delete old log files, those no longer needed for last restartpoint to
9555 * prevent the disk holding the xlog from growing full.
9556 */
9557 XLByteToSeg(RedoRecPtr, _logSegNo, wal_segment_size);
9558
9559 /*
9560 * Retreat _logSegNo using the current end of xlog replayed or received,
9561 * whichever is later.
9562 */
9563 receivePtr = GetWalRcvWriteRecPtr(NULL, NULL);
9564 replayPtr = GetXLogReplayRecPtr(&replayTLI);
9565 endptr = (receivePtr < replayPtr) ? replayPtr : receivePtr;
9566 KeepLogSeg(endptr, &_logSegNo);
9567 _logSegNo--;
9568
9569 /*
9570 * Try to recycle segments on a useful timeline. If we've been promoted
9571 * since the beginning of this restartpoint, use the new timeline chosen
9572 * at end of recovery (RecoveryInProgress() sets ThisTimeLineID in that
9573 * case). If we're still in recovery, use the timeline we're currently
9574 * replaying.
9575 *
9576 * There is no guarantee that the WAL segments will be useful on the
9577 * current timeline; if recovery proceeds to a new timeline right after
9578 * this, the pre-allocated WAL segments on this timeline will not be used,
9579 * and will go wasted until recycled on the next restartpoint. We'll live
9580 * with that.
9581 */
9582 if (RecoveryInProgress())
9583 ThisTimeLineID = replayTLI;
9584
9585 RemoveOldXlogFiles(_logSegNo, RedoRecPtr, endptr);
9586
9587 /*
9588 * Make more log segments if needed. (Do this after recycling old log
9589 * segments, since that may supply some of the needed files.)
9590 */
9591 PreallocXlogFiles(endptr);
9592
9593 /*
9594 * ThisTimeLineID is normally not set when we're still in recovery.
9595 * However, recycling/preallocating segments above needed ThisTimeLineID
9596 * to determine which timeline to install the segments on. Reset it now,
9597 * to restore the normal state of affairs for debugging purposes.
9598 */
9599 if (RecoveryInProgress())
9600 ThisTimeLineID = 0;
9601
9602 /*
9603 * Truncate pg_subtrans if possible. We can throw away all data before
9604 * the oldest XMIN of any running transaction. No future transaction will
9605 * attempt to reference any pg_subtrans entry older than that (see Asserts
9606 * in subtrans.c). When hot standby is disabled, though, we mustn't do
9607 * this because StartupSUBTRANS hasn't been called yet.
9608 */
9609 if (EnableHotStandby)
9610 TruncateSUBTRANS(GetOldestXmin(NULL, PROCARRAY_FLAGS_DEFAULT));
9611
9612 /* Real work is done, but log and update before releasing lock. */
9613 LogCheckpointEnd(true);
9614
9615 xtime = GetLatestXTime();
9616 ereport((log_checkpoints ? LOG : DEBUG2),
9617 (errmsg("recovery restart point at %X/%X",
9618 (uint32) (lastCheckPoint.redo >> 32), (uint32) lastCheckPoint.redo),
9619 xtime ? errdetail("Last completed transaction was at log time %s.",
9620 timestamptz_to_str(xtime)) : 0));
9621
9622 LWLockRelease(CheckpointLock);
9623
9624 /*
9625 * Finally, execute archive_cleanup_command, if any.
9626 */
9627 if (XLogCtl->archiveCleanupCommand[0])
9628 ExecuteRecoveryCommand(XLogCtl->archiveCleanupCommand,
9629 "archive_cleanup_command",
9630 false);
9631
9632 return true;
9633 }
9634
9635 /*
9636 * Retreat *logSegNo to the last segment that we need to retain because of
9637 * either wal_keep_segments or replication slots.
9638 *
9639 * This is calculated by subtracting wal_keep_segments from the given xlog
9640 * location, recptr and by making sure that that result is below the
9641 * requirement of replication slots.
9642 */
9643 static void
9644 KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo)
9645 {
9646 XLogSegNo segno;
9647 XLogRecPtr keep;
9648
9649 XLByteToSeg(recptr, segno, wal_segment_size);
9650 keep = XLogGetReplicationSlotMinimumLSN();
9651
9652 /* compute limit for wal_keep_segments first */
9653 if (wal_keep_segments > 0)
9654 {
9655 /* avoid underflow, don't go below 1 */
9656 if (segno <= wal_keep_segments)
9657 segno = 1;
9658 else
9659 segno = segno - wal_keep_segments;
9660 }
9661
9662 /* then check whether slots limit removal further */
9663 if (max_replication_slots > 0 && keep != InvalidXLogRecPtr)
9664 {
9665 XLogSegNo slotSegNo;
9666
9667 XLByteToSeg(keep, slotSegNo, wal_segment_size);
9668
9669 if (slotSegNo <= 0)
9670 segno = 1;
9671 else if (slotSegNo < segno)
9672 segno = slotSegNo;
9673 }
9674
9675 /* don't delete WAL segments newer than the calculated segment */
9676 if (segno < *logSegNo)
9677 *logSegNo = segno;
9678 }
9679
9680 /*
9681 * Write a NEXTOID log record
9682 */
9683 void
9684 XLogPutNextOid(Oid nextOid)
9685 {
9686 XLogBeginInsert();
9687 XLogRegisterData((char *) (&nextOid), sizeof(Oid));
9688 (void) XLogInsert(RM_XLOG_ID, XLOG_NEXTOID);
9689
9690 /*
9691 * We need not flush the NEXTOID record immediately, because any of the
9692 * just-allocated OIDs could only reach disk as part of a tuple insert or
9693 * update that would have its own XLOG record that must follow the NEXTOID
9694 * record. Therefore, the standard buffer LSN interlock applied to those
9695 * records will ensure no such OID reaches disk before the NEXTOID record
9696 * does.
9697 *
9698 * Note, however, that the above statement only covers state "within" the
9699 * database. When we use a generated OID as a file or directory name, we
9700 * are in a sense violating the basic WAL rule, because that filesystem
9701 * change may reach disk before the NEXTOID WAL record does. The impact
9702 * of this is that if a database crash occurs immediately afterward, we
9703 * might after restart re-generate the same OID and find that it conflicts
9704 * with the leftover file or directory. But since for safety's sake we
9705 * always loop until finding a nonconflicting filename, this poses no real
9706 * problem in practice. See pgsql-hackers discussion 27-Sep-2006.
9707 */
9708 }
9709
9710 /*
9711 * Write an XLOG SWITCH record.
9712 *
9713 * Here we just blindly issue an XLogInsert request for the record.
9714 * All the magic happens inside XLogInsert.
9715 *
9716 * The return value is either the end+1 address of the switch record,
9717 * or the end+1 address of the prior segment if we did not need to
9718 * write a switch record because we are already at segment start.
9719 */
9720 XLogRecPtr
9721 RequestXLogSwitch(bool mark_unimportant)
9722 {
9723 XLogRecPtr RecPtr;
9724
9725 /* XLOG SWITCH has no data */
9726 XLogBeginInsert();
9727
9728 if (mark_unimportant)
9729 XLogSetRecordFlags(XLOG_MARK_UNIMPORTANT);
9730 RecPtr = XLogInsert(RM_XLOG_ID, XLOG_SWITCH);
9731
9732 return RecPtr;
9733 }
9734
9735 /*
9736 * Write a RESTORE POINT record
9737 */
9738 XLogRecPtr
9739 XLogRestorePoint(const char *rpName)
9740 {
9741 XLogRecPtr RecPtr;
9742 xl_restore_point xlrec;
9743
9744 xlrec.rp_time = GetCurrentTimestamp();
9745 strlcpy(xlrec.rp_name, rpName, MAXFNAMELEN);
9746
9747 XLogBeginInsert();
9748 XLogRegisterData((char *) &xlrec, sizeof(xl_restore_point));
9749
9750 RecPtr = XLogInsert(RM_XLOG_ID, XLOG_RESTORE_POINT);
9751
9752 ereport(LOG,
9753 (errmsg("restore point \"%s\" created at %X/%X",
9754 rpName, (uint32) (RecPtr >> 32), (uint32) RecPtr)));
9755
9756 return RecPtr;
9757 }
9758
9759 /*
9760 * Check if any of the GUC parameters that are critical for hot standby
9761 * have changed, and update the value in pg_control file if necessary.
9762 */
9763 static void
9764 XLogReportParameters(void)
9765 {
9766 if (wal_level != ControlFile->wal_level ||
9767 wal_log_hints != ControlFile->wal_log_hints ||
9768 MaxConnections != ControlFile->MaxConnections ||
9769 max_worker_processes != ControlFile->max_worker_processes ||
9770 max_prepared_xacts != ControlFile->max_prepared_xacts ||
9771 max_locks_per_xact != ControlFile->max_locks_per_xact ||
9772 track_commit_timestamp != ControlFile->track_commit_timestamp)
9773 {
9774 /*
9775 * The change in number of backend slots doesn't need to be WAL-logged
9776 * if archiving is not enabled, as you can't start archive recovery
9777 * with wal_level=minimal anyway. We don't really care about the
9778 * values in pg_control either if wal_level=minimal, but seems better
9779 * to keep them up-to-date to avoid confusion.
9780 */
9781 if (wal_level != ControlFile->wal_level || XLogIsNeeded())
9782 {
9783 xl_parameter_change xlrec;
9784 XLogRecPtr recptr;
9785
9786 xlrec.MaxConnections = MaxConnections;
9787 xlrec.max_worker_processes = max_worker_processes;
9788 xlrec.max_prepared_xacts = max_prepared_xacts;
9789 xlrec.max_locks_per_xact = max_locks_per_xact;
9790 xlrec.wal_level = wal_level;
9791 xlrec.wal_log_hints = wal_log_hints;
9792 xlrec.track_commit_timestamp = track_commit_timestamp;
9793
9794 XLogBeginInsert();
9795 XLogRegisterData((char *) &xlrec, sizeof(xlrec));
9796
9797 recptr = XLogInsert(RM_XLOG_ID, XLOG_PARAMETER_CHANGE);
9798 XLogFlush(recptr);
9799 }
9800
9801 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
9802
9803 ControlFile->MaxConnections = MaxConnections;
9804 ControlFile->max_worker_processes = max_worker_processes;
9805 ControlFile->max_prepared_xacts = max_prepared_xacts;
9806 ControlFile->max_locks_per_xact = max_locks_per_xact;
9807 ControlFile->wal_level = wal_level;
9808 ControlFile->wal_log_hints = wal_log_hints;
9809 ControlFile->track_commit_timestamp = track_commit_timestamp;
9810 UpdateControlFile();
9811
9812 LWLockRelease(ControlFileLock);
9813 }
9814 }
9815
9816 /*
9817 * Update full_page_writes in shared memory, and write an
9818 * XLOG_FPW_CHANGE record if necessary.
9819 *
9820 * Note: this function assumes there is no other process running
9821 * concurrently that could update it.
9822 */
9823 void
9824 UpdateFullPageWrites(void)
9825 {
9826 XLogCtlInsert *Insert = &XLogCtl->Insert;
9827 bool recoveryInProgress;
9828
9829 /*
9830 * Do nothing if full_page_writes has not been changed.
9831 *
9832 * It's safe to check the shared full_page_writes without the lock,
9833 * because we assume that there is no concurrently running process which
9834 * can update it.
9835 */
9836 if (fullPageWrites == Insert->fullPageWrites)
9837 return;
9838
9839 /*
9840 * Perform this outside critical section so that the WAL insert
9841 * initialization done by RecoveryInProgress() doesn't trigger an
9842 * assertion failure.
9843 */
9844 recoveryInProgress = RecoveryInProgress();
9845
9846 START_CRIT_SECTION();
9847
9848 /*
9849 * It's always safe to take full page images, even when not strictly
9850 * required, but not the other round. So if we're setting full_page_writes
9851 * to true, first set it true and then write the WAL record. If we're
9852 * setting it to false, first write the WAL record and then set the global
9853 * flag.
9854 */
9855 if (fullPageWrites)
9856 {
9857 WALInsertLockAcquireExclusive();
9858 Insert->fullPageWrites = true;
9859 WALInsertLockRelease();
9860 }
9861
9862 /*
9863 * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
9864 * full_page_writes during archive recovery, if required.
9865 */
9866 if (XLogStandbyInfoActive() && !recoveryInProgress)
9867 {
9868 XLogBeginInsert();
9869 XLogRegisterData((char *) (&fullPageWrites), sizeof(bool));
9870
9871 XLogInsert(RM_XLOG_ID, XLOG_FPW_CHANGE);
9872 }
9873
9874 if (!fullPageWrites)
9875 {
9876 WALInsertLockAcquireExclusive();
9877 Insert->fullPageWrites = false;
9878 WALInsertLockRelease();
9879 }
9880 END_CRIT_SECTION();
9881 }
9882
9883 /*
9884 * Check that it's OK to switch to new timeline during recovery.
9885 *
9886 * 'lsn' is the address of the shutdown checkpoint record we're about to
9887 * replay. (Currently, timeline can only change at a shutdown checkpoint).
9888 */
9889 static void
9890 checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI)
9891 {
9892 /* Check that the record agrees on what the current (old) timeline is */
9893 if (prevTLI != ThisTimeLineID)
9894 ereport(PANIC,
9895 (errmsg("unexpected previous timeline ID %u (current timeline ID %u) in checkpoint record",
9896 prevTLI, ThisTimeLineID)));
9897
9898 /*
9899 * The new timeline better be in the list of timelines we expect to see,
9900 * according to the timeline history. It should also not decrease.
9901 */
9902 if (newTLI < ThisTimeLineID || !tliInHistory(newTLI, expectedTLEs))
9903 ereport(PANIC,
9904 (errmsg("unexpected timeline ID %u (after %u) in checkpoint record",
9905 newTLI, ThisTimeLineID)));
9906
9907 /*
9908 * If we have not yet reached min recovery point, and we're about to
9909 * switch to a timeline greater than the timeline of the min recovery
9910 * point: trouble. After switching to the new timeline, we could not
9911 * possibly visit the min recovery point on the correct timeline anymore.
9912 * This can happen if there is a newer timeline in the archive that
9913 * branched before the timeline the min recovery point is on, and you
9914 * attempt to do PITR to the new timeline.
9915 */
9916 if (!XLogRecPtrIsInvalid(minRecoveryPoint) &&
9917 lsn < minRecoveryPoint &&
9918 newTLI > minRecoveryPointTLI)
9919 ereport(PANIC,
9920 (errmsg("unexpected timeline ID %u in checkpoint record, before reaching minimum recovery point %X/%X on timeline %u",
9921 newTLI,
9922 (uint32) (minRecoveryPoint >> 32),
9923 (uint32) minRecoveryPoint,
9924 minRecoveryPointTLI)));
9925
9926 /* Looks good */
9927 }
9928
9929 /*
9930 * XLOG resource manager's routines
9931 *
9932 * Definitions of info values are in include/catalog/pg_control.h, though
9933 * not all record types are related to control file updates.
9934 */
9935 void
9936 xlog_redo(XLogReaderState *record)
9937 {
9938 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9939 XLogRecPtr lsn = record->EndRecPtr;
9940
9941 /* in XLOG rmgr, backup blocks are only used by XLOG_FPI records */
9942 Assert(info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
9943 info == XLOG_FPI_MULTI || !XLogRecHasAnyBlockRefs(record));
9944
9945 if (info == XLOG_NEXTOID)
9946 {
9947 Oid nextOid;
9948
9949 /*
9950 * We used to try to take the maximum of ShmemVariableCache->nextOid
9951 * and the recorded nextOid, but that fails if the OID counter wraps
9952 * around. Since no OID allocation should be happening during replay
9953 * anyway, better to just believe the record exactly. We still take
9954 * OidGenLock while setting the variable, just in case.
9955 */
9956 memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
9957 LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
9958 ShmemVariableCache->nextOid = nextOid;
9959 ShmemVariableCache->oidCount = 0;
9960 LWLockRelease(OidGenLock);
9961 }
9962 else if (info == XLOG_CHECKPOINT_SHUTDOWN)
9963 {
9964 CheckPoint checkPoint;
9965
9966 memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
9967 /* In a SHUTDOWN checkpoint, believe the counters exactly */
9968 LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
9969 ShmemVariableCache->nextXid = checkPoint.nextXid;
9970 LWLockRelease(XidGenLock);
9971 LWLockAcquire(OidGenLock, LW_EXCLUSIVE);
9972 ShmemVariableCache->nextOid = checkPoint.nextOid;
9973 ShmemVariableCache->oidCount = 0;
9974 LWLockRelease(OidGenLock);
9975 MultiXactSetNextMXact(checkPoint.nextMulti,
9976 checkPoint.nextMultiOffset);
9977
9978 MultiXactAdvanceOldest(checkPoint.oldestMulti,
9979 checkPoint.oldestMultiDB);
9980
9981 /*
9982 * No need to set oldestClogXid here as well; it'll be set when we
9983 * redo an xl_clog_truncate if it changed since initialization.
9984 */
9985 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
9986
9987 /*
9988 * If we see a shutdown checkpoint while waiting for an end-of-backup
9989 * record, the backup was canceled and the end-of-backup record will
9990 * never arrive.
9991 */
9992 if (ArchiveRecoveryRequested &&
9993 !XLogRecPtrIsInvalid(ControlFile->backupStartPoint) &&
9994 XLogRecPtrIsInvalid(ControlFile->backupEndPoint))
9995 ereport(PANIC,
9996 (errmsg("online backup was canceled, recovery cannot continue")));
9997
9998 /*
9999 * If we see a shutdown checkpoint, we know that nothing was running
10000 * on the master at this point. So fake-up an empty running-xacts
10001 * record and use that here and now. Recover additional standby state
10002 * for prepared transactions.
10003 */
10004 if (standbyState >= STANDBY_INITIALIZED)
10005 {
10006 TransactionId *xids;
10007 int nxids;
10008 TransactionId oldestActiveXID;
10009 TransactionId latestCompletedXid;
10010 RunningTransactionsData running;
10011
10012 oldestActiveXID = PrescanPreparedTransactions(&xids, &nxids);
10013
10014 /*
10015 * Construct a RunningTransactions snapshot representing a shut
10016 * down server, with only prepared transactions still alive. We're
10017 * never overflowed at this point because all subxids are listed
10018 * with their parent prepared transactions.
10019 */
10020 running.xcnt = nxids;
10021 running.subxcnt = 0;
10022 running.subxid_overflow = false;
10023 running.nextXid = checkPoint.nextXid;
10024 running.oldestRunningXid = oldestActiveXID;
10025 latestCompletedXid = checkPoint.nextXid;
10026 TransactionIdRetreat(latestCompletedXid);
10027 Assert(TransactionIdIsNormal(latestCompletedXid));
10028 running.latestCompletedXid = latestCompletedXid;
10029 running.xids = xids;
10030
10031 ProcArrayApplyRecoveryInfo(&running);
10032
10033 StandbyRecoverPreparedTransactions();
10034 }
10035
10036 /* ControlFile->checkPointCopy always tracks the latest ckpt XID */
10037 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
10038 ControlFile->checkPointCopy.nextXidEpoch = checkPoint.nextXidEpoch;
10039 ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
10040 LWLockRelease(ControlFileLock);
10041
10042 /* Update shared-memory copy of checkpoint XID/epoch */
10043 SpinLockAcquire(&XLogCtl->info_lck);
10044 XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
10045 XLogCtl->ckptXid = checkPoint.nextXid;
10046 SpinLockRelease(&XLogCtl->info_lck);
10047
10048 /*
10049 * We should've already switched to the new TLI before replaying this
10050 * record.
10051 */
10052 if (checkPoint.ThisTimeLineID != ThisTimeLineID)
10053 ereport(PANIC,
10054 (errmsg("unexpected timeline ID %u (should be %u) in checkpoint record",
10055 checkPoint.ThisTimeLineID, ThisTimeLineID)));
10056
10057 RecoveryRestartPoint(&checkPoint);
10058 }
10059 else if (info == XLOG_CHECKPOINT_ONLINE)
10060 {
10061 CheckPoint checkPoint;
10062
10063 memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
10064 /* In an ONLINE checkpoint, treat the XID counter as a minimum */
10065 LWLockAcquire(XidGenLock, LW_EXCLUSIVE);
10066 if (TransactionIdPrecedes(ShmemVariableCache->nextXid,
10067 checkPoint.nextXid))
10068 ShmemVariableCache->nextXid = checkPoint.nextXid;
10069 LWLockRelease(XidGenLock);
10070
10071 /*
10072 * We ignore the nextOid counter in an ONLINE checkpoint, preferring
10073 * to track OID assignment through XLOG_NEXTOID records. The nextOid
10074 * counter is from the start of the checkpoint and might well be stale
10075 * compared to later XLOG_NEXTOID records. We could try to take the
10076 * maximum of the nextOid counter and our latest value, but since
10077 * there's no particular guarantee about the speed with which the OID
10078 * counter wraps around, that's a risky thing to do. In any case,
10079 * users of the nextOid counter are required to avoid assignment of
10080 * duplicates, so that a somewhat out-of-date value should be safe.
10081 */
10082
10083 /* Handle multixact */
10084 MultiXactAdvanceNextMXact(checkPoint.nextMulti,
10085 checkPoint.nextMultiOffset);
10086
10087 /*
10088 * NB: This may perform multixact truncation when replaying WAL
10089 * generated by an older primary.
10090 */
10091 MultiXactAdvanceOldest(checkPoint.oldestMulti,
10092 checkPoint.oldestMultiDB);
10093 if (TransactionIdPrecedes(ShmemVariableCache->oldestXid,
10094 checkPoint.oldestXid))
10095 SetTransactionIdLimit(checkPoint.oldestXid,
10096 checkPoint.oldestXidDB);
10097 /* ControlFile->checkPointCopy always tracks the latest ckpt XID */
10098 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
10099 ControlFile->checkPointCopy.nextXidEpoch = checkPoint.nextXidEpoch;
10100 ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;
10101 LWLockRelease(ControlFileLock);
10102
10103 /* Update shared-memory copy of checkpoint XID/epoch */
10104 SpinLockAcquire(&XLogCtl->info_lck);
10105 XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
10106 XLogCtl->ckptXid = checkPoint.nextXid;
10107 SpinLockRelease(&XLogCtl->info_lck);
10108
10109 /* TLI should not change in an on-line checkpoint */
10110 if (checkPoint.ThisTimeLineID != ThisTimeLineID)
10111 ereport(PANIC,
10112 (errmsg("unexpected timeline ID %u (should be %u) in checkpoint record",
10113 checkPoint.ThisTimeLineID, ThisTimeLineID)));
10114
10115 RecoveryRestartPoint(&checkPoint);
10116 }
10117 else if (info == XLOG_OVERWRITE_CONTRECORD)
10118 {
10119 xl_overwrite_contrecord xlrec;
10120
10121 memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_overwrite_contrecord));
10122 VerifyOverwriteContrecord(&xlrec, record);
10123 }
10124 else if (info == XLOG_END_OF_RECOVERY)
10125 {
10126 xl_end_of_recovery xlrec;
10127
10128 memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
10129
10130 /*
10131 * For Hot Standby, we could treat this like a Shutdown Checkpoint,
10132 * but this case is rarer and harder to test, so the benefit doesn't
10133 * outweigh the potential extra cost of maintenance.
10134 */
10135
10136 /*
10137 * We should've already switched to the new TLI before replaying this
10138 * record.
10139 */
10140 if (xlrec.ThisTimeLineID != ThisTimeLineID)
10141 ereport(PANIC,
10142 (errmsg("unexpected timeline ID %u (should be %u) in checkpoint record",
10143 xlrec.ThisTimeLineID, ThisTimeLineID)));
10144 }
10145 else if (info == XLOG_NOOP)
10146 {
10147 /* nothing to do here */
10148 }
10149 else if (info == XLOG_SWITCH)
10150 {
10151 /* nothing to do here */
10152 }
10153 else if (info == XLOG_RESTORE_POINT)
10154 {
10155 /* nothing to do here */
10156 }
10157 else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
10158 info == XLOG_FPI_MULTI)
10159 {
10160 uint8 block_id;
10161
10162 /*
10163 * Full-page image (FPI) records contain nothing else but a backup
10164 * block (or multiple backup blocks). Every block reference must
10165 * include a full-page image - otherwise there would be no point in
10166 * this record.
10167 *
10168 * No recovery conflicts are generated by these generic records - if a
10169 * resource manager needs to generate conflicts, it has to define a
10170 * separate WAL record type and redo routine.
10171 *
10172 * XLOG_FPI_FOR_HINT records are generated when a page needs to be
		 * WAL-logged because of a hint bit update. They are only generated
10174 * when checksums are enabled. There is no difference in handling
10175 * XLOG_FPI and XLOG_FPI_FOR_HINT records, they use a different info
10176 * code just to distinguish them for statistics purposes.
10177 */
10178 for (block_id = 0; block_id <= record->max_block_id; block_id++)
10179 {
10180 Buffer buffer;
10181
10182 if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
10183 elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
10184 UnlockReleaseBuffer(buffer);
10185 }
10186 }
10187 else if (info == XLOG_BACKUP_END)
10188 {
10189 XLogRecPtr startpoint;
10190
10191 memcpy(&startpoint, XLogRecGetData(record), sizeof(startpoint));
10192
10193 if (ControlFile->backupStartPoint == startpoint)
10194 {
10195 /*
10196 * We have reached the end of base backup, the point where
10197 * pg_stop_backup() was done. The data on disk is now consistent.
10198 * Reset backupStartPoint, and update minRecoveryPoint to make
10199 * sure we don't allow starting up at an earlier point even if
10200 * recovery is stopped and restarted soon after this.
10201 */
10202 elog(DEBUG1, "end of backup reached");
10203
10204 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
10205
10206 if (ControlFile->minRecoveryPoint < lsn)
10207 {
10208 ControlFile->minRecoveryPoint = lsn;
10209 ControlFile->minRecoveryPointTLI = ThisTimeLineID;
10210 }
10211 ControlFile->backupStartPoint = InvalidXLogRecPtr;
10212 ControlFile->backupEndRequired = false;
10213 UpdateControlFile();
10214
10215 LWLockRelease(ControlFileLock);
10216 }
10217 }
10218 else if (info == XLOG_PARAMETER_CHANGE)
10219 {
10220 xl_parameter_change xlrec;
10221
10222 /* Update our copy of the parameters in pg_control */
10223 memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_parameter_change));
10224
10225 LWLockAcquire(ControlFileLock, LW_EXCLUSIVE);
10226 ControlFile->MaxConnections = xlrec.MaxConnections;
10227 ControlFile->max_worker_processes = xlrec.max_worker_processes;
10228 ControlFile->max_prepared_xacts = xlrec.max_prepared_xacts;
10229 ControlFile->max_locks_per_xact = xlrec.max_locks_per_xact;
10230 ControlFile->wal_level = xlrec.wal_level;
10231 ControlFile->wal_log_hints = xlrec.wal_log_hints;
10232
10233 /*
10234 * Update minRecoveryPoint to ensure that if recovery is aborted, we
10235 * recover back up to this point before allowing hot standby again.
10236 * This is important if the max_* settings are decreased, to ensure
10237 * you don't run queries against the WAL preceding the change. The
10238 * local copies cannot be updated as long as crash recovery is
10239 * happening and we expect all the WAL to be replayed.
10240 */
10241 if (InArchiveRecovery)
10242 {
10243 minRecoveryPoint = ControlFile->minRecoveryPoint;
10244 minRecoveryPointTLI = ControlFile->minRecoveryPointTLI;
10245 }
10246 if (minRecoveryPoint != InvalidXLogRecPtr && minRecoveryPoint < lsn)
10247 {
10248 ControlFile->minRecoveryPoint = lsn;
10249 ControlFile->minRecoveryPointTLI = ThisTimeLineID;
10250 }
10251
10252 CommitTsParameterChange(xlrec.track_commit_timestamp,
10253 ControlFile->track_commit_timestamp);
10254 ControlFile->track_commit_timestamp = xlrec.track_commit_timestamp;
10255
10256 UpdateControlFile();
10257 LWLockRelease(ControlFileLock);
10258
10259 /* Check to see if any changes to max_connections give problems */
10260 CheckRequiredParameterValues();
10261 }
10262 else if (info == XLOG_FPW_CHANGE)
10263 {
10264 bool fpw;
10265
10266 memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
10267
10268 /*
10269 * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
10270 * do_pg_start_backup() and do_pg_stop_backup() can check whether
10271 * full_page_writes has been disabled during online backup.
10272 */
10273 if (!fpw)
10274 {
10275 SpinLockAcquire(&XLogCtl->info_lck);
10276 if (XLogCtl->lastFpwDisableRecPtr < ReadRecPtr)
10277 XLogCtl->lastFpwDisableRecPtr = ReadRecPtr;
10278 SpinLockRelease(&XLogCtl->info_lck);
10279 }
10280
10281 /* Keep track of full_page_writes */
10282 lastFullPageWrites = fpw;
10283 }
10284 }
10285
10286 /*
10287 * Verify the payload of a XLOG_OVERWRITE_CONTRECORD record.
10288 */
10289 static void
10290 VerifyOverwriteContrecord(xl_overwrite_contrecord *xlrec, XLogReaderState *state)
10291 {
10292 if (xlrec->overwritten_lsn != state->overwrittenRecPtr)
10293 elog(FATAL, "mismatching overwritten LSN %X/%X -> %X/%X",
10294 (uint32) (xlrec->overwritten_lsn >> 32),
10295 (uint32) xlrec->overwritten_lsn,
10296 (uint32) (state->overwrittenRecPtr >> 32),
10297 (uint32) state->overwrittenRecPtr);
10298
10299 ereport(LOG,
10300 (errmsg("successfully skipped missing contrecord at %X/%X, overwritten at %s",
10301 (uint32) (xlrec->overwritten_lsn >> 32),
10302 (uint32) xlrec->overwritten_lsn,
10303 timestamptz_to_str(xlrec->overwrite_time))));
10304
10305 /* Verifying the record should only happen once */
10306 state->overwrittenRecPtr = InvalidXLogRecPtr;
10307 }
10308
10309 #ifdef WAL_DEBUG
10310
10311 static void
10312 xlog_outrec(StringInfo buf, XLogReaderState *record)
10313 {
10314 int block_id;
10315
10316 appendStringInfo(buf, "prev %X/%X; xid %u",
10317 (uint32) (XLogRecGetPrev(record) >> 32),
10318 (uint32) XLogRecGetPrev(record),
10319 XLogRecGetXid(record));
10320
10321 appendStringInfo(buf, "; len %u",
10322 XLogRecGetDataLen(record));
10323
10324 /* decode block references */
10325 for (block_id = 0; block_id <= record->max_block_id; block_id++)
10326 {
10327 RelFileNode rnode;
10328 ForkNumber forknum;
10329 BlockNumber blk;
10330
10331 if (!XLogRecHasBlockRef(record, block_id))
10332 continue;
10333
10334 XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blk);
10335 if (forknum != MAIN_FORKNUM)
10336 appendStringInfo(buf, "; blkref #%u: rel %u/%u/%u, fork %u, blk %u",
10337 block_id,
10338 rnode.spcNode, rnode.dbNode, rnode.relNode,
10339 forknum,
10340 blk);
10341 else
10342 appendStringInfo(buf, "; blkref #%u: rel %u/%u/%u, blk %u",
10343 block_id,
10344 rnode.spcNode, rnode.dbNode, rnode.relNode,
10345 blk);
10346 if (XLogRecHasBlockImage(record, block_id))
10347 appendStringInfoString(buf, " FPW");
10348 }
10349 }
10350 #endif /* WAL_DEBUG */
10351
10352 /*
10353 * Returns a string describing an XLogRecord, consisting of its identity
10354 * optionally followed by a colon, a space, and a further description.
10355 */
10356 static void
10357 xlog_outdesc(StringInfo buf, XLogReaderState *record)
10358 {
10359 RmgrId rmid = XLogRecGetRmid(record);
10360 uint8 info = XLogRecGetInfo(record);
10361 const char *id;
10362
10363 appendStringInfoString(buf, RmgrTable[rmid].rm_name);
10364 appendStringInfoChar(buf, '/');
10365
10366 id = RmgrTable[rmid].rm_identify(info);
10367 if (id == NULL)
10368 appendStringInfo(buf, "UNKNOWN (%X): ", info & ~XLR_INFO_MASK);
10369 else
10370 appendStringInfo(buf, "%s: ", id);
10371
10372 RmgrTable[rmid].rm_desc(buf, record);
10373 }
10374
10375
10376 /*
10377 * Return the (possible) sync flag used for opening a file, depending on the
10378 * value of the GUC wal_sync_method.
10379 */
10380 static int
10381 get_sync_bit(int method)
10382 {
10383 int o_direct_flag = 0;
10384
10385 /* If fsync is disabled, never open in sync mode */
10386 if (!enableFsync)
10387 return 0;
10388
10389 /*
10390 * Optimize writes by bypassing kernel cache with O_DIRECT when using
10391 * O_SYNC/O_FSYNC and O_DSYNC. But only if archiving and streaming are
10392 * disabled, otherwise the archive command or walsender process will read
10393 * the WAL soon after writing it, which is guaranteed to cause a physical
10394 * read if we bypassed the kernel cache. We also skip the
10395 * posix_fadvise(POSIX_FADV_DONTNEED) call in XLogFileClose() for the same
10396 * reason.
10397 *
10398 * Never use O_DIRECT in walreceiver process for similar reasons; the WAL
10399 * written by walreceiver is normally read by the startup process soon
10400 * after its written. Also, walreceiver performs unaligned writes, which
10401 * don't work with O_DIRECT, so it is required for correctness too.
10402 */
10403 if (!XLogIsNeeded() && !AmWalReceiverProcess())
10404 o_direct_flag = PG_O_DIRECT;
10405
10406 switch (method)
10407 {
10408 /*
10409 * enum values for all sync options are defined even if they are
10410 * not supported on the current platform. But if not, they are
10411 * not included in the enum option array, and therefore will never
10412 * be seen here.
10413 */
10414 case SYNC_METHOD_FSYNC:
10415 case SYNC_METHOD_FSYNC_WRITETHROUGH:
10416 case SYNC_METHOD_FDATASYNC:
10417 return 0;
10418 #ifdef OPEN_SYNC_FLAG
10419 case SYNC_METHOD_OPEN:
10420 return OPEN_SYNC_FLAG | o_direct_flag;
10421 #endif
10422 #ifdef OPEN_DATASYNC_FLAG
10423 case SYNC_METHOD_OPEN_DSYNC:
10424 return OPEN_DATASYNC_FLAG | o_direct_flag;
10425 #endif
10426 default:
10427 /* can't happen (unless we are out of sync with option array) */
10428 elog(ERROR, "unrecognized wal_sync_method: %d", method);
10429 return 0; /* silence warning */
10430 }
10431 }
10432
10433 /*
10434 * GUC support
10435 */
10436 void
10437 assign_xlog_sync_method(int new_sync_method, void *extra)
10438 {
10439 if (sync_method != new_sync_method)
10440 {
10441 /*
10442 * To ensure that no blocks escape unsynced, force an fsync on the
10443 * currently open log segment (if any). Also, if the open flag is
10444 * changing, close the log file so it will be reopened (with new flag
10445 * bit) at next use.
10446 */
10447 if (openLogFile >= 0)
10448 {
10449 pgstat_report_wait_start(WAIT_EVENT_WAL_SYNC_METHOD_ASSIGN);
10450 if (pg_fsync(openLogFile) != 0)
10451 ereport(PANIC,
10452 (errcode_for_file_access(),
10453 errmsg("could not fsync log segment %s: %m",
10454 XLogFileNameP(ThisTimeLineID, openLogSegNo))));
10455 pgstat_report_wait_end();
10456 if (get_sync_bit(sync_method) != get_sync_bit(new_sync_method))
10457 XLogFileClose();
10458 }
10459 }
10460 }
10461
10462
10463 /*
10464 * Issue appropriate kind of fsync (if any) for an XLOG output file.
10465 *
10466 * 'fd' is a file descriptor for the XLOG file to be fsync'd.
10467 * 'log' and 'seg' are for error reporting purposes.
10468 */
10469 void
10470 issue_xlog_fsync(int fd, XLogSegNo segno)
10471 {
10472 switch (sync_method)
10473 {
10474 case SYNC_METHOD_FSYNC:
10475 if (pg_fsync_no_writethrough(fd) != 0)
10476 ereport(PANIC,
10477 (errcode_for_file_access(),
10478 errmsg("could not fsync log file %s: %m",
10479 XLogFileNameP(ThisTimeLineID, segno))));
10480 break;
10481 #ifdef HAVE_FSYNC_WRITETHROUGH
10482 case SYNC_METHOD_FSYNC_WRITETHROUGH:
10483 if (pg_fsync_writethrough(fd) != 0)
10484 ereport(PANIC,
10485 (errcode_for_file_access(),
10486 errmsg("could not fsync write-through log file %s: %m",
10487 XLogFileNameP(ThisTimeLineID, segno))));
10488 break;
10489 #endif
10490 #ifdef HAVE_FDATASYNC
10491 case SYNC_METHOD_FDATASYNC:
10492 if (pg_fdatasync(fd) != 0)
10493 ereport(PANIC,
10494 (errcode_for_file_access(),
10495 errmsg("could not fdatasync log file %s: %m",
10496 XLogFileNameP(ThisTimeLineID, segno))));
10497 break;
10498 #endif
10499 case SYNC_METHOD_OPEN:
10500 case SYNC_METHOD_OPEN_DSYNC:
10501 /* write synced it already */
10502 break;
10503 default:
10504 elog(PANIC, "unrecognized wal_sync_method: %d", sync_method);
10505 break;
10506 }
10507 }
10508
10509 /*
10510 * Return the filename of given log segment, as a palloc'd string.
10511 */
10512 char *
10513 XLogFileNameP(TimeLineID tli, XLogSegNo segno)
10514 {
10515 char *result = palloc(MAXFNAMELEN);
10516
10517 XLogFileName(result, tli, segno, wal_segment_size);
10518 return result;
10519 }
10520
10521 /*
10522 * do_pg_start_backup is the workhorse of the user-visible pg_start_backup()
10523 * function. It creates the necessary starting checkpoint and constructs the
10524 * backup label file.
10525 *
 * There are two kinds of backups: exclusive and non-exclusive. An exclusive
10527 * backup is started with pg_start_backup(), and there can be only one active
10528 * at a time. The backup and tablespace map files of an exclusive backup are
10529 * written to $PGDATA/backup_label and $PGDATA/tablespace_map, and they are
10530 * removed by pg_stop_backup().
10531 *
10532 * A non-exclusive backup is used for the streaming base backups (see
10533 * src/backend/replication/basebackup.c). The difference to exclusive backups
10534 * is that the backup label and tablespace map files are not written to disk.
10535 * Instead, their would-be contents are returned in *labelfile and *tblspcmapfile,
10536 * and the caller is responsible for including them in the backup archive as
10537 * 'backup_label' and 'tablespace_map'. There can be many non-exclusive backups
10538 * active at the same time, and they don't conflict with an exclusive backup
10539 * either.
10540 *
10541 * tblspcmapfile is required mainly for tar format in windows as native windows
10542 * utilities are not able to create symlinks while extracting files from tar.
10543 * However for consistency, the same is used for all platforms.
10544 *
10545 * needtblspcmapfile is true for the cases (exclusive backup and for
10546 * non-exclusive backup only when tar format is used for taking backup)
10547 * when backup needs to generate tablespace_map file, it is used to
10548 * embed escape character before newline character in tablespace path.
10549 *
10550 * Returns the minimum WAL location that must be present to restore from this
10551 * backup, and the corresponding timeline ID in *starttli_p.
10552 *
10553 * Every successfully started non-exclusive backup must be stopped by calling
10554 * do_pg_stop_backup() or do_pg_abort_backup().
10555 *
10556 * It is the responsibility of the caller of this function to verify the
10557 * permissions of the calling user!
10558 */
10559 XLogRecPtr
10560 do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
10561 StringInfo labelfile, List **tablespaces,
10562 StringInfo tblspcmapfile, bool infotbssize,
10563 bool needtblspcmapfile)
10564 {
10565 bool exclusive = (labelfile == NULL);
10566 bool backup_started_in_recovery = false;
10567 XLogRecPtr checkpointloc;
10568 XLogRecPtr startpoint;
10569 TimeLineID starttli;
10570 pg_time_t stamp_time;
10571 char strfbuf[128];
10572 char xlogfilename[MAXFNAMELEN];
10573 XLogSegNo _logSegNo;
10574 struct stat stat_buf;
10575 FILE *fp;
10576
10577 backup_started_in_recovery = RecoveryInProgress();
10578
10579 /*
10580 * Currently only non-exclusive backup can be taken during recovery.
10581 */
10582 if (backup_started_in_recovery && exclusive)
10583 ereport(ERROR,
10584 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10585 errmsg("recovery is in progress"),
10586 errhint("WAL control functions cannot be executed during recovery.")));
10587
10588 /*
10589 * During recovery, we don't need to check WAL level. Because, if WAL
10590 * level is not sufficient, it's impossible to get here during recovery.
10591 */
10592 if (!backup_started_in_recovery && !XLogIsNeeded())
10593 ereport(ERROR,
10594 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10595 errmsg("WAL level not sufficient for making an online backup"),
10596 errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));
10597
10598 if (strlen(backupidstr) > MAXPGPATH)
10599 ereport(ERROR,
10600 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
10601 errmsg("backup label too long (max %d bytes)",
10602 MAXPGPATH)));
10603
10604 /*
10605 * Mark backup active in shared memory. We must do full-page WAL writes
10606 * during an on-line backup even if not doing so at other times, because
10607 * it's quite possible for the backup dump to obtain a "torn" (partially
10608 * written) copy of a database page if it reads the page concurrently with
10609 * our write to the same page. This can be fixed as long as the first
10610 * write to the page in the WAL sequence is a full-page write. Hence, we
10611 * turn on forcePageWrites and then force a CHECKPOINT, to ensure there
10612 * are no dirty pages in shared memory that might get dumped while the
10613 * backup is in progress without having a corresponding WAL record. (Once
10614 * the backup is complete, we need not force full-page writes anymore,
10615 * since we expect that any pages not modified during the backup interval
10616 * must have been correctly captured by the backup.)
10617 *
10618 * Note that forcePageWrites has no effect during an online backup from
10619 * the standby.
10620 *
10621 * We must hold all the insertion locks to change the value of
10622 * forcePageWrites, to ensure adequate interlocking against
10623 * XLogInsertRecord().
10624 */
10625 WALInsertLockAcquireExclusive();
10626 if (exclusive)
10627 {
10628 /*
10629 * At first, mark that we're now starting an exclusive backup, to
10630 * ensure that there are no other sessions currently running
10631 * pg_start_backup() or pg_stop_backup().
10632 */
10633 if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_NONE)
10634 {
10635 WALInsertLockRelease();
10636 ereport(ERROR,
10637 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10638 errmsg("a backup is already in progress"),
10639 errhint("Run pg_stop_backup() and try again.")));
10640 }
10641 XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_STARTING;
10642 }
10643 else
10644 XLogCtl->Insert.nonExclusiveBackups++;
10645 XLogCtl->Insert.forcePageWrites = true;
10646 WALInsertLockRelease();
10647
10648 /* Ensure we release forcePageWrites if fail below */
10649 PG_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive));
10650 {
10651 bool gotUniqueStartpoint = false;
10652 DIR *tblspcdir;
10653 struct dirent *de;
10654 tablespaceinfo *ti;
10655 int datadirpathlen;
10656
10657 /*
10658 * Force an XLOG file switch before the checkpoint, to ensure that the
10659 * WAL segment the checkpoint is written to doesn't contain pages with
10660 * old timeline IDs. That would otherwise happen if you called
10661 * pg_start_backup() right after restoring from a PITR archive: the
10662 * first WAL segment containing the startup checkpoint has pages in
10663 * the beginning with the old timeline ID. That can cause trouble at
10664 * recovery: we won't have a history file covering the old timeline if
10665 * pg_wal directory was not included in the base backup and the WAL
10666 * archive was cleared too before starting the backup.
10667 *
10668 * This also ensures that we have emitted a WAL page header that has
10669 * XLP_BKP_REMOVABLE off before we emit the checkpoint record.
10670 * Therefore, if a WAL archiver (such as pglesslog) is trying to
10671 * compress out removable backup blocks, it won't remove any that
10672 * occur after this point.
10673 *
10674 * During recovery, we skip forcing XLOG file switch, which means that
10675 * the backup taken during recovery is not available for the special
10676 * recovery case described above.
10677 */
10678 if (!backup_started_in_recovery)
10679 RequestXLogSwitch(false);
10680
10681 do
10682 {
10683 bool checkpointfpw;
10684
10685 /*
10686 * Force a CHECKPOINT. Aside from being necessary to prevent torn
10687 * page problems, this guarantees that two successive backup runs
10688 * will have different checkpoint positions and hence different
10689 * history file names, even if nothing happened in between.
10690 *
10691 * During recovery, establish a restartpoint if possible. We use
10692 * the last restartpoint as the backup starting checkpoint. This
10693 * means that two successive backup runs can have same checkpoint
10694 * positions.
10695 *
10696 * Since the fact that we are executing do_pg_start_backup()
10697 * during recovery means that checkpointer is running, we can use
10698 * RequestCheckpoint() to establish a restartpoint.
10699 *
10700 * We use CHECKPOINT_IMMEDIATE only if requested by user (via
10701 * passing fast = true). Otherwise this can take awhile.
10702 */
10703 RequestCheckpoint(CHECKPOINT_FORCE | CHECKPOINT_WAIT |
10704 (fast ? CHECKPOINT_IMMEDIATE : 0));
10705
10706 /*
10707 * Now we need to fetch the checkpoint record location, and also
10708 * its REDO pointer. The oldest point in WAL that would be needed
10709 * to restore starting from the checkpoint is precisely the REDO
10710 * pointer.
10711 */
10712 LWLockAcquire(ControlFileLock, LW_SHARED);
10713 checkpointloc = ControlFile->checkPoint;
10714 startpoint = ControlFile->checkPointCopy.redo;
10715 starttli = ControlFile->checkPointCopy.ThisTimeLineID;
10716 checkpointfpw = ControlFile->checkPointCopy.fullPageWrites;
10717 LWLockRelease(ControlFileLock);
10718
10719 if (backup_started_in_recovery)
10720 {
10721 XLogRecPtr recptr;
10722
10723 /*
10724 * Check to see if all WAL replayed during online backup
10725 * (i.e., since last restartpoint used as backup starting
10726 * checkpoint) contain full-page writes.
10727 */
10728 SpinLockAcquire(&XLogCtl->info_lck);
10729 recptr = XLogCtl->lastFpwDisableRecPtr;
10730 SpinLockRelease(&XLogCtl->info_lck);
10731
10732 if (!checkpointfpw || startpoint <= recptr)
10733 ereport(ERROR,
10734 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10735 errmsg("WAL generated with full_page_writes=off was replayed "
10736 "since last restartpoint"),
10737 errhint("This means that the backup being taken on the standby "
10738 "is corrupt and should not be used. "
10739 "Enable full_page_writes and run CHECKPOINT on the master, "
10740 "and then try an online backup again.")));
10741
10742 /*
10743 * During recovery, since we don't use the end-of-backup WAL
10744 * record and don't write the backup history file, the
10745 * starting WAL location doesn't need to be unique. This means
10746 * that two base backups started at the same time might use
10747 * the same checkpoint as starting locations.
10748 */
10749 gotUniqueStartpoint = true;
10750 }
10751
10752 /*
10753 * If two base backups are started at the same time (in WAL sender
10754 * processes), we need to make sure that they use different
10755 * checkpoints as starting locations, because we use the starting
10756 * WAL location as a unique identifier for the base backup in the
10757 * end-of-backup WAL record and when we write the backup history
			 * file. Perhaps it would be better to generate a separate unique ID
10759 * for each backup instead of forcing another checkpoint, but
10760 * taking a checkpoint right after another is not that expensive
10761 * either because only few buffers have been dirtied yet.
10762 */
10763 WALInsertLockAcquireExclusive();
10764 if (XLogCtl->Insert.lastBackupStart < startpoint)
10765 {
10766 XLogCtl->Insert.lastBackupStart = startpoint;
10767 gotUniqueStartpoint = true;
10768 }
10769 WALInsertLockRelease();
10770 } while (!gotUniqueStartpoint);
10771
10772 XLByteToSeg(startpoint, _logSegNo, wal_segment_size);
10773 XLogFileName(xlogfilename, starttli, _logSegNo, wal_segment_size);
10774
10775 /*
10776 * Construct tablespace_map file
10777 */
10778 if (exclusive)
10779 tblspcmapfile = makeStringInfo();
10780
10781 datadirpathlen = strlen(DataDir);
10782
10783 /* Collect information about all tablespaces */
10784 tblspcdir = AllocateDir("pg_tblspc");
10785 while ((de = ReadDir(tblspcdir, "pg_tblspc")) != NULL)
10786 {
10787 char fullpath[MAXPGPATH + 10];
10788 char linkpath[MAXPGPATH];
10789 char *relpath = NULL;
10790 int rllen;
10791 StringInfoData buflinkpath;
10792 char *s = linkpath;
10793
10794 /* Skip special stuff */
10795 if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
10796 continue;
10797
10798 snprintf(fullpath, sizeof(fullpath), "pg_tblspc/%s", de->d_name);
10799
10800 #if defined(HAVE_READLINK) || defined(WIN32)
10801 rllen = readlink(fullpath, linkpath, sizeof(linkpath));
10802 if (rllen < 0)
10803 {
10804 ereport(WARNING,
10805 (errmsg("could not read symbolic link \"%s\": %m",
10806 fullpath)));
10807 continue;
10808 }
10809 else if (rllen >= sizeof(linkpath))
10810 {
10811 ereport(WARNING,
10812 (errmsg("symbolic link \"%s\" target is too long",
10813 fullpath)));
10814 continue;
10815 }
10816 linkpath[rllen] = '\0';
10817
10818 /*
10819 * Add the escape character '\\' before newline in a string to
10820 * ensure that we can distinguish between the newline in the
10821 * tablespace path and end of line while reading tablespace_map
10822 * file during archive recovery.
10823 */
10824 initStringInfo(&buflinkpath);
10825
10826 while (*s)
10827 {
10828 if ((*s == '\n' || *s == '\r') && needtblspcmapfile)
10829 appendStringInfoChar(&buflinkpath, '\\');
10830 appendStringInfoChar(&buflinkpath, *s++);
10831 }
10832
10833 /*
10834 * Relpath holds the relative path of the tablespace directory
10835 * when it's located within PGDATA, or NULL if it's located
10836 * elsewhere.
10837 */
10838 if (rllen > datadirpathlen &&
10839 strncmp(linkpath, DataDir, datadirpathlen) == 0 &&
10840 IS_DIR_SEP(linkpath[datadirpathlen]))
10841 relpath = linkpath + datadirpathlen + 1;
10842
10843 ti = palloc(sizeof(tablespaceinfo));
10844 ti->oid = pstrdup(de->d_name);
10845 ti->path = pstrdup(buflinkpath.data);
10846 ti->rpath = relpath ? pstrdup(relpath) : NULL;
10847 ti->size = infotbssize ? sendTablespace(fullpath, true) : -1;
10848
10849 if (tablespaces)
10850 *tablespaces = lappend(*tablespaces, ti);
10851
10852 appendStringInfo(tblspcmapfile, "%s %s\n", ti->oid, ti->path);
10853
10854 pfree(buflinkpath.data);
10855 #else
10856
10857 /*
10858 * If the platform does not have symbolic links, it should not be
10859 * possible to have tablespaces - clearly somebody else created
10860 * them. Warn about it and ignore.
10861 */
10862 ereport(WARNING,
10863 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
10864 errmsg("tablespaces are not supported on this platform")));
10865 #endif
10866 }
10867 FreeDir(tblspcdir);
10868
10869 /*
10870 * Construct backup label file
10871 */
10872 if (exclusive)
10873 labelfile = makeStringInfo();
10874
10875 /* Use the log timezone here, not the session timezone */
10876 stamp_time = (pg_time_t) time(NULL);
10877 pg_strftime(strfbuf, sizeof(strfbuf),
10878 "%Y-%m-%d %H:%M:%S %Z",
10879 pg_localtime(&stamp_time, log_timezone));
10880 appendStringInfo(labelfile, "START WAL LOCATION: %X/%X (file %s)\n",
10881 (uint32) (startpoint >> 32), (uint32) startpoint, xlogfilename);
10882 appendStringInfo(labelfile, "CHECKPOINT LOCATION: %X/%X\n",
10883 (uint32) (checkpointloc >> 32), (uint32) checkpointloc);
10884 appendStringInfo(labelfile, "BACKUP METHOD: %s\n",
10885 exclusive ? "pg_start_backup" : "streamed");
10886 appendStringInfo(labelfile, "BACKUP FROM: %s\n",
10887 backup_started_in_recovery ? "standby" : "master");
10888 appendStringInfo(labelfile, "START TIME: %s\n", strfbuf);
10889 appendStringInfo(labelfile, "LABEL: %s\n", backupidstr);
10890 appendStringInfo(labelfile, "START TIMELINE: %u\n", starttli);
10891
10892 /*
10893 * Okay, write the file, or return its contents to caller.
10894 */
10895 if (exclusive)
10896 {
10897 /*
10898 * Check for existing backup label --- implies a backup is already
10899 * running. (XXX given that we checked exclusiveBackupState
10900 * above, maybe it would be OK to just unlink any such label
10901 * file?)
10902 */
10903 if (stat(BACKUP_LABEL_FILE, &stat_buf) != 0)
10904 {
10905 if (errno != ENOENT)
10906 ereport(ERROR,
10907 (errcode_for_file_access(),
10908 errmsg("could not stat file \"%s\": %m",
10909 BACKUP_LABEL_FILE)));
10910 }
10911 else
10912 ereport(ERROR,
10913 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10914 errmsg("a backup is already in progress"),
10915 errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
10916 BACKUP_LABEL_FILE)));
10917
10918 fp = AllocateFile(BACKUP_LABEL_FILE, "w");
10919
10920 if (!fp)
10921 ereport(ERROR,
10922 (errcode_for_file_access(),
10923 errmsg("could not create file \"%s\": %m",
10924 BACKUP_LABEL_FILE)));
10925 if (fwrite(labelfile->data, labelfile->len, 1, fp) != 1 ||
10926 fflush(fp) != 0 ||
10927 pg_fsync(fileno(fp)) != 0 ||
10928 ferror(fp) ||
10929 FreeFile(fp))
10930 ereport(ERROR,
10931 (errcode_for_file_access(),
10932 errmsg("could not write file \"%s\": %m",
10933 BACKUP_LABEL_FILE)));
10934 /* Allocated locally for exclusive backups, so free separately */
10935 pfree(labelfile->data);
10936 pfree(labelfile);
10937
10938 /* Write backup tablespace_map file. */
10939 if (tblspcmapfile->len > 0)
10940 {
10941 if (stat(TABLESPACE_MAP, &stat_buf) != 0)
10942 {
10943 if (errno != ENOENT)
10944 ereport(ERROR,
10945 (errcode_for_file_access(),
10946 errmsg("could not stat file \"%s\": %m",
10947 TABLESPACE_MAP)));
10948 }
10949 else
10950 ereport(ERROR,
10951 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
10952 errmsg("a backup is already in progress"),
10953 errhint("If you're sure there is no backup in progress, remove file \"%s\" and try again.",
10954 TABLESPACE_MAP)));
10955
10956 fp = AllocateFile(TABLESPACE_MAP, "w");
10957
10958 if (!fp)
10959 ereport(ERROR,
10960 (errcode_for_file_access(),
10961 errmsg("could not create file \"%s\": %m",
10962 TABLESPACE_MAP)));
10963 if (fwrite(tblspcmapfile->data, tblspcmapfile->len, 1, fp) != 1 ||
10964 fflush(fp) != 0 ||
10965 pg_fsync(fileno(fp)) != 0 ||
10966 ferror(fp) ||
10967 FreeFile(fp))
10968 ereport(ERROR,
10969 (errcode_for_file_access(),
10970 errmsg("could not write file \"%s\": %m",
10971 TABLESPACE_MAP)));
10972 }
10973
10974 /* Allocated locally for exclusive backups, so free separately */
10975 pfree(tblspcmapfile->data);
10976 pfree(tblspcmapfile);
10977 }
10978 }
10979 PG_END_ENSURE_ERROR_CLEANUP(pg_start_backup_callback, (Datum) BoolGetDatum(exclusive));
10980
10981 /*
10982 * Mark that start phase has correctly finished for an exclusive backup.
10983 * Session-level locks are updated as well to reflect that state.
10984 *
10985 * Note that CHECK_FOR_INTERRUPTS() must not occur while updating backup
10986 * counters and session-level lock. Otherwise they can be updated
10987 * inconsistently, and which might cause do_pg_abort_backup() to fail.
10988 */
10989 if (exclusive)
10990 {
10991 WALInsertLockAcquireExclusive();
10992 XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_IN_PROGRESS;
10993
10994 /* Set session-level lock */
10995 sessionBackupState = SESSION_BACKUP_EXCLUSIVE;
10996 WALInsertLockRelease();
10997 }
10998 else
10999 sessionBackupState = SESSION_BACKUP_NON_EXCLUSIVE;
11000
11001 /*
11002 * We're done. As a convenience, return the starting WAL location.
11003 */
11004 if (starttli_p)
11005 *starttli_p = starttli;
11006 return startpoint;
11007 }
11008
11009 /* Error cleanup callback for pg_start_backup */
11010 static void
11011 pg_start_backup_callback(int code, Datum arg)
11012 {
11013 bool exclusive = DatumGetBool(arg);
11014
11015 /* Update backup counters and forcePageWrites on failure */
11016 WALInsertLockAcquireExclusive();
11017 if (exclusive)
11018 {
11019 Assert(XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_STARTING);
11020 XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_NONE;
11021 }
11022 else
11023 {
11024 Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
11025 XLogCtl->Insert.nonExclusiveBackups--;
11026 }
11027
11028 if (XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_NONE &&
11029 XLogCtl->Insert.nonExclusiveBackups == 0)
11030 {
11031 XLogCtl->Insert.forcePageWrites = false;
11032 }
11033 WALInsertLockRelease();
11034 }
11035
11036 /*
11037 * Error cleanup callback for pg_stop_backup
11038 */
11039 static void
11040 pg_stop_backup_callback(int code, Datum arg)
11041 {
11042 bool exclusive = DatumGetBool(arg);
11043
11044 /* Update backup status on failure */
11045 WALInsertLockAcquireExclusive();
11046 if (exclusive)
11047 {
11048 Assert(XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_STOPPING);
11049 XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_IN_PROGRESS;
11050 }
11051 WALInsertLockRelease();
11052 }
11053
/*
 * Utility routine to fetch the session-level status of a backup running.
 *
 * Returns this session's backup state: SESSION_BACKUP_NONE,
 * SESSION_BACKUP_EXCLUSIVE, or SESSION_BACKUP_NON_EXCLUSIVE (as set by
 * do_pg_start_backup/do_pg_stop_backup).  No lock is taken; the value is
 * session-local state.
 */
SessionBackupState
get_backup_status(void)
{
	return sessionBackupState;
}
11062
/*
 * do_pg_stop_backup is the workhorse of the user-visible pg_stop_backup()
 * function.
 *
 * If labelfile is NULL, this stops an exclusive backup. Otherwise this stops
 * the non-exclusive backup specified by 'labelfile'.
 *
 * Returns the last WAL location that must be present to restore from this
 * backup, and the corresponding timeline ID in *stoptli_p.
 *
 * It is the responsibility of the caller of this function to verify the
 * permissions of the calling user!
 *
 * If 'waitforarchive' is true and archiving is enabled, this does not return
 * until the last WAL segment and the backup history file have been archived.
 */
XLogRecPtr
do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
{
	bool		exclusive = (labelfile == NULL);
	bool		backup_started_in_recovery = false;
	XLogRecPtr	startpoint;
	XLogRecPtr	stoppoint;
	TimeLineID	stoptli;
	pg_time_t	stamp_time;
	char		strfbuf[128];
	char		histfilepath[MAXPGPATH];
	char		startxlogfilename[MAXFNAMELEN];
	char		stopxlogfilename[MAXFNAMELEN];
	char		lastxlogfilename[MAXFNAMELEN];
	char		histfilename[MAXFNAMELEN];
	char		backupfrom[20];
	XLogSegNo	_logSegNo;
	FILE	   *lfp;
	FILE	   *fp;
	char		ch;
	int			seconds_before_warning;
	int			waits = 0;
	bool		reported_waiting = false;
	char	   *remaining;
	char	   *ptr;
	uint32		hi,				/* high/low halves of LSNs parsed from the
								 * backup label contents */
				lo;

	backup_started_in_recovery = RecoveryInProgress();

	/*
	 * Currently only non-exclusive backup can be taken during recovery.
	 */
	if (backup_started_in_recovery && exclusive)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("recovery is in progress"),
				 errhint("WAL control functions cannot be executed during recovery.")));

	/*
	 * During recovery, we don't need to check WAL level. Because, if WAL
	 * level is not sufficient, it's impossible to get here during recovery.
	 */
	if (!backup_started_in_recovery && !XLogIsNeeded())
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("WAL level not sufficient for making an online backup"),
				 errhint("wal_level must be set to \"replica\" or \"logical\" at server start.")));

	if (exclusive)
	{
		/*
		 * At first, mark that we're now stopping an exclusive backup, to
		 * ensure that there are no other sessions currently running
		 * pg_start_backup() or pg_stop_backup().
		 */
		WALInsertLockAcquireExclusive();
		if (XLogCtl->Insert.exclusiveBackupState != EXCLUSIVE_BACKUP_IN_PROGRESS)
		{
			WALInsertLockRelease();
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("exclusive backup not in progress")));
		}
		XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_STOPPING;
		WALInsertLockRelease();

		/*
		 * Remove backup_label. In case of failure, the state for an exclusive
		 * backup is switched back to in-progress.
		 */
		PG_ENSURE_ERROR_CLEANUP(pg_stop_backup_callback, (Datum) BoolGetDatum(exclusive));
		{
			/*
			 * Read the existing label file into memory.
			 */
			struct stat statbuf;
			int			r;

			if (stat(BACKUP_LABEL_FILE, &statbuf))
			{
				/* should not happen per the upper checks */
				if (errno != ENOENT)
					ereport(ERROR,
							(errcode_for_file_access(),
							 errmsg("could not stat file \"%s\": %m",
									BACKUP_LABEL_FILE)));
				ereport(ERROR,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("a backup is not in progress")));
			}

			lfp = AllocateFile(BACKUP_LABEL_FILE, "r");
			if (!lfp)
			{
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not read file \"%s\": %m",
								BACKUP_LABEL_FILE)));
			}
			/* From here on, 'labelfile' holds the file contents in memory. */
			labelfile = palloc(statbuf.st_size + 1);
			r = fread(labelfile, statbuf.st_size, 1, lfp);
			labelfile[statbuf.st_size] = '\0';

			/*
			 * Close and remove the backup label file
			 */
			if (r != 1 || ferror(lfp) || FreeFile(lfp))
				ereport(ERROR,
						(errcode_for_file_access(),
						 errmsg("could not read file \"%s\": %m",
								BACKUP_LABEL_FILE)));
			durable_unlink(BACKUP_LABEL_FILE, ERROR);

			/*
			 * Remove tablespace_map file if present, it is created only if
			 * there are tablespaces.
			 */
			durable_unlink(TABLESPACE_MAP, DEBUG1);
		}
		PG_END_ENSURE_ERROR_CLEANUP(pg_stop_backup_callback, (Datum) BoolGetDatum(exclusive));
	}

	/*
	 * OK to update backup counters, forcePageWrites and session-level lock.
	 *
	 * Note that CHECK_FOR_INTERRUPTS() must not occur while updating them.
	 * Otherwise they can be updated inconsistently, and which might cause
	 * do_pg_abort_backup() to fail.
	 */
	WALInsertLockAcquireExclusive();
	if (exclusive)
	{
		XLogCtl->Insert.exclusiveBackupState = EXCLUSIVE_BACKUP_NONE;
	}
	else
	{
		/*
		 * The user-visible pg_start/stop_backup() functions that operate on
		 * exclusive backups can be called at any time, but for non-exclusive
		 * backups, it is expected that each do_pg_start_backup() call is
		 * matched by exactly one do_pg_stop_backup() call.
		 */
		Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
		XLogCtl->Insert.nonExclusiveBackups--;
	}

	if (XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_NONE &&
		XLogCtl->Insert.nonExclusiveBackups == 0)
	{
		XLogCtl->Insert.forcePageWrites = false;
	}

	/*
	 * Clean up session-level lock.
	 *
	 * You might think that WALInsertLockRelease() can be called before
	 * cleaning up session-level lock because session-level lock doesn't need
	 * to be protected with WAL insertion lock. But since
	 * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
	 * cleaned up before it.
	 */
	sessionBackupState = SESSION_BACKUP_NONE;

	WALInsertLockRelease();

	/*
	 * Read and parse the START WAL LOCATION line (this code is pretty crude,
	 * but we are not expecting any variability in the file format).
	 */
	if (sscanf(labelfile, "START WAL LOCATION: %X/%X (file %24s)%c",
			   &hi, &lo, startxlogfilename,
			   &ch) != 4 || ch != '\n')
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
	startpoint = ((uint64) hi) << 32 | lo;
	remaining = strchr(labelfile, '\n') + 1;	/* %n is not portable enough */

	/*
	 * Parse the BACKUP FROM line. If we are taking an online backup from the
	 * standby, we confirm that the standby has not been promoted during the
	 * backup.
	 */
	ptr = strstr(remaining, "BACKUP FROM:");
	if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
	if (strcmp(backupfrom, "standby") == 0 && !backup_started_in_recovery)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("the standby was promoted during online backup"),
				 errhint("This means that the backup being taken is corrupt "
						 "and should not be used. "
						 "Try taking another online backup.")));

	/*
	 * During recovery, we don't write an end-of-backup record. We assume that
	 * pg_control was backed up last and its minimum recovery point can be
	 * available as the backup end location. Since we don't have an
	 * end-of-backup record, we use the pg_control value to check whether
	 * we've reached the end of backup when starting recovery from this
	 * backup. We have no way of checking if pg_control wasn't backed up last
	 * however.
	 *
	 * We don't force a switch to new WAL file but it is still possible to
	 * wait for all the required files to be archived if waitforarchive is
	 * true. This is okay if we use the backup to start a standby and fetch
	 * the missing WAL using streaming replication. But in the case of an
	 * archive recovery, a user should set waitforarchive to true and wait for
	 * them to be archived to ensure that all the required files are
	 * available.
	 *
	 * We return the current minimum recovery point as the backup end
	 * location. Note that it can be greater than the exact backup end
	 * location if the minimum recovery point is updated after the backup of
	 * pg_control. This is harmless for current uses.
	 *
	 * XXX currently a backup history file is for informational and debug
	 * purposes only. It's not essential for an online backup. Furthermore,
	 * even if it's created, it will not be archived during recovery because
	 * an archiver is not invoked. So it doesn't seem worthwhile to write a
	 * backup history file during recovery.
	 */
	if (backup_started_in_recovery)
	{
		XLogRecPtr	recptr;

		/*
		 * Check to see if all WAL replayed during online backup contain
		 * full-page writes.
		 */
		SpinLockAcquire(&XLogCtl->info_lck);
		recptr = XLogCtl->lastFpwDisableRecPtr;
		SpinLockRelease(&XLogCtl->info_lck);

		if (startpoint <= recptr)
			ereport(ERROR,
					(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
					 errmsg("WAL generated with full_page_writes=off was replayed "
							"during online backup"),
					 errhint("This means that the backup being taken on the standby "
							 "is corrupt and should not be used. "
							 "Enable full_page_writes and run CHECKPOINT on the master, "
							 "and then try an online backup again.")));


		LWLockAcquire(ControlFileLock, LW_SHARED);
		stoppoint = ControlFile->minRecoveryPoint;
		stoptli = ControlFile->minRecoveryPointTLI;
		LWLockRelease(ControlFileLock);
	}
	else
	{
		/*
		 * Write the backup-end xlog record
		 */
		XLogBeginInsert();
		XLogRegisterData((char *) (&startpoint), sizeof(startpoint));
		stoppoint = XLogInsert(RM_XLOG_ID, XLOG_BACKUP_END);
		stoptli = ThisTimeLineID;

		/*
		 * Force a switch to a new xlog segment file, so that the backup is
		 * valid as soon as archiver moves out the current segment file.
		 */
		RequestXLogSwitch(false);

		XLByteToPrevSeg(stoppoint, _logSegNo, wal_segment_size);
		XLogFileName(stopxlogfilename, stoptli, _logSegNo, wal_segment_size);

		/* Use the log timezone here, not the session timezone */
		stamp_time = (pg_time_t) time(NULL);
		pg_strftime(strfbuf, sizeof(strfbuf),
					"%Y-%m-%d %H:%M:%S %Z",
					pg_localtime(&stamp_time, log_timezone));

		/*
		 * Write the backup history file
		 */
		XLByteToSeg(startpoint, _logSegNo, wal_segment_size);
		BackupHistoryFilePath(histfilepath, stoptli, _logSegNo,
							  startpoint, wal_segment_size);
		fp = AllocateFile(histfilepath, "w");
		if (!fp)
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not create file \"%s\": %m",
							histfilepath)));
		fprintf(fp, "START WAL LOCATION: %X/%X (file %s)\n",
				(uint32) (startpoint >> 32), (uint32) startpoint, startxlogfilename);
		fprintf(fp, "STOP WAL LOCATION: %X/%X (file %s)\n",
				(uint32) (stoppoint >> 32), (uint32) stoppoint, stopxlogfilename);

		/*
		 * Transfer remaining lines including label and start timeline to
		 * history file.
		 */
		fprintf(fp, "%s", remaining);
		fprintf(fp, "STOP TIME: %s\n", strfbuf);
		fprintf(fp, "STOP TIMELINE: %u\n", stoptli);
		if (fflush(fp) || ferror(fp) || FreeFile(fp))
			ereport(ERROR,
					(errcode_for_file_access(),
					 errmsg("could not write file \"%s\": %m",
							histfilepath)));

		/*
		 * Clean out any no-longer-needed history files. As a side effect,
		 * this will post a .ready file for the newly created history file,
		 * notifying the archiver that history file may be archived
		 * immediately.
		 */
		CleanupBackupHistory();
	}

	/*
	 * If archiving is enabled, wait for all the required WAL files to be
	 * archived before returning. If archiving isn't enabled, the required WAL
	 * needs to be transported via streaming replication (hopefully with
	 * wal_keep_segments set high enough), or some more exotic mechanism like
	 * polling and copying files from pg_wal with script. We have no knowledge
	 * of those mechanisms, so it's up to the user to ensure that he gets all
	 * the required WAL.
	 *
	 * We wait until both the last WAL file filled during backup and the
	 * history file have been archived, and assume that the alphabetic sorting
	 * property of the WAL files ensures any earlier WAL files are safely
	 * archived as well.
	 *
	 * We wait forever, since archive_command is supposed to work and we
	 * assume the admin wanted his backup to work completely. If you don't
	 * wish to wait, then either waitforarchive should be passed in as false,
	 * or you can set statement_timeout. Also, some notices are issued to
	 * clue in anyone who might be doing this interactively.
	 */

	if (waitforarchive &&
		((!backup_started_in_recovery && XLogArchivingActive()) ||
		 (backup_started_in_recovery && XLogArchivingAlways())))
	{
		XLByteToPrevSeg(stoppoint, _logSegNo, wal_segment_size);
		XLogFileName(lastxlogfilename, stoptli, _logSegNo, wal_segment_size);

		XLByteToSeg(startpoint, _logSegNo, wal_segment_size);
		BackupHistoryFileName(histfilename, stoptli, _logSegNo,
							  startpoint, wal_segment_size);

		seconds_before_warning = 60;
		waits = 0;

		/* Poll once per second until both files have been archived. */
		while (XLogArchiveIsBusy(lastxlogfilename) ||
			   XLogArchiveIsBusy(histfilename))
		{
			CHECK_FOR_INTERRUPTS();

			if (!reported_waiting && waits > 5)
			{
				ereport(NOTICE,
						(errmsg("pg_stop_backup cleanup done, waiting for required WAL segments to be archived")));
				reported_waiting = true;
			}

			pg_usleep(1000000L);

			/* Warn with exponential back-off so we don't spam the log. */
			if (++waits >= seconds_before_warning)
			{
				seconds_before_warning *= 2;	/* This wraps in >10 years... */
				ereport(WARNING,
						(errmsg("pg_stop_backup still waiting for all required WAL segments to be archived (%d seconds elapsed)",
								waits),
						 errhint("Check that your archive_command is executing properly. "
								 "pg_stop_backup can be canceled safely, "
								 "but the database backup will not be usable without all the WAL segments.")));
			}
		}

		ereport(NOTICE,
				(errmsg("pg_stop_backup complete, all required WAL segments have been archived")));
	}
	else if (waitforarchive)
		ereport(NOTICE,
				(errmsg("WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup")));

	/*
	 * We're done. As a convenience, return the ending WAL location.
	 */
	if (stoptli_p)
		*stoptli_p = stoptli;
	return stoppoint;
}
11468
11469
/*
 * do_pg_abort_backup: abort a running backup
 *
 * This does just the most basic steps of do_pg_stop_backup(), by taking the
 * system out of backup mode, thus making it a lot more safe to call from
 * an error handler.
 *
 * The caller can pass 'arg' as 'true' or 'false' to control whether a warning
 * is emitted.
 *
 * NB: This is only for aborting a non-exclusive backup that doesn't write
 * backup_label. A backup started with pg_start_backup() needs to be finished
 * with pg_stop_backup().
 *
 * NB: This gets used as a before_shmem_exit handler, hence the odd-looking
 * signature.
 */
void
do_pg_abort_backup(int code, Datum arg)
{
	bool		emit_warning = DatumGetBool(arg);

	/*
	 * Quick exit if session is not keeping around a non-exclusive backup
	 * already started.
	 */
	if (sessionBackupState != SESSION_BACKUP_NON_EXCLUSIVE)
		return;

	/* Undo this session's contribution to the shared backup counter. */
	WALInsertLockAcquireExclusive();
	Assert(XLogCtl->Insert.nonExclusiveBackups > 0);
	XLogCtl->Insert.nonExclusiveBackups--;

	/* If no backup of any kind remains running, stop forcing full pages. */
	if (XLogCtl->Insert.exclusiveBackupState == EXCLUSIVE_BACKUP_NONE &&
		XLogCtl->Insert.nonExclusiveBackups == 0)
	{
		XLogCtl->Insert.forcePageWrites = false;
	}
	WALInsertLockRelease();

	if (emit_warning)
		ereport(WARNING,
				(errmsg("aborting backup due to backend exiting before pg_stop_backup was called")));
}
11514
11515 /*
11516 * Register a handler that will warn about unterminated backups at end of
11517 * session, unless this has already been done.
11518 */
11519 void
11520 register_persistent_abort_backup_handler(void)
11521 {
11522 static bool already_done = false;
11523
11524 if (already_done)
11525 return;
11526 before_shmem_exit(do_pg_abort_backup, DatumGetBool(true));
11527 already_done = true;
11528 }
11529
11530 /*
11531 * Get latest redo apply position.
11532 *
11533 * Exported to allow WALReceiver to read the pointer directly.
11534 */
11535 XLogRecPtr
11536 GetXLogReplayRecPtr(TimeLineID *replayTLI)
11537 {
11538 XLogRecPtr recptr;
11539 TimeLineID tli;
11540
11541 SpinLockAcquire(&XLogCtl->info_lck);
11542 recptr = XLogCtl->lastReplayedEndRecPtr;
11543 tli = XLogCtl->lastReplayedTLI;
11544 SpinLockRelease(&XLogCtl->info_lck);
11545
11546 if (replayTLI)
11547 *replayTLI = tli;
11548 return recptr;
11549 }
11550
11551 /*
11552 * Get latest WAL insert pointer
11553 */
11554 XLogRecPtr
11555 GetXLogInsertRecPtr(void)
11556 {
11557 XLogCtlInsert *Insert = &XLogCtl->Insert;
11558 uint64 current_bytepos;
11559
11560 SpinLockAcquire(&Insert->insertpos_lck);
11561 current_bytepos = Insert->CurrBytePos;
11562 SpinLockRelease(&Insert->insertpos_lck);
11563
11564 return XLogBytePosToRecPtr(current_bytepos);
11565 }
11566
/*
 * Get latest WAL write pointer
 */
XLogRecPtr
GetXLogWriteRecPtr(void)
{
	/*
	 * Refresh this backend's local copy of the shared write/flush result
	 * under the info spinlock, then report the write position from it.
	 */
	SpinLockAcquire(&XLogCtl->info_lck);
	LogwrtResult = XLogCtl->LogwrtResult;
	SpinLockRelease(&XLogCtl->info_lck);

	return LogwrtResult.Write;
}
11579
/*
 * Returns the redo pointer of the last checkpoint or restartpoint. This is
 * the oldest point in WAL that we still need, if we have to restart recovery.
 *
 * Both outputs are read from the control file's checkpoint copy while
 * holding ControlFileLock in shared mode, so they form a consistent pair.
 */
void
GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
{
	LWLockAcquire(ControlFileLock, LW_SHARED);
	*oldrecptr = ControlFile->checkPointCopy.redo;
	*oldtli = ControlFile->checkPointCopy.ThisTimeLineID;
	LWLockRelease(ControlFileLock);
}
11592
11593 /*
11594 * read_backup_label: check to see if a backup_label file is present
11595 *
11596 * If we see a backup_label during recovery, we assume that we are recovering
11597 * from a backup dump file, and we therefore roll forward from the checkpoint
11598 * identified by the label file, NOT what pg_control says. This avoids the
11599 * problem that pg_control might have been archived one or more checkpoints
11600 * later than the start of the dump, and so if we rely on it as the start
11601 * point, we will fail to restore a consistent database state.
11602 *
11603 * Returns true if a backup_label was found (and fills the checkpoint
11604 * location and its REDO location into *checkPointLoc and RedoStartLSN,
11605 * respectively); returns false if not. If this backup_label came from a
11606 * streamed backup, *backupEndRequired is set to true. If this backup_label
11607 * was created during recovery, *backupFromStandby is set to true.
11608 */
11609 static bool
11610 read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
11611 bool *backupFromStandby)
11612 {
11613 char startxlogfilename[MAXFNAMELEN];
11614 TimeLineID tli_from_walseg,
11615 tli_from_file;
11616 FILE *lfp;
11617 char ch;
11618 char backuptype[20];
11619 char backupfrom[20];
11620 char backuplabel[MAXPGPATH];
11621 char backuptime[128];
11622 uint32 hi,
11623 lo;
11624
11625 *backupEndRequired = false;
11626 *backupFromStandby = false;
11627
11628 /*
11629 * See if label file is present
11630 */
11631 lfp = AllocateFile(BACKUP_LABEL_FILE, "r");
11632 if (!lfp)
11633 {
11634 if (errno != ENOENT)
11635 ereport(FATAL,
11636 (errcode_for_file_access(),
11637 errmsg("could not read file \"%s\": %m",
11638 BACKUP_LABEL_FILE)));
11639 return false; /* it's not there, all is fine */
11640 }
11641
11642 /*
11643 * Read and parse the START WAL LOCATION and CHECKPOINT lines (this code
11644 * is pretty crude, but we are not expecting any variability in the file
11645 * format).
11646 */
11647 if (fscanf(lfp, "START WAL LOCATION: %X/%X (file %08X%16s)%c",
11648 &hi, &lo, &tli_from_walseg, startxlogfilename, &ch) != 5 || ch != '\n')
11649 ereport(FATAL,
11650 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
11651 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
11652 RedoStartLSN = ((uint64) hi) << 32 | lo;
11653 if (fscanf(lfp, "CHECKPOINT LOCATION: %X/%X%c",
11654 &hi, &lo, &ch) != 3 || ch != '\n')
11655 ereport(FATAL,
11656 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
11657 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
11658 *checkPointLoc = ((uint64) hi) << 32 | lo;
11659
11660 /*
11661 * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
11662 * from an older backup anyway, but since the information on it is not
11663 * strictly required, don't error out if it's missing for some reason.
11664 */
11665 if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
11666 {
11667 if (strcmp(backuptype, "streamed") == 0)
11668 *backupEndRequired = true;
11669 }
11670
11671 if (fscanf(lfp, "BACKUP FROM: %19s\n", backupfrom) == 1)
11672 {
11673 if (strcmp(backupfrom, "standby") == 0)
11674 *backupFromStandby = true;
11675 }
11676
11677 /*
11678 * Parse START TIME and LABEL. Those are not mandatory fields for recovery
11679 * but checking for their presence is useful for debugging and the next
11680 * sanity checks. Cope also with the fact that the result buffers have a
11681 * pre-allocated size, hence if the backup_label file has been generated
11682 * with strings longer than the maximum assumed here an incorrect parsing
11683 * happens. That's fine as only minor consistency checks are done
11684 * afterwards.
11685 */
11686 if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
11687 ereport(DEBUG1,
11688 (errmsg("backup time %s in file \"%s\"",
11689 backuptime, BACKUP_LABEL_FILE)));
11690
11691 if (fscanf(lfp, "LABEL: %1023[^\n]\n", backuplabel) == 1)
11692 ereport(DEBUG1,
11693 (errmsg("backup label %s in file \"%s\"",
11694 backuplabel, BACKUP_LABEL_FILE)));
11695
11696 /*
11697 * START TIMELINE is new as of 11. Its parsing is not mandatory, still use
11698 * it as a sanity check if present.
11699 */
11700 if (fscanf(lfp, "START TIMELINE: %u\n", &tli_from_file) == 1)
11701 {
11702 if (tli_from_walseg != tli_from_file)
11703 ereport(FATAL,
11704 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
11705 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE),
11706 errdetail("Timeline ID parsed is %u, but expected %u",
11707 tli_from_file, tli_from_walseg)));
11708
11709 ereport(DEBUG1,
11710 (errmsg("backup timeline %u in file \"%s\"",
11711 tli_from_file, BACKUP_LABEL_FILE)));
11712 }
11713
11714 if (ferror(lfp) || FreeFile(lfp))
11715 ereport(FATAL,
11716 (errcode_for_file_access(),
11717 errmsg("could not read file \"%s\": %m",
11718 BACKUP_LABEL_FILE)));
11719
11720 return true;
11721 }
11722
/*
 * read_tablespace_map: check to see if a tablespace_map file is present
 *
 * If we see a tablespace_map file during recovery, we assume that we are
 * recovering from a backup dump file, and we therefore need to create symlinks
 * as per the information present in tablespace_map file.
 *
 * Returns true if a tablespace_map file was found (and fills the link
 * information for all the tablespace links present in file); returns false
 * if not.
 */
static bool
read_tablespace_map(List **tablespaces)
{
	tablespaceinfo *ti;
	FILE	   *lfp;
	char		tbsoid[MAXPGPATH];
	char	   *tbslinkpath;
	char		str[MAXPGPATH];		/* accumulates one logical line */
	int			ch,
				prev_ch = -1,
				i = 0,				/* current write position in str[] */
				n;					/* offset of the path after the OID */

	/*
	 * See if tablespace_map file is present
	 */
	lfp = AllocateFile(TABLESPACE_MAP, "r");
	if (!lfp)
	{
		if (errno != ENOENT)
			ereport(FATAL,
					(errcode_for_file_access(),
					 errmsg("could not read file \"%s\": %m",
							TABLESPACE_MAP)));
		return false;			/* it's not there, all is fine */
	}

	/*
	 * Read and parse the link name and path lines from tablespace_map file
	 * (this code is pretty crude, but we are not expecting any variability in
	 * the file format). While taking backup we embed escape character '\\'
	 * before newline in tablespace path, so that during reading of
	 * tablespace_map file, we could distinguish newline in tablespace path
	 * and end of line. Now while reading tablespace_map file, remove the
	 * escape character that has been added in tablespace path during backup.
	 */
	while ((ch = fgetc(lfp)) != EOF)
	{
		/* An unescaped newline/CR terminates one "OID path" entry. */
		if ((ch == '\n' || ch == '\r') && prev_ch != '\\')
		{
			str[i] = '\0';
			/* First token is the tablespace OID; %n marks where the path starts. */
			if (sscanf(str, "%s %n", tbsoid, &n) != 1)
				ereport(FATAL,
						(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
						 errmsg("invalid data in file \"%s\"", TABLESPACE_MAP)));
			tbslinkpath = str + n;
			i = 0;

			ti = palloc(sizeof(tablespaceinfo));
			ti->oid = pstrdup(tbsoid);
			ti->path = pstrdup(tbslinkpath);

			*tablespaces = lappend(*tablespaces, ti);
			continue;
		}
		/* Escaped newline: overwrite the backslash with the literal char. */
		else if ((ch == '\n' || ch == '\r') && prev_ch == '\\')
			str[i - 1] = ch;
		/* Normal character; silently drop anything past the buffer limit. */
		else if (i < sizeof(str) - 1)
			str[i++] = ch;
		prev_ch = ch;
	}

	if (ferror(lfp) || FreeFile(lfp))
		ereport(FATAL,
				(errcode_for_file_access(),
				 errmsg("could not read file \"%s\": %m",
						TABLESPACE_MAP)));

	return true;
}
11804
11805 /*
11806 * Error context callback for errors occurring during rm_redo().
11807 */
11808 static void
11809 rm_redo_error_callback(void *arg)
11810 {
11811 XLogReaderState *record = (XLogReaderState *) arg;
11812 StringInfoData buf;
11813
11814 initStringInfo(&buf);
11815 xlog_outdesc(&buf, record);
11816
11817 /* translator: %s is a WAL record description */
11818 errcontext("WAL redo at %X/%X for %s",
11819 (uint32) (record->ReadRecPtr >> 32),
11820 (uint32) record->ReadRecPtr,
11821 buf.data);
11822
11823 pfree(buf.data);
11824 }
11825
11826 /*
11827 * BackupInProgress: check if online backup mode is active
11828 *
11829 * This is done by checking for existence of the "backup_label" file.
11830 */
11831 bool
11832 BackupInProgress(void)
11833 {
11834 struct stat stat_buf;
11835
11836 return (stat(BACKUP_LABEL_FILE, &stat_buf) == 0);
11837 }
11838
/*
 * CancelBackup: rename the "backup_label" and "tablespace_map"
 * files to cancel backup mode
 *
 * If the "backup_label" file exists, it will be renamed to "backup_label.old".
 * Similarly, if the "tablespace_map" file exists, it will be renamed to
 * "tablespace_map.old".
 *
 * Note that this will render an online backup in progress
 * useless. To correctly finish an online backup, pg_stop_backup must be
 * called.
 */
void
CancelBackup(void)
{
	struct stat stat_buf;

	/* if the backup_label file is not there, return */
	if (stat(BACKUP_LABEL_FILE, &stat_buf) < 0)
		return;

	/* remove leftover file from previously canceled backup if it exists */
	unlink(BACKUP_LABEL_OLD);

	/*
	 * Rename durably so the cancellation survives a crash; on failure we
	 * warn and bail out without touching tablespace_map.
	 */
	if (durable_rename(BACKUP_LABEL_FILE, BACKUP_LABEL_OLD, DEBUG1) != 0)
	{
		ereport(WARNING,
				(errcode_for_file_access(),
				 errmsg("online backup mode was not canceled"),
				 errdetail("File \"%s\" could not be renamed to \"%s\": %m.",
						   BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
		return;
	}

	/* if the tablespace_map file is not there, return */
	if (stat(TABLESPACE_MAP, &stat_buf) < 0)
	{
		ereport(LOG,
				(errmsg("online backup mode canceled"),
				 errdetail("File \"%s\" was renamed to \"%s\".",
						   BACKUP_LABEL_FILE, BACKUP_LABEL_OLD)));
		return;
	}

	/* remove leftover file from previously canceled backup if it exists */
	unlink(TABLESPACE_MAP_OLD);

	if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
	{
		ereport(LOG,
				(errmsg("online backup mode canceled"),
				 errdetail("Files \"%s\" and \"%s\" were renamed to "
						   "\"%s\" and \"%s\", respectively.",
						   BACKUP_LABEL_FILE, TABLESPACE_MAP,
						   BACKUP_LABEL_OLD, TABLESPACE_MAP_OLD)));
	}
	else
	{
		/* backup_label is already renamed; report the partial success. */
		ereport(WARNING,
				(errcode_for_file_access(),
				 errmsg("online backup mode canceled"),
				 errdetail("File \"%s\" was renamed to \"%s\", but "
						   "file \"%s\" could not be renamed to \"%s\": %m.",
						   BACKUP_LABEL_FILE, BACKUP_LABEL_OLD,
						   TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
	}
}
11906
11907 /*
11908 * Read the XLOG page containing RecPtr into readBuf (if not read already).
11909 * Returns number of bytes read, if the page is read successfully, or -1
11910 * in case of errors. When errors occur, they are ereport'ed, but only
11911 * if they have not been previously reported.
11912 *
11913 * This is responsible for restoring files from archive as needed, as well
11914 * as for waiting for the requested WAL record to arrive in standby mode.
11915 *
11916 * 'emode' specifies the log level used for reporting "file not found" or
11917 * "end of WAL" situations in archive recovery, or in standby mode when a
11918 * trigger file is found. If set to WARNING or below, XLogPageRead() returns
11919 * false in those situations, on higher log levels the ereport() won't
11920 * return.
11921 *
11922 * In standby mode, if after a successful return of XLogPageRead() the
11923 * caller finds the record it's interested in to be broken, it should
11924 * ereport the error with the level determined by
11925 * emode_for_corrupt_record(), and then set lastSourceFailed
11926 * and call XLogPageRead() again with the same arguments. This lets
11927 * XLogPageRead() to try fetching the record from another source, or to
11928 * sleep and retry.
11929 */
static int
XLogPageRead(XLogReaderState *xlogreader, XLogRecPtr targetPagePtr, int reqLen,
			 XLogRecPtr targetRecPtr, char *readBuf, TimeLineID *readTLI)
{
	XLogPageReadPrivate *private =
	(XLogPageReadPrivate *) xlogreader->private_data;
	int			emode = private->emode;
	uint32		targetPageOff;	/* offset of requested page within its segment */
	XLogSegNo	targetSegNo PG_USED_FOR_ASSERTS_ONLY;

	XLByteToSeg(targetPagePtr, targetSegNo, wal_segment_size);
	targetPageOff = XLogSegmentOffset(targetPagePtr, wal_segment_size);

	/*
	 * See if we need to switch to a new segment because the requested record
	 * is not in the currently open one.
	 */
	if (readFile >= 0 &&
		!XLByteInSeg(targetPagePtr, readSegNo, wal_segment_size))
	{
		/*
		 * Request a restartpoint if we've replayed too much xlog since the
		 * last one.
		 */
		if (bgwriterLaunched)
		{
			if (XLogCheckpointNeeded(readSegNo))
			{
				/* refresh cached redo pointer, then re-check before asking */
				(void) GetRedoRecPtr();
				if (XLogCheckpointNeeded(readSegNo))
					RequestCheckpoint(CHECKPOINT_CAUSE_XLOG);
			}
		}

		/* Close the old segment and forget its source. */
		close(readFile);
		readFile = -1;
		readSource = 0;
	}

	XLByteToSeg(targetPagePtr, readSegNo, wal_segment_size);

retry:
	/* See if we need to retrieve more data */
	if (readFile < 0 ||
		(readSource == XLOG_FROM_STREAM &&
		 receivedUpto < targetPagePtr + reqLen))
	{
		if (!WaitForWALToBecomeAvailable(targetPagePtr + reqLen,
										 private->randAccess,
										 private->fetching_ckpt,
										 targetRecPtr))
		{
			/* Requested WAL will never arrive: clean up and report failure. */
			if (readFile >= 0)
				close(readFile);
			readFile = -1;
			readLen = 0;
			readSource = 0;

			return -1;
		}
	}

	/*
	 * At this point, we have the right segment open and if we're streaming we
	 * know the requested record is in it.
	 */
	Assert(readFile != -1);

	/*
	 * If the current segment is being streamed from master, calculate how
	 * much of the current page we have received already. We know the
	 * requested record has been received, but this is for the benefit of
	 * future calls, to allow quick exit at the top of this function.
	 */
	if (readSource == XLOG_FROM_STREAM)
	{
		if (((targetPagePtr) / XLOG_BLCKSZ) != (receivedUpto / XLOG_BLCKSZ))
			readLen = XLOG_BLCKSZ;
		else
			readLen = XLogSegmentOffset(receivedUpto, wal_segment_size) -
				targetPageOff;
	}
	else
		readLen = XLOG_BLCKSZ;

	/* Read the requested page (always a full XLOG_BLCKSZ block). */
	readOff = targetPageOff;
	if (lseek(readFile, (off_t) readOff, SEEK_SET) < 0)
	{
		char		fname[MAXFNAMELEN];
		int			save_errno = errno;

		/* XLogFileName may clobber errno, so save/restore around it */
		XLogFileName(fname, curFileTLI, readSegNo, wal_segment_size);
		errno = save_errno;
		ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
				(errcode_for_file_access(),
				 errmsg("could not seek in log segment %s to offset %u: %m",
						fname, readOff)));
		goto next_record_is_invalid;
	}

	pgstat_report_wait_start(WAIT_EVENT_WAL_READ);
	if (read(readFile, readBuf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
	{
		char		fname[MAXFNAMELEN];
		int			save_errno = errno;

		pgstat_report_wait_end();
		XLogFileName(fname, curFileTLI, readSegNo, wal_segment_size);
		errno = save_errno;
		ereport(emode_for_corrupt_record(emode, targetPagePtr + reqLen),
				(errcode_for_file_access(),
				 errmsg("could not read from log segment %s, offset %u: %m",
						fname, readOff)));
		goto next_record_is_invalid;
	}
	pgstat_report_wait_end();

	Assert(targetSegNo == readSegNo);
	Assert(targetPageOff == readOff);
	Assert(reqLen <= readLen);

	*readTLI = curFileTLI;

	/*
	 * Check the page header immediately, so that we can retry immediately if
	 * it's not valid. This may seem unnecessary, because XLogReadRecord()
	 * validates the page header anyway, and would propagate the failure up to
	 * ReadRecord(), which would retry. However, there's a corner case with
	 * continuation records, if a record is split across two pages such that
	 * we would need to read the two pages from different sources. For
	 * example, imagine a scenario where a streaming replica is started up,
	 * and replay reaches a record that's split across two WAL segments. The
	 * first page is only available locally, in pg_wal, because it's already
	 * been recycled in the master. The second page, however, is not present
	 * in pg_wal, and we should stream it from the master. There is a recycled
	 * WAL segment present in pg_wal, with garbage contents, however. We would
	 * read the first page from the local WAL segment, but when reading the
	 * second page, we would read the bogus, recycled, WAL segment. If we
	 * didn't catch that case here, we would never recover, because
	 * ReadRecord() would retry reading the whole record from the beginning.
	 *
	 * Of course, this only catches errors in the page header, which is what
	 * happens in the case of a recycled WAL segment. Other kinds of errors or
	 * corruption still has the same problem. But this at least fixes the
	 * common case, which can happen as part of normal operation.
	 *
	 * Validating the page header is cheap enough that doing it twice
	 * shouldn't be a big deal from a performance point of view.
	 */
	if (!XLogReaderValidatePageHeader(xlogreader, targetPagePtr, readBuf))
	{
		/* reset any error XLogReaderValidatePageHeader() might have set */
		xlogreader->errormsg_buf[0] = '\0';
		goto next_record_is_invalid;
	}

	return readLen;

next_record_is_invalid:
	/* Common failure path: mark the source failed and drop open state. */
	lastSourceFailed = true;

	if (readFile >= 0)
		close(readFile);
	readFile = -1;
	readLen = 0;
	readSource = 0;

	/* In standby-mode, keep trying */
	if (StandbyMode)
		goto retry;
	else
		return -1;
}
12104
12105 /*
12106 * Open the WAL segment containing WAL location 'RecPtr'.
12107 *
12108 * The segment can be fetched via restore_command, or via walreceiver having
12109 * streamed the record, or it can already be present in pg_wal. Checking
12110 * pg_wal is mainly for crash recovery, but it will be polled in standby mode
12111 * too, in case someone copies a new segment directly to pg_wal. That is not
12112 * documented or recommended, though.
12113 *
12114 * If 'fetching_ckpt' is true, we're fetching a checkpoint record, and should
12115 * prepare to read WAL starting from RedoStartLSN after this.
12116 *
12117 * 'RecPtr' might not point to the beginning of the record we're interested
12118 * in, it might also point to the page or segment header. In that case,
12119 * 'tliRecPtr' is the position of the WAL record we're interested in. It is
12120 * used to decide which timeline to stream the requested WAL from.
12121 *
12122 * If the record is not immediately available, the function returns false
12123 * if we're not in standby mode. In standby mode, waits for it to become
12124 * available.
12125 *
12126 * When the requested record becomes available, the function opens the file
12127 * containing it (if not open already), and returns true. When end of standby
12128 * mode is triggered by the user, and there is no more WAL available, returns
12129 * false.
12130 */
static bool
WaitForWALToBecomeAvailable(XLogRecPtr RecPtr, bool randAccess,
							bool fetching_ckpt, XLogRecPtr tliRecPtr)
{
	static TimestampTz last_fail_time = 0;
	TimestampTz now;
	bool		streaming_reply_sent = false;

	/*-------
	 * Standby mode is implemented by a state machine:
	 *
	 * 1. Read from either archive or pg_wal (XLOG_FROM_ARCHIVE), or just
	 *    pg_wal (XLOG_FROM_PG_WAL)
	 * 2. Check trigger file
	 * 3. Read from primary server via walreceiver (XLOG_FROM_STREAM)
	 * 4. Rescan timelines
	 * 5. Sleep wal_retrieve_retry_interval milliseconds, and loop back to 1.
	 *
	 * Failure to read from the current source advances the state machine to
	 * the next state.
	 *
	 * 'currentSource' indicates the current state. There are no currentSource
	 * values for "check trigger", "rescan timelines", and "sleep" states,
	 * those actions are taken when reading from the previous source fails, as
	 * part of advancing to the next state.
	 *
	 * If standby mode is turned off while reading WAL from stream, we move
	 * to XLOG_FROM_ARCHIVE and reset lastSourceFailed, to force fetching
	 * the files (which would be required at end of recovery, e.g., timeline
	 * history file) from archive or pg_wal. We don't need to kill WAL receiver
	 * here because it's already stopped when standby mode is turned off at
	 * the end of recovery.
	 *-------
	 */
	if (!InArchiveRecovery)
		currentSource = XLOG_FROM_PG_WAL;
	else if (currentSource == 0 ||
			 (!StandbyMode && currentSource == XLOG_FROM_STREAM))
	{
		lastSourceFailed = false;
		currentSource = XLOG_FROM_ARCHIVE;
	}

	for (;;)
	{
		int			oldSource = currentSource;

		/*
		 * First check if we failed to read from the current source, and
		 * advance the state machine if so. The failure to read might've
		 * happened outside this function, e.g when a CRC check fails on a
		 * record, or within this loop.
		 */
		if (lastSourceFailed)
		{
			switch (currentSource)
			{
				case XLOG_FROM_ARCHIVE:
				case XLOG_FROM_PG_WAL:

					/*
					 * Check to see if the trigger file exists. Note that we
					 * do this only after failure, so when you create the
					 * trigger file, we still finish replaying as much as we
					 * can from archive and pg_wal before failover.
					 */
					if (StandbyMode && CheckForStandbyTrigger())
					{
						ShutdownWalRcv();
						return false;
					}

					/*
					 * Not in standby mode, and we've now tried the archive
					 * and pg_wal.
					 */
					if (!StandbyMode)
						return false;

					/*
					 * If primary_conninfo is set, launch walreceiver to try
					 * to stream the missing WAL.
					 *
					 * If fetching_ckpt is true, RecPtr points to the initial
					 * checkpoint location. In that case, we use RedoStartLSN
					 * as the streaming start position instead of RecPtr, so
					 * that when we later jump backwards to start redo at
					 * RedoStartLSN, we will have the logs streamed already.
					 */
					if (PrimaryConnInfo)
					{
						XLogRecPtr	ptr;
						TimeLineID	tli;

						if (fetching_ckpt)
						{
							ptr = RedoStartLSN;
							tli = ControlFile->checkPointCopy.ThisTimeLineID;
						}
						else
						{
							ptr = RecPtr;

							/*
							 * Use the record begin position to determine the
							 * TLI, rather than the position we're reading.
							 */
							tli = tliOfPointInHistory(tliRecPtr, expectedTLEs);

							if (curFileTLI > 0 && tli < curFileTLI)
								elog(ERROR, "according to history file, WAL location %X/%X belongs to timeline %u, but previous recovered WAL file came from timeline %u",
									 (uint32) (tliRecPtr >> 32),
									 (uint32) tliRecPtr,
									 tli, curFileTLI);
						}
						curFileTLI = tli;
						RequestXLogStreaming(tli, ptr, PrimaryConnInfo,
											 PrimarySlotName);
						/* forget what we've received; walreceiver restarts */
						receivedUpto = 0;
					}

					/*
					 * Move to XLOG_FROM_STREAM state in either case. We'll
					 * get immediate failure if we didn't launch walreceiver,
					 * and move on to the next state.
					 */
					currentSource = XLOG_FROM_STREAM;
					break;

				case XLOG_FROM_STREAM:

					/*
					 * Failure while streaming. Most likely, we got here
					 * because streaming replication was terminated, or
					 * promotion was triggered. But we also get here if we
					 * find an invalid record in the WAL streamed from master,
					 * in which case something is seriously wrong. There's
					 * little chance that the problem will just go away, but
					 * PANIC is not good for availability either, especially
					 * in hot standby mode. So, we treat that the same as
					 * disconnection, and retry from archive/pg_wal again. The
					 * WAL in the archive should be identical to what was
					 * streamed, so it's unlikely that it helps, but one can
					 * hope...
					 */

					/*
					 * We should be able to move to XLOG_FROM_STREAM
					 * only in standby mode.
					 */
					Assert(StandbyMode);

					/*
					 * Before we leave XLOG_FROM_STREAM state, make sure that
					 * walreceiver is not active, so that it won't overwrite
					 * WAL that we restore from archive.
					 */
					if (WalRcvStreaming())
						ShutdownWalRcv();

					/*
					 * Before we sleep, re-scan for possible new timelines if
					 * we were requested to recover to the latest timeline.
					 */
					if (recoveryTargetIsLatest)
					{
						if (rescanLatestTimeLine())
						{
							/* new timeline found: retry archive immediately */
							currentSource = XLOG_FROM_ARCHIVE;
							break;
						}
					}

					/*
					 * XLOG_FROM_STREAM is the last state in our state
					 * machine, so we've exhausted all the options for
					 * obtaining the requested WAL. We're going to loop back
					 * and retry from the archive, but if it hasn't been long
					 * since last attempt, sleep wal_retrieve_retry_interval
					 * milliseconds to avoid busy-waiting.
					 */
					now = GetCurrentTimestamp();
					if (!TimestampDifferenceExceeds(last_fail_time, now,
													wal_retrieve_retry_interval))
					{
						long		wait_time;

						wait_time = wal_retrieve_retry_interval -
							TimestampDifferenceMilliseconds(last_fail_time, now);

						WaitLatch(&XLogCtl->recoveryWakeupLatch,
								  WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
								  wait_time, WAIT_EVENT_RECOVERY_WAL_STREAM);
						ResetLatch(&XLogCtl->recoveryWakeupLatch);
						now = GetCurrentTimestamp();

						/* Handle interrupt signals of startup process */
						HandleStartupProcInterrupts();
					}
					last_fail_time = now;
					currentSource = XLOG_FROM_ARCHIVE;
					break;

				default:
					elog(ERROR, "unexpected WAL source %d", currentSource);
			}
		}
		else if (currentSource == XLOG_FROM_PG_WAL)
		{
			/*
			 * We just successfully read a file in pg_wal. We prefer files in
			 * the archive over ones in pg_wal, so try the next file again
			 * from the archive first.
			 */
			if (InArchiveRecovery)
				currentSource = XLOG_FROM_ARCHIVE;
		}

		if (currentSource != oldSource)
			elog(DEBUG2, "switched WAL source from %s to %s after %s",
				 xlogSourceNames[oldSource], xlogSourceNames[currentSource],
				 lastSourceFailed ? "failure" : "success");

		/*
		 * We've now handled possible failure. Try to read from the chosen
		 * source.
		 */
		lastSourceFailed = false;

		switch (currentSource)
		{
			case XLOG_FROM_ARCHIVE:
			case XLOG_FROM_PG_WAL:
				/*
				 * WAL receiver must not be running when reading WAL from
				 * archive or pg_wal.
				 */
				Assert(!WalRcvStreaming());

				/* Close any old file we might have open. */
				if (readFile >= 0)
				{
					close(readFile);
					readFile = -1;
				}
				/* Reset curFileTLI if random fetch. */
				if (randAccess)
					curFileTLI = 0;

				/*
				 * Try to restore the file from archive, or read an existing
				 * file from pg_wal.
				 */
				readFile = XLogFileReadAnyTLI(readSegNo, DEBUG2,
											  currentSource == XLOG_FROM_ARCHIVE ? XLOG_FROM_ANY :
											  currentSource);
				if (readFile >= 0)
					return true;	/* success! */

				/*
				 * Nope, not found in archive or pg_wal.
				 */
				lastSourceFailed = true;
				break;

			case XLOG_FROM_STREAM:
				{
					bool		havedata;

					/*
					 * We should be able to move to XLOG_FROM_STREAM
					 * only in standby mode.
					 */
					Assert(StandbyMode);

					/*
					 * Check if WAL receiver is still active.
					 */
					if (!WalRcvStreaming())
					{
						lastSourceFailed = true;
						break;
					}

					/*
					 * Walreceiver is active, so see if new data has arrived.
					 *
					 * We only advance XLogReceiptTime when we obtain fresh
					 * WAL from walreceiver and observe that we had already
					 * processed everything before the most recent "chunk"
					 * that it flushed to disk. In steady state where we are
					 * keeping up with the incoming data, XLogReceiptTime will
					 * be updated on each cycle. When we are behind,
					 * XLogReceiptTime will not advance, so the grace time
					 * allotted to conflicting queries will decrease.
					 */
					if (RecPtr < receivedUpto)
						havedata = true;
					else
					{
						XLogRecPtr	latestChunkStart;

						receivedUpto = GetWalRcvWriteRecPtr(&latestChunkStart, &receiveTLI);
						if (RecPtr < receivedUpto && receiveTLI == curFileTLI)
						{
							havedata = true;
							if (latestChunkStart <= RecPtr)
							{
								XLogReceiptTime = GetCurrentTimestamp();
								SetCurrentChunkStartTime(XLogReceiptTime);
							}
						}
						else
							havedata = false;
					}
					if (havedata)
					{
						/*
						 * Great, streamed far enough. Open the file if it's
						 * not open already. Also read the timeline history
						 * file if we haven't initialized timeline history
						 * yet; it should be streamed over and present in
						 * pg_wal by now. Use XLOG_FROM_STREAM so that source
						 * info is set correctly and XLogReceiptTime isn't
						 * changed.
						 *
						 * NB: We must set readTimeLineHistory based on
						 * recoveryTargetTLI, not receiveTLI. Normally they'll
						 * be the same, but if recovery_target_timeline is
						 * 'latest' and archiving is configured, then it's
						 * possible that we managed to retrieve one or more
						 * new timeline history files from the archive,
						 * updating recoveryTargetTLI.
						 */
						if (readFile < 0)
						{
							if (!expectedTLEs)
								expectedTLEs = readTimeLineHistory(recoveryTargetTLI);
							readFile = XLogFileRead(readSegNo, PANIC,
													receiveTLI,
													XLOG_FROM_STREAM, false);
							Assert(readFile >= 0);
						}
						else
						{
							/* just make sure source info is correct... */
							readSource = XLOG_FROM_STREAM;
							XLogReceiptSource = XLOG_FROM_STREAM;
							return true;
						}
						break;
					}

					/*
					 * Data not here yet. Check for trigger, then wait for
					 * walreceiver to wake us up when new WAL arrives.
					 */
					if (CheckForStandbyTrigger())
					{
						/*
						 * Note that we don't "return false" immediately here.
						 * After being triggered, we still want to replay all
						 * the WAL that was already streamed. It's in pg_wal
						 * now, so we just treat this as a failure, and the
						 * state machine will move on to replay the streamed
						 * WAL from pg_wal, and then recheck the trigger and
						 * exit replay.
						 */
						lastSourceFailed = true;
						break;
					}

					/*
					 * Since we have replayed everything we have received so
					 * far and are about to start waiting for more WAL, let's
					 * tell the upstream server our replay location now so
					 * that pg_stat_replication doesn't show stale
					 * information.
					 */
					if (!streaming_reply_sent)
					{
						WalRcvForceReply();
						streaming_reply_sent = true;
					}

					/*
					 * Wait for more WAL to arrive. Time out after 5 seconds
					 * to react to a trigger file promptly.
					 */
					WaitLatch(&XLogCtl->recoveryWakeupLatch,
							  WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
							  5000L, WAIT_EVENT_RECOVERY_WAL_ALL);
					ResetLatch(&XLogCtl->recoveryWakeupLatch);
					break;
				}

			default:
				elog(ERROR, "unexpected WAL source %d", currentSource);
		}

		/*
		 * This possibly-long loop needs to handle interrupts of startup
		 * process.
		 */
		HandleStartupProcInterrupts();
	}

	return false;				/* not reached */
}
12540
12541 /*
12542 * Determine what log level should be used to report a corrupt WAL record
12543 * in the current WAL page, previously read by XLogPageRead().
12544 *
12545 * 'emode' is the error mode that would be used to report a file-not-found
12546 * or legitimate end-of-WAL situation. Generally, we use it as-is, but if
12547 * we're retrying the exact same record that we've tried previously, only
12548 * complain the first time to keep the noise down. However, we only do when
12549 * reading from pg_wal, because we don't expect any invalid records in archive
12550 * or in records streamed from master. Files in the archive should be complete,
12551 * and we should never hit the end of WAL because we stop and wait for more WAL
12552 * to arrive before replaying it.
12553 *
12554 * NOTE: This function remembers the RecPtr value it was last called with,
12555 * to suppress repeated messages about the same record. Only call this when
12556 * you are about to ereport(), or you might cause a later message to be
12557 * erroneously suppressed.
12558 */
12559 static int
12560 emode_for_corrupt_record(int emode, XLogRecPtr RecPtr)
12561 {
12562 static XLogRecPtr lastComplaint = 0;
12563
12564 if (readSource == XLOG_FROM_PG_WAL && emode == LOG)
12565 {
12566 if (RecPtr == lastComplaint)
12567 emode = DEBUG1;
12568 else
12569 lastComplaint = RecPtr;
12570 }
12571 return emode;
12572 }
12573
12574 /*
12575 * Check to see whether the user-specified trigger file exists and whether a
12576 * promote request has arrived. If either condition holds, return true.
12577 */
12578 static bool
12579 CheckForStandbyTrigger(void)
12580 {
12581 struct stat stat_buf;
12582 static bool triggered = false;
12583
12584 if (triggered)
12585 return true;
12586
12587 if (IsPromoteTriggered())
12588 {
12589 /*
12590 * In 9.1 and 9.2 the postmaster unlinked the promote file inside the
12591 * signal handler. It now leaves the file in place and lets the
12592 * Startup process do the unlink. This allows Startup to know whether
12593 * it should create a full checkpoint before starting up (fallback
12594 * mode). Fast promotion takes precedence.
12595 */
12596 if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
12597 {
12598 unlink(PROMOTE_SIGNAL_FILE);
12599 unlink(FALLBACK_PROMOTE_SIGNAL_FILE);
12600 fast_promote = true;
12601 }
12602 else if (stat(FALLBACK_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
12603 {
12604 unlink(FALLBACK_PROMOTE_SIGNAL_FILE);
12605 fast_promote = false;
12606 }
12607
12608 ereport(LOG, (errmsg("received promote request")));
12609
12610 ResetPromoteTriggered();
12611 triggered = true;
12612 return true;
12613 }
12614
12615 if (TriggerFile == NULL)
12616 return false;
12617
12618 if (stat(TriggerFile, &stat_buf) == 0)
12619 {
12620 ereport(LOG,
12621 (errmsg("trigger file found: %s", TriggerFile)));
12622 unlink(TriggerFile);
12623 triggered = true;
12624 fast_promote = true;
12625 return true;
12626 }
12627 else if (errno != ENOENT)
12628 ereport(ERROR,
12629 (errcode_for_file_access(),
12630 errmsg("could not stat trigger file \"%s\": %m",
12631 TriggerFile)));
12632
12633 return false;
12634 }
12635
12636 /*
12637 * Remove the files signaling a standby promotion request.
12638 */
12639 void
12640 RemovePromoteSignalFiles(void)
12641 {
12642 unlink(PROMOTE_SIGNAL_FILE);
12643 unlink(FALLBACK_PROMOTE_SIGNAL_FILE);
12644 }
12645
12646 /*
12647 * Check to see if a promote request has arrived. Should be
12648 * called by postmaster after receiving SIGUSR1.
12649 */
12650 bool
12651 CheckPromoteSignal(void)
12652 {
12653 struct stat stat_buf;
12654
12655 if (stat(PROMOTE_SIGNAL_FILE, &stat_buf) == 0 ||
12656 stat(FALLBACK_PROMOTE_SIGNAL_FILE, &stat_buf) == 0)
12657 return true;
12658
12659 return false;
12660 }
12661
12662 /*
12663 * Wake up startup process to replay newly arrived WAL, or to notice that
12664 * failover has been requested.
12665 */
12666 void
12667 WakeupRecovery(void)
12668 {
12669 SetLatch(&XLogCtl->recoveryWakeupLatch);
12670 }
12671
12672 /*
12673 * Update the WalWriterSleeping flag.
12674 */
12675 void
12676 SetWalWriterSleeping(bool sleeping)
12677 {
12678 SpinLockAcquire(&XLogCtl->info_lck);
12679 XLogCtl->WalWriterSleeping = sleeping;
12680 SpinLockRelease(&XLogCtl->info_lck);
12681 }
12682
12683 /*
12684 * Schedule a walreceiver wakeup in the main recovery loop.
12685 */
12686 void
12687 XLogRequestWalReceiverReply(void)
12688 {
12689 doRequestWalReceiverReply = true;
12690 }
12691