/*-------------------------------------------------------------------------
 *
 * checkpointer.c
 *
 * The checkpointer is new as of Postgres 9.2.  It handles all checkpoints.
 * Checkpoints are automatically dispatched after a certain amount of time has
 * elapsed since the last one, and it can be signaled to perform requested
 * checkpoints as well.  (The GUC parameter that mandates a checkpoint every
 * so many WAL segments is implemented by having backends signal when they
 * fill WAL segments; the checkpointer itself doesn't watch for the
 * condition.)
 *
 * The checkpointer is started by the postmaster as soon as the startup
 * subprocess finishes, or as soon as recovery begins if we are doing archive
 * recovery.  It remains alive until the postmaster commands it to terminate.
 * Normal termination is by SIGUSR2, which instructs the checkpointer to
 * execute a shutdown checkpoint and then exit(0).  (All backends must be
 * stopped before SIGUSR2 is issued!)  Emergency termination is by SIGQUIT;
 * like any backend, the checkpointer will simply abort and exit on SIGQUIT.
 *
 * If the checkpointer exits unexpectedly, the postmaster treats that the same
 * as a backend crash: shared memory may be corrupted, so remaining backends
 * should be killed by SIGQUIT and then a recovery cycle started.  (Even if
 * shared memory isn't corrupted, we have lost information about which
 * files need to be fsync'd for the next checkpoint, and so a system
 * restart needs to be forced.)
 *
 *
 * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
 *
 *
 * IDENTIFICATION
 *	  src/backend/postmaster/checkpointer.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <signal.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "postmaster/bgwriter.h"
#include "replication/syncrep.h"
#include "storage/bufmgr.h"
#include "storage/condition_variable.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/lwlock.h"
#include "storage/proc.h"
#include "storage/shmem.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "utils/guc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"


/*----------
 * Shared memory area for communication between checkpointer and backends
 *
 * The ckpt counters allow backends to watch for completion of a checkpoint
 * request they send.  Here's how it works:
 *	* At start of a checkpoint, checkpointer reads (and clears) the request
 *	  flags and increments ckpt_started, while holding ckpt_lck.
 *	* On completion of a checkpoint, checkpointer sets ckpt_done to
 *	  equal ckpt_started.
 *	* On failure of a checkpoint, checkpointer increments ckpt_failed
 *	  and sets ckpt_done to equal ckpt_started.
 *
 * The algorithm for backends is:
 *	1. Record current values of ckpt_failed and ckpt_started, and
 *	   set request flags, while holding ckpt_lck.
 *	2. Send signal to request checkpoint.
 *	3. Sleep until ckpt_started changes.  Now you know a checkpoint has
 *	   begun since you started this algorithm (although *not* that it was
 *	   specifically initiated by your signal), and that it is using your flags.
 *	4. Record new value of ckpt_started.
 *	5. Sleep until ckpt_done >= saved value of ckpt_started.  (Use modulo
 *	   arithmetic here in case counters wrap around.)  Now you know a
 *	   checkpoint has started and completed, but not whether it was
 *	   successful.
 *	6. If ckpt_failed is different from the originally saved value,
 *	   assume request failed; otherwise it was definitely successful.
 *
 * ckpt_flags holds the OR of the checkpoint request flags sent by all
 * requesting backends since the last checkpoint start.  The flags are
 * chosen so that OR'ing is the correct way to combine multiple requests.
 *
 * num_backend_writes is used to count the number of buffer writes performed
 * by user backend processes.  This counter should be wide enough that it
 * can't overflow during a single processing cycle.  num_backend_fsync
 * counts the subset of those writes that also had to do their own fsync,
 * because the checkpointer failed to absorb their request.
 *
 * The requests array holds fsync requests sent by backends and not yet
 * absorbed by the checkpointer.
 *
 * Unlike the checkpoint fields, num_backend_writes, num_backend_fsync, and
 * the requests fields are protected by CheckpointerCommLock.
 *----------
 */
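
/*
 * Illustration of step 5's "modulo arithmetic" (this mirrors what
 * RequestCheckpoint below actually does): a backend that has saved
 * new_started does not compare the raw counter values, but waits until
 *
 *		(ckpt_done - new_started) >= 0
 *
 * so the test remains correct even after the counters wrap around.
 */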
typedef struct
{
	RelFileNode rnode;
	ForkNumber	forknum;
	BlockNumber segno;			/* see md.c for special values */
	/* might add a real request-type field later; not needed yet */
} CheckpointerRequest;

typedef struct
{
	pid_t		checkpointer_pid;	/* PID (0 if not started) */

	slock_t		ckpt_lck;		/* protects all the ckpt_* fields */

	int			ckpt_started;	/* advances when checkpoint starts */
	int			ckpt_done;		/* advances when checkpoint done */
	int			ckpt_failed;	/* advances when checkpoint fails */

	int			ckpt_flags;		/* checkpoint flags, as defined in xlog.h */

	uint32		num_backend_writes; /* counts user backend buffer writes */
	uint32		num_backend_fsync;	/* counts user backend fsync calls */

	int			num_requests;	/* current # of requests */
	int			max_requests;	/* allocated array size */
	CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER];
} CheckpointerShmemStruct;

static CheckpointerShmemStruct *CheckpointerShmem;

/* interval for calling AbsorbFsyncRequests in CheckpointWriteDelay */
#define WRITES_PER_ABSORB		1000

/*
 * GUC parameters
 */
int			CheckPointTimeout = 300;
int			CheckPointWarning = 30;
double		CheckPointCompletionTarget = 0.5;

/*
 * Flags set by interrupt handlers for later service in the main loop.
 */
static volatile sig_atomic_t got_SIGHUP = false;
static volatile sig_atomic_t shutdown_requested = false;

/*
 * Private state
 */
static bool ckpt_active = false;

/* these values are valid when ckpt_active is true: */
static pg_time_t ckpt_start_time;
static XLogRecPtr ckpt_start_recptr;
static double ckpt_cached_elapsed;

static pg_time_t last_checkpoint_time;
static pg_time_t last_xlog_switch_time;

/* Prototypes for private functions */

static void CheckArchiveTimeout(void);
static bool IsCheckpointOnSchedule(double progress);
static bool ImmediateCheckpointRequested(void);
static bool CompactCheckpointerRequestQueue(void);
static void UpdateSharedMemoryConfig(void);

/* Signal handlers */

static void chkpt_quickdie(SIGNAL_ARGS);
static void ChkptSigHupHandler(SIGNAL_ARGS);
static void ReqCheckpointHandler(SIGNAL_ARGS);
static void chkpt_sigusr1_handler(SIGNAL_ARGS);
static void ReqShutdownHandler(SIGNAL_ARGS);


/*
 * Main entry point for checkpointer process
 *
 * This is invoked from AuxiliaryProcessMain, which has already created the
 * basic execution environment, but not enabled signals yet.
 */
void
CheckpointerMain(void)
{
	sigjmp_buf	local_sigjmp_buf;
	MemoryContext checkpointer_context;

	CheckpointerShmem->checkpointer_pid = MyProcPid;

	/*
	 * Properly accept or ignore signals the postmaster might send us
	 *
	 * Note: we deliberately ignore SIGTERM, because during a standard Unix
	 * system shutdown cycle, init will SIGTERM all processes at once.  We
	 * want to wait for the backends to exit, whereupon the postmaster will
	 * tell us it's okay to shut down (via SIGUSR2).
	 */
	pqsignal(SIGHUP, ChkptSigHupHandler);	/* set flag to read config file */
	pqsignal(SIGINT, ReqCheckpointHandler); /* request checkpoint */
	pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
	pqsignal(SIGQUIT, chkpt_quickdie);	/* hard crash time */
	pqsignal(SIGALRM, SIG_IGN);
	pqsignal(SIGPIPE, SIG_IGN);
	pqsignal(SIGUSR1, chkpt_sigusr1_handler);
	pqsignal(SIGUSR2, ReqShutdownHandler);	/* request shutdown */

	/*
	 * Reset some signals that are accepted by postmaster but not here
	 */
	pqsignal(SIGCHLD, SIG_DFL);
	pqsignal(SIGTTIN, SIG_DFL);
	pqsignal(SIGTTOU, SIG_DFL);
	pqsignal(SIGCONT, SIG_DFL);
	pqsignal(SIGWINCH, SIG_DFL);

	/* We allow SIGQUIT (quickdie) at all times */
	sigdelset(&BlockSig, SIGQUIT);

	/*
	 * Initialize so that first time-driven event happens at the correct time.
	 */
	last_checkpoint_time = last_xlog_switch_time = (pg_time_t) time(NULL);

	/*
	 * Create a resource owner to keep track of our resources (currently only
	 * buffer pins).
	 */
	CurrentResourceOwner = ResourceOwnerCreate(NULL, "Checkpointer");

	/*
	 * Create a memory context that we will do all our work in.  We do this so
	 * that we can reset the context during error recovery and thereby avoid
	 * possible memory leaks.  Formerly this code just ran in
	 * TopMemoryContext, but resetting that would be a really bad idea.
	 */
	checkpointer_context = AllocSetContextCreate(TopMemoryContext,
												 "Checkpointer",
												 ALLOCSET_DEFAULT_SIZES);
	MemoryContextSwitchTo(checkpointer_context);

	/*
	 * If an exception is encountered, processing resumes here.
	 *
	 * See notes in postgres.c about the design of this coding.
	 */
	if (sigsetjmp(local_sigjmp_buf, 1) != 0)
	{
		/* Since not using PG_TRY, must reset error stack by hand */
		error_context_stack = NULL;

		/* Prevent interrupts while cleaning up */
		HOLD_INTERRUPTS();

		/* Report the error to the server log */
		EmitErrorReport();

		/*
		 * These operations are really just a minimal subset of
		 * AbortTransaction().  We don't have very many resources to worry
		 * about in checkpointer, but we do have LWLocks, buffers, and temp
		 * files.
		 */
		LWLockReleaseAll();
		ConditionVariableCancelSleep();
		pgstat_report_wait_end();
		AbortBufferIO();
		UnlockBuffers();
		/* buffer pins are released here: */
		ResourceOwnerRelease(CurrentResourceOwner,
							 RESOURCE_RELEASE_BEFORE_LOCKS,
							 false, true);
		/* we needn't bother with the other ResourceOwnerRelease phases */
		AtEOXact_Buffers(false);
		AtEOXact_SMgr();
		AtEOXact_Files(false);
		AtEOXact_HashTables(false);

		/* Warn any waiting backends that the checkpoint failed. */
		if (ckpt_active)
		{
			SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
			CheckpointerShmem->ckpt_failed++;
			CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
			SpinLockRelease(&CheckpointerShmem->ckpt_lck);

			ckpt_active = false;
		}

		/*
		 * Now return to normal top-level context and clear ErrorContext for
		 * next time.
		 */
		MemoryContextSwitchTo(checkpointer_context);
		FlushErrorState();

		/* Flush any leaked data in the top-level context */
		MemoryContextResetAndDeleteChildren(checkpointer_context);

		/* Now we can allow interrupts again */
		RESUME_INTERRUPTS();

		/*
		 * Sleep at least 1 second after any error.  A write error is likely
		 * to be repeated, and we don't want to be filling the error logs as
		 * fast as we can.
		 */
		pg_usleep(1000000L);

		/*
		 * Close all open files after any error.  This is helpful on Windows,
		 * where holding deleted files open causes various strange errors.
		 * It's not clear we need it elsewhere, but shouldn't hurt.
		 */
		smgrcloseall();
	}

	/* We can now handle ereport(ERROR) */
	PG_exception_stack = &local_sigjmp_buf;

	/*
	 * Unblock signals (they were blocked when the postmaster forked us)
	 */
	PG_SETMASK(&UnBlockSig);

	/*
	 * Ensure all shared memory values are set correctly for the config. Doing
	 * this here ensures no race conditions from other concurrent updaters.
	 */
	UpdateSharedMemoryConfig();

	/*
	 * Advertise our latch that backends can use to wake us up while we're
	 * sleeping.
	 */
	ProcGlobal->checkpointerLatch = &MyProc->procLatch;

	/*
	 * Loop forever
	 */
	for (;;)
	{
		bool		do_checkpoint = false;
		int			flags = 0;
		pg_time_t	now;
		int			elapsed_secs;
		int			cur_timeout;
		int			rc;

		/* Clear any already-pending wakeups */
		ResetLatch(MyLatch);

		/*
		 * Process any requests or signals received recently.
		 */
		AbsorbFsyncRequests();

		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);

			/*
			 * Checkpointer is the last process to shut down, so we ask it to
			 * hold the keys for a range of other required tasks, most of
			 * which have nothing to do with checkpointing at all.
			 *
			 * For various reasons, some config values can change dynamically
			 * so the primary copy of them is held in shared memory to make
			 * sure all backends see the same value.  We make Checkpointer
			 * responsible for updating the shared memory copy if the
			 * parameter setting changes because of SIGHUP.
			 */
			UpdateSharedMemoryConfig();
		}
		if (shutdown_requested)
		{
			/*
			 * From here on, elog(ERROR) should end with exit(1), not send
			 * control back to the sigsetjmp block above
			 */
			ExitOnAnyError = true;
			/* Close down the database */
			ShutdownXLOG(0, 0);
			/* Normal exit from the checkpointer is here */
			proc_exit(0);		/* done */
		}

		/*
		 * Detect a pending checkpoint request by checking whether the flags
		 * word in shared memory is nonzero.  We shouldn't need to acquire the
		 * ckpt_lck for this.
		 */
		if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
		{
			do_checkpoint = true;
			BgWriterStats.m_requested_checkpoints++;
		}

		/*
		 * Force a checkpoint if too much time has elapsed since the last one.
		 * Note that we count a timed checkpoint in stats only when this
		 * occurs without an external request, but we set the CAUSE_TIME flag
		 * bit even if there is also an external request.
		 */
		now = (pg_time_t) time(NULL);
		elapsed_secs = now - last_checkpoint_time;
		if (elapsed_secs >= CheckPointTimeout)
		{
			if (!do_checkpoint)
				BgWriterStats.m_timed_checkpoints++;
			do_checkpoint = true;
			flags |= CHECKPOINT_CAUSE_TIME;
		}

		/*
		 * Do a checkpoint if requested.
		 */
		if (do_checkpoint)
		{
			bool		ckpt_performed = false;
			bool		do_restartpoint;

			/*
			 * Check if we should perform a checkpoint or a restartpoint. As a
			 * side-effect, RecoveryInProgress() initializes TimeLineID if
			 * it's not set yet.
			 */
			do_restartpoint = RecoveryInProgress();

			/*
			 * Atomically fetch the request flags to figure out what kind of a
			 * checkpoint we should perform, and increase the started-counter
			 * to acknowledge that we've started a new checkpoint.
			 */
			SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
			flags |= CheckpointerShmem->ckpt_flags;
			CheckpointerShmem->ckpt_flags = 0;
			CheckpointerShmem->ckpt_started++;
			SpinLockRelease(&CheckpointerShmem->ckpt_lck);

			/*
			 * The end-of-recovery checkpoint is a real checkpoint that's
			 * performed while we're still in recovery.
			 */
			if (flags & CHECKPOINT_END_OF_RECOVERY)
				do_restartpoint = false;

			/*
			 * We will warn if (a) too soon since last checkpoint (whatever
			 * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
			 * since the last checkpoint start.  Note in particular that this
			 * implementation will not generate warnings caused by
			 * CheckPointTimeout < CheckPointWarning.
			 */
			if (!do_restartpoint &&
				(flags & CHECKPOINT_CAUSE_XLOG) &&
				elapsed_secs < CheckPointWarning)
				ereport(LOG,
						(errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
									   "checkpoints are occurring too frequently (%d seconds apart)",
									   elapsed_secs,
									   elapsed_secs),
						 errhint("Consider increasing the configuration parameter \"max_wal_size\".")));

			/*
			 * Initialize checkpointer-private variables used during
			 * checkpoint.
			 */
			ckpt_active = true;
			if (do_restartpoint)
				ckpt_start_recptr = GetXLogReplayRecPtr(NULL);
			else
				ckpt_start_recptr = GetInsertRecPtr();
			ckpt_start_time = now;
			ckpt_cached_elapsed = 0;

			/*
			 * Do the checkpoint.
			 */
			if (!do_restartpoint)
			{
				CreateCheckPoint(flags);
				ckpt_performed = true;
			}
			else
				ckpt_performed = CreateRestartPoint(flags);

			/*
			 * After any checkpoint, close all smgr files.  This is so we
			 * won't hang onto smgr references to deleted files indefinitely.
			 */
			smgrcloseall();

			/*
			 * Indicate checkpoint completion to any waiting backends.
			 */
			SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
			CheckpointerShmem->ckpt_done = CheckpointerShmem->ckpt_started;
			SpinLockRelease(&CheckpointerShmem->ckpt_lck);

			if (ckpt_performed)
			{
				/*
				 * Note we record the checkpoint start time not end time as
				 * last_checkpoint_time.  This is so that time-driven
				 * checkpoints happen at a predictable spacing.
				 */
				last_checkpoint_time = now;
			}
			else
			{
				/*
				 * We were not able to perform the restartpoint (checkpoints
				 * throw an ERROR in case of error).  Most likely because we
				 * have not received any new checkpoint WAL records since the
				 * last restartpoint.  Try again in 15 s.
				 */
				last_checkpoint_time = now - CheckPointTimeout + 15;
			}

			ckpt_active = false;
		}

		/* Check for archive_timeout and switch xlog files if necessary. */
		CheckArchiveTimeout();

		/*
		 * Send off activity statistics to the stats collector.  (The reason
		 * why we re-use bgwriter-related code for this is that the bgwriter
		 * and checkpointer used to be just one process.  It's probably not
		 * worth the trouble to split the stats support into two independent
		 * stats message types.)
		 */
		pgstat_send_bgwriter();

		/*
		 * Sleep until we are signaled or it's time for another checkpoint or
		 * xlog file switch.
		 */
		now = (pg_time_t) time(NULL);
		elapsed_secs = now - last_checkpoint_time;
		if (elapsed_secs >= CheckPointTimeout)
			continue;			/* no sleep for us ... */
		cur_timeout = CheckPointTimeout - elapsed_secs;
		if (XLogArchiveTimeout > 0 && !RecoveryInProgress())
		{
			elapsed_secs = now - last_xlog_switch_time;
			if (elapsed_secs >= XLogArchiveTimeout)
				continue;		/* no sleep for us ... */
			cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
		}

		rc = WaitLatch(MyLatch,
					   WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH,
					   cur_timeout * 1000L /* convert to ms */ ,
					   WAIT_EVENT_CHECKPOINTER_MAIN);

		/*
		 * Emergency bailout if postmaster has died.  This is to avoid the
		 * necessity for manual cleanup of all postmaster children.
		 */
		if (rc & WL_POSTMASTER_DEATH)
			exit(1);
	}
}

/*
 * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
 *
 * This will switch to a new WAL file and force an archive file write if
 * meaningful activity is recorded in the current WAL file.  This includes most
 * writes, including just a single checkpoint record, but excludes WAL records
 * that were inserted with the XLOG_MARK_UNIMPORTANT flag being set (like
 * snapshots of running transactions).  Such records, depending on
 * configuration, occur on regular intervals and don't contain important
 * information.  This avoids generating archives with a few unimportant
 * records.
 */
static void
CheckArchiveTimeout(void)
{
	pg_time_t	now;
	pg_time_t	last_time;
	XLogRecPtr	last_switch_lsn;

	if (XLogArchiveTimeout <= 0 || RecoveryInProgress())
		return;

	now = (pg_time_t) time(NULL);

	/* First we do a quick check using possibly-stale local state. */
	if ((int) (now - last_xlog_switch_time) < XLogArchiveTimeout)
		return;

	/*
	 * Update local state ... note that last_xlog_switch_time is the last time
	 * a switch was performed *or requested*.
	 */
	last_time = GetLastSegSwitchData(&last_switch_lsn);

	last_xlog_switch_time = Max(last_xlog_switch_time, last_time);

	/* Now we can do the real checks */
	if ((int) (now - last_xlog_switch_time) >= XLogArchiveTimeout)
	{
		/*
		 * Switch segment only when "important" WAL has been logged since the
		 * last segment switch (last_switch_lsn points to the end of the
		 * segment the switch occurred in).
		 */
		if (GetLastImportantRecPtr() > last_switch_lsn)
		{
			XLogRecPtr	switchpoint;

			/* mark switch as unimportant, avoids triggering checkpoints */
			switchpoint = RequestXLogSwitch(true);

			/*
			 * If the returned pointer points exactly to a segment boundary,
			 * assume nothing happened.
			 */
			if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
				elog(DEBUG1, "write-ahead log switch forced (archive_timeout=%d)",
					 XLogArchiveTimeout);
		}

		/*
		 * Update state in any case, so we don't retry constantly when the
		 * system is idle.
		 */
		last_xlog_switch_time = now;
	}
}

/*
 * Returns true if an immediate checkpoint request is pending.  (Note that
 * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
 * there is one pending behind it.)
 */
static bool
ImmediateCheckpointRequested(void)
{
	volatile CheckpointerShmemStruct *cps = CheckpointerShmem;

	/*
	 * We don't need to acquire the ckpt_lck in this case because we're only
	 * looking at a single flag bit.
	 */
	if (cps->ckpt_flags & CHECKPOINT_IMMEDIATE)
		return true;
	return false;
}

/*
 * CheckpointWriteDelay -- control rate of checkpoint
 *
 * This function is called after each page write performed by BufferSync().
 * It is responsible for throttling BufferSync()'s write rate to hit
 * checkpoint_completion_target.
 *
 * The checkpoint request flags should be passed in; currently the only one
 * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
 *
 * 'progress' is an estimate of how much of the work has been done, as a
 * fraction between 0.0 meaning none, and 1.0 meaning all done.
 */
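
/*
 * (For reference, and as an assumption about the caller rather than part of
 * this function's contract: BufferSync() computes 'progress' as the fraction
 * of the buffers it intends to write that it has processed so far, roughly
 * num_processed / num_to_scan; the exact variable names there may differ
 * across versions.)
 */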
void
CheckpointWriteDelay(int flags, double progress)
{
	static int	absorb_counter = WRITES_PER_ABSORB;

	/* Do nothing if checkpoint is being executed by non-checkpointer process */
	if (!AmCheckpointerProcess())
		return;

	/*
	 * Perform the usual duties and take a nap, unless we're behind schedule,
	 * in which case we just try to catch up as quickly as possible.
	 */
	if (!(flags & CHECKPOINT_IMMEDIATE) &&
		!shutdown_requested &&
		!ImmediateCheckpointRequested() &&
		IsCheckpointOnSchedule(progress))
	{
		if (got_SIGHUP)
		{
			got_SIGHUP = false;
			ProcessConfigFile(PGC_SIGHUP);
			/* update shmem copies of config variables */
			UpdateSharedMemoryConfig();
		}

		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;

		CheckArchiveTimeout();

		/*
		 * Report interim activity statistics to the stats collector.
		 */
		pgstat_send_bgwriter();

		/*
		 * This sleep used to be connected to bgwriter_delay, typically 200ms.
		 * That resulted in more frequent wakeups if not much work to do.
		 * Checkpointer and bgwriter are no longer related so take the Big
		 * Sleep.
		 */
		pg_usleep(100000L);
	}
	else if (--absorb_counter <= 0)
	{
		/*
		 * Absorb pending fsync requests after each WRITES_PER_ABSORB write
		 * operations even when we don't sleep, to prevent overflow of the
		 * fsync request queue.
		 */
		AbsorbFsyncRequests();
		absorb_counter = WRITES_PER_ABSORB;
	}
}

/*
 * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
 * (or restartpoint) in time?
 *
 * Compares the current progress against the time/segments elapsed since last
 * checkpoint, and returns true if the progress we've made this far is greater
 * than the elapsed time/segments.
 */
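
/*
 * A worked example of the pacing rule below, assuming the default settings:
 * with checkpoint_completion_target = 0.5 and checkpoint_timeout = 300s, a
 * checkpoint that has written 40% of its buffers (progress = 0.4) is still
 * "on schedule" as long as less than
 *
 *		0.4 * 0.5 * 300s = 60s
 *
 * have elapsed and less than 0.4 * 0.5 * CheckPointSegments worth of WAL has
 * been inserted since the checkpoint started; otherwise CheckpointWriteDelay
 * stops napping and the write loop catches up as fast as it can.
 */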
static bool
IsCheckpointOnSchedule(double progress)
{
	XLogRecPtr	recptr;
	struct timeval now;
	double		elapsed_xlogs,
				elapsed_time;

	Assert(ckpt_active);

	/* Scale progress according to checkpoint_completion_target. */
	progress *= CheckPointCompletionTarget;

	/*
	 * Check against the cached value first. Only do the more expensive
	 * calculations once we reach the target previously calculated. Since
	 * neither time nor the WAL insert pointer moves backwards, a freshly
	 * calculated value can only be greater than or equal to the cached value.
	 */
	if (progress < ckpt_cached_elapsed)
		return false;

	/*
	 * Check progress against WAL segments written and CheckPointSegments.
	 *
	 * We compare the current WAL insert location against the location
	 * computed before calling CreateCheckPoint. The code in XLogInsert that
	 * actually triggers a checkpoint when CheckPointSegments is exceeded
	 * compares against RedoRecptr, so this is not completely accurate.
	 * However, it's good enough for our purposes, we're only calculating an
	 * estimate anyway.
	 *
	 * During recovery, we compare last replayed WAL record's location with
	 * the location computed before calling CreateRestartPoint. That maintains
	 * the same pacing as we have during checkpoints in normal operation, but
	 * we might exceed max_wal_size by a fair amount. That's because there can
	 * be a large gap between a checkpoint's redo-pointer and the checkpoint
	 * record itself, and we only start the restartpoint after we've seen the
	 * checkpoint record. (The gap is typically up to CheckPointSegments *
	 * checkpoint_completion_target where checkpoint_completion_target is the
	 * value that was in effect when the WAL was generated).
	 */
	if (RecoveryInProgress())
		recptr = GetXLogReplayRecPtr(NULL);
	else
		recptr = GetInsertRecPtr();
	elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
					 wal_segment_size) / CheckPointSegments;

	if (progress < elapsed_xlogs)
	{
		ckpt_cached_elapsed = elapsed_xlogs;
		return false;
	}

	/*
	 * Check progress against time elapsed and checkpoint_timeout.
	 */
	gettimeofday(&now, NULL);
	elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
					now.tv_usec / 1000000.0) / CheckPointTimeout;

	if (progress < elapsed_time)
	{
		ckpt_cached_elapsed = elapsed_time;
		return false;
	}

	/* It looks like we're on schedule. */
	return true;
}


/* --------------------------------
 *		signal handler routines
 * --------------------------------
 */

/*
 * chkpt_quickdie() occurs when signalled SIGQUIT by the postmaster.
 *
 * Some backend has bought the farm,
 * so we need to stop what we're doing and exit.
 */
static void
chkpt_quickdie(SIGNAL_ARGS)
{
	/*
	 * We DO NOT want to run proc_exit() or atexit() callbacks -- we're here
	 * because shared memory may be corrupted, so we don't want to try to
	 * clean up our transaction.  Just nail the windows shut and get out of
	 * town.  The callbacks wouldn't be safe to run from a signal handler,
	 * anyway.
	 *
	 * Note we do _exit(2) not _exit(0).  This is to force the postmaster into
	 * a system reset cycle if someone sends a manual SIGQUIT to a random
	 * backend.  This is necessary precisely because we don't clean up our
	 * shared memory state.  (The "dead man switch" mechanism in pmsignal.c
	 * should ensure the postmaster sees this as a crash, too, but no harm in
	 * being doubly sure.)
	 */
	_exit(2);
}

/* SIGHUP: set flag to re-read config file at next convenient time */
static void
ChkptSigHupHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	got_SIGHUP = true;
	SetLatch(MyLatch);

	errno = save_errno;
}

/* SIGINT: set flag to run a normal checkpoint right away */
static void
ReqCheckpointHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	/*
	 * The signalling process should have set ckpt_flags nonzero, so all we
	 * need do is ensure that our main loop gets kicked out of any wait.
	 */
	SetLatch(MyLatch);

	errno = save_errno;
}

/* SIGUSR1: used for latch wakeups */
static void
chkpt_sigusr1_handler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	latch_sigusr1_handler();

	errno = save_errno;
}

/* SIGUSR2: set flag to run a shutdown checkpoint and exit */
static void
ReqShutdownHandler(SIGNAL_ARGS)
{
	int			save_errno = errno;

	shutdown_requested = true;
	SetLatch(MyLatch);

	errno = save_errno;
}


/* --------------------------------
 *		communication with backends
 * --------------------------------
 */

/*
 * CheckpointerShmemSize
 *		Compute space needed for checkpointer-related shared memory
 */
Size
CheckpointerShmemSize(void)
{
	Size		size;

	/*
	 * Currently, the size of the requests[] array is arbitrarily set equal to
	 * NBuffers.  This may prove too large or small ...
	 */
	size = offsetof(CheckpointerShmemStruct, requests);
	size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));

	return size;
}

/*
 * CheckpointerShmemInit
 *		Allocate and initialize checkpointer-related shared memory
 */
void
CheckpointerShmemInit(void)
{
	Size		size = CheckpointerShmemSize();
	bool		found;

	CheckpointerShmem = (CheckpointerShmemStruct *)
		ShmemInitStruct("Checkpointer Data",
						size,
						&found);

	if (!found)
	{
		/*
		 * First time through, so initialize.  Note that we zero the whole
		 * requests array; this is so that CompactCheckpointerRequestQueue can
		 * assume that any pad bytes in the request structs are zeroes.
		 */
		MemSet(CheckpointerShmem, 0, size);
		SpinLockInit(&CheckpointerShmem->ckpt_lck);
		CheckpointerShmem->max_requests = NBuffers;
	}
}

/*
 * RequestCheckpoint
 *		Called in backend processes to request a checkpoint
 *
 * flags is a bitwise OR of the following:
 *	CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
 *	CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
 *	CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
 *		ignoring checkpoint_completion_target parameter.
 *	CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
 *		since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
 *		CHECKPOINT_END_OF_RECOVERY).
 *	CHECKPOINT_WAIT: wait for completion before returning (otherwise,
 *		just signal checkpointer to do it, and return).
 *	CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
 *		(This affects logging, and in particular enables CheckPointWarning.)
 */
void
RequestCheckpoint(int flags)
{
	int			ntries;
	int			old_failed,
				old_started;

	/*
	 * If in a standalone backend, just do it ourselves.
	 */
	if (!IsPostmasterEnvironment)
	{
		/*
		 * There's no point in doing slow checkpoints in a standalone backend,
		 * because there are no other backends the checkpoint could disrupt.
		 */
		CreateCheckPoint(flags | CHECKPOINT_IMMEDIATE);

		/*
		 * After any checkpoint, close all smgr files.  This is so we won't
		 * hang onto smgr references to deleted files indefinitely.
		 */
		smgrcloseall();

		return;
	}

	/*
	 * Atomically set the request flags, and take a snapshot of the counters.
	 * When we see ckpt_started > old_started, we know the flags we set here
	 * have been seen by checkpointer.
	 *
	 * Note that we OR the flags with any existing flags, to avoid overriding
	 * a "stronger" request by another backend.  The flag senses must be
	 * chosen to make this work!
	 */
	SpinLockAcquire(&CheckpointerShmem->ckpt_lck);

	old_failed = CheckpointerShmem->ckpt_failed;
	old_started = CheckpointerShmem->ckpt_started;
	CheckpointerShmem->ckpt_flags |= (flags | CHECKPOINT_REQUESTED);

	SpinLockRelease(&CheckpointerShmem->ckpt_lck);

	/*
	 * Send signal to request checkpoint.  It's possible that the checkpointer
	 * hasn't started yet, or is in process of restarting, so we will retry a
	 * few times if needed.  (Actually, more than a few times, since on slow
	 * or overloaded buildfarm machines, it's been observed that the
	 * checkpointer can take several seconds to start.)
	 * However, if not told to wait for the checkpoint to occur, we consider
	 * failure to send the signal to be nonfatal and merely LOG it.  The
	 * checkpointer should see the request when it does start, with or without
	 * getting a signal.
	 */
#define MAX_SIGNAL_TRIES 600	/* max wait 60.0 sec */
	for (ntries = 0;; ntries++)
	{
		if (CheckpointerShmem->checkpointer_pid == 0)
		{
			if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not signal for checkpoint: checkpointer is not running");
				break;
			}
		}
		else if (kill(CheckpointerShmem->checkpointer_pid, SIGINT) != 0)
		{
			if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
			{
				elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
					 "could not signal for checkpoint: %m");
				break;
			}
		}
		else
			break;				/* signal sent successfully */

		CHECK_FOR_INTERRUPTS();
		pg_usleep(100000L);		/* wait 0.1 sec, then retry */
	}

	/*
	 * If requested, wait for completion.  We detect completion according to
	 * the algorithm given above.
	 */
	if (flags & CHECKPOINT_WAIT)
	{
		int			new_started,
					new_failed;

		/* Wait for a new checkpoint to start. */
		for (;;)
		{
			SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
			new_started = CheckpointerShmem->ckpt_started;
			SpinLockRelease(&CheckpointerShmem->ckpt_lck);

			if (new_started != old_started)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		/*
		 * We are waiting for ckpt_done >= new_started, in a modulo sense.
		 */
		for (;;)
		{
			int			new_done;

			SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
			new_done = CheckpointerShmem->ckpt_done;
			new_failed = CheckpointerShmem->ckpt_failed;
			SpinLockRelease(&CheckpointerShmem->ckpt_lck);

			if (new_done - new_started >= 0)
				break;

			CHECK_FOR_INTERRUPTS();
			pg_usleep(100000L);
		}

		if (new_failed != old_failed)
			ereport(ERROR,
					(errmsg("checkpoint request failed"),
					 errhint("Consult recent messages in the server log for details.")));
	}
}
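
/*
 * Typical usage, for illustration only (see the callers elsewhere in the
 * tree for the authoritative flag combinations): the manual CHECKPOINT
 * command requests an immediate checkpoint and waits for it, roughly
 *
 *		RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_WAIT |
 *						  CHECKPOINT_FORCE);
 *
 * while the WAL-volume-driven requests from xlog.c pass only
 * CHECKPOINT_CAUSE_XLOG and do not wait.
 */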
/*
 * ForwardFsyncRequest
 *		Forward a file-fsync request from a backend to the checkpointer
 *
 * Whenever a backend is compelled to write directly to a relation
 * (which should be seldom, if the background writer is getting its job done),
 * the backend calls this routine to pass over knowledge that the relation
 * is dirty and must be fsync'd before next checkpoint.  We also use this
 * opportunity to count such writes for statistical purposes.
 *
 * This functionality is only supported for regular (not backend-local)
 * relations, so the rnode argument is intentionally RelFileNode not
 * RelFileNodeBackend.
 *
 * segno specifies which segment (not block!) of the relation needs to be
 * fsync'd.  (Since the valid range is much less than BlockNumber, we can
 * use high values for special flags; that's all internal to md.c, which
 * see for details.)
 *
 * To avoid holding the lock for longer than necessary, we normally write
 * to the requests[] queue without checking for duplicates.  The checkpointer
 * will have to eliminate dups internally anyway.  However, if we discover
 * that the queue is full, we make a pass over the entire queue to compact
 * it.  This is somewhat expensive, but the alternative is for the backend
 * to perform its own fsync, which is far more expensive in practice.  It
 * is theoretically possible a backend fsync might still be necessary, if
 * the queue is full and contains no duplicate entries.  In that case, we
 * let the backend know by returning false.
 */
bool
ForwardFsyncRequest(RelFileNode rnode, ForkNumber forknum, BlockNumber segno)
{
	CheckpointerRequest *request;
	bool		too_full;

	if (!IsUnderPostmaster)
		return false;			/* probably shouldn't even get here */

	if (AmCheckpointerProcess())
		elog(ERROR, "ForwardFsyncRequest must not be called in checkpointer");

	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Count all backend writes regardless of if they fit in the queue */
	if (!AmBackgroundWriterProcess())
		CheckpointerShmem->num_backend_writes++;

	/*
	 * If the checkpointer isn't running or the request queue is full, the
	 * backend will have to perform its own fsync request.  But before forcing
	 * that to happen, we can try to compact the request queue.
	 */
	if (CheckpointerShmem->checkpointer_pid == 0 ||
		(CheckpointerShmem->num_requests >= CheckpointerShmem->max_requests &&
		 !CompactCheckpointerRequestQueue()))
	{
		/*
		 * Count the subset of writes where backends have to do their own
		 * fsync
		 */
		if (!AmBackgroundWriterProcess())
			CheckpointerShmem->num_backend_fsync++;
		LWLockRelease(CheckpointerCommLock);
		return false;
	}

	/* OK, insert request */
	request = &CheckpointerShmem->requests[CheckpointerShmem->num_requests++];
	request->rnode = rnode;
	request->forknum = forknum;
	request->segno = segno;

	/* If queue is more than half full, nudge the checkpointer to empty it */
	too_full = (CheckpointerShmem->num_requests >=
				CheckpointerShmem->max_requests / 2);

	LWLockRelease(CheckpointerCommLock);

	/* ... but not till after we release the lock */
	if (too_full && ProcGlobal->checkpointerLatch)
		SetLatch(ProcGlobal->checkpointerLatch);

	return true;
}

/*
 * CompactCheckpointerRequestQueue
 *		Remove duplicates from the request queue to avoid backend fsyncs.
 *		Returns "true" if any entries were removed.
 *
 * Although a full fsync request queue is not common, it can lead to severe
 * performance problems when it does happen.  So far, this situation has
 * only been observed to occur when the system is under heavy write load,
 * and especially during the "sync" phase of a checkpoint.  Without this
 * logic, each backend begins doing an fsync for every block written, which
 * gets very expensive and can slow down the whole system.
 *
 * Trying to do this every time the queue is full could lose if there
 * aren't any removable entries.  But that should be vanishingly rare in
 * practice: there's one queue entry per shared buffer.
 */
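
/*
 * For example (illustrative only): a queue containing the requests
 * [A, B, A, C, A] compacts to [B, C, A].  Each earlier occurrence of a
 * duplicated request is dropped, the latest occurrence is kept, and the
 * relative order of the surviving entries is preserved.
 */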
static bool
CompactCheckpointerRequestQueue(void)
{
	struct CheckpointerSlotMapping
	{
		CheckpointerRequest request;
		int			slot;
	};

	int			n,
				preserve_count;
	int			num_skipped = 0;
	HASHCTL		ctl;
	HTAB	   *htab;
	bool	   *skip_slot;

	/* must hold CheckpointerCommLock in exclusive mode */
	Assert(LWLockHeldByMe(CheckpointerCommLock));

	/* Initialize skip_slot array */
	skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);

	/* Initialize temporary hash table */
	MemSet(&ctl, 0, sizeof(ctl));
	ctl.keysize = sizeof(CheckpointerRequest);
	ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
	ctl.hcxt = CurrentMemoryContext;

	htab = hash_create("CompactCheckpointerRequestQueue",
					   CheckpointerShmem->num_requests,
					   &ctl,
					   HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);

	/*
	 * The basic idea here is that a request can be skipped if it's followed
	 * by a later, identical request.  It might seem more sensible to work
	 * backwards from the end of the queue and check whether a request is
	 * *preceded* by an earlier, identical request, in the hopes of doing less
	 * copying.  But that might change the semantics, if there's an
	 * intervening FORGET_RELATION_FSYNC or FORGET_DATABASE_FSYNC request, so
	 * we do it this way.  It would be possible to be even smarter if we made
	 * the code below understand the specific semantics of such requests (it
	 * could blow away preceding entries that would end up being canceled
	 * anyhow), but it's not clear that the extra complexity would buy us
	 * anything.
	 */
	for (n = 0; n < CheckpointerShmem->num_requests; n++)
	{
		CheckpointerRequest *request;
		struct CheckpointerSlotMapping *slotmap;
		bool		found;

		/*
		 * We use the request struct directly as a hashtable key.  This
		 * assumes that any padding bytes in the structs are consistently the
		 * same, which should be okay because we zeroed them in
		 * CheckpointerShmemInit.  Note also that RelFileNode had better
		 * contain no pad bytes.
		 */
		request = &CheckpointerShmem->requests[n];
		slotmap = hash_search(htab, request, HASH_ENTER, &found);
		if (found)
		{
			/* Duplicate, so mark the previous occurrence as skippable */
			skip_slot[slotmap->slot] = true;
			num_skipped++;
		}
		/* Remember slot containing latest occurrence of this request value */
		slotmap->slot = n;
	}

	/* Done with the hash table. */
	hash_destroy(htab);

	/* If no duplicates, we're out of luck. */
	if (!num_skipped)
	{
		pfree(skip_slot);
		return false;
	}

	/* We found some duplicates; remove them. */
	preserve_count = 0;
	for (n = 0; n < CheckpointerShmem->num_requests; n++)
	{
		if (skip_slot[n])
			continue;
		CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
	}
	ereport(DEBUG1,
			(errmsg("compacted fsync request queue from %d entries to %d entries",
					CheckpointerShmem->num_requests, preserve_count)));
	CheckpointerShmem->num_requests = preserve_count;

	/* Cleanup. */
	pfree(skip_slot);
	return true;
}

/*
 * AbsorbFsyncRequests
 *		Retrieve queued fsync requests and pass them to local smgr.
 *
 * This is exported because it must be called during CreateCheckPoint;
 * we have to be sure we have accepted all pending requests just before
 * we start fsync'ing.  Since CreateCheckPoint sometimes runs in
 * non-checkpointer processes, do nothing if not checkpointer.
 */
void
AbsorbFsyncRequests(void)
{
	CheckpointerRequest *requests = NULL;
	CheckpointerRequest *request;
	int			n;

	if (!AmCheckpointerProcess())
		return;

	LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);

	/* Transfer stats counts into pending pgstats message */
	BgWriterStats.m_buf_written_backend += CheckpointerShmem->num_backend_writes;
	BgWriterStats.m_buf_fsync_backend += CheckpointerShmem->num_backend_fsync;

	CheckpointerShmem->num_backend_writes = 0;
	CheckpointerShmem->num_backend_fsync = 0;

	/*
	 * We try to avoid holding the lock for a long time by copying the request
	 * array, and processing the requests after releasing the lock.
	 *
	 * Once we have cleared the requests from shared memory, we have to PANIC
	 * if we then fail to absorb them (eg, because our hashtable runs out of
	 * memory).  This is because the system cannot run safely if we are unable
	 * to fsync what we have been told to fsync.  Fortunately, the hashtable
	 * is so small that the problem is quite unlikely to arise in practice.
	 */
	n = CheckpointerShmem->num_requests;
	if (n > 0)
	{
		requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
		memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
	}

	START_CRIT_SECTION();

	CheckpointerShmem->num_requests = 0;

	LWLockRelease(CheckpointerCommLock);

	for (request = requests; n > 0; request++, n--)
		RememberFsyncRequest(request->rnode, request->forknum, request->segno);

	END_CRIT_SECTION();

	if (requests)
		pfree(requests);
}

/*
 * Update any shared memory configurations based on config parameters
 */
static void
UpdateSharedMemoryConfig(void)
{
	/* update global shmem state for sync rep */
	SyncRepUpdateSyncStandbysDefined();

	/*
	 * If full_page_writes has been changed by SIGHUP, we update it in shared
	 * memory and write an XLOG_FPW_CHANGE record.
	 */
	UpdateFullPageWrites();

	elog(DEBUG2, "checkpointer updated shared memory configuration values");
}

/*
 * FirstCallSinceLastCheckpoint allows a process to take an action once
 * per checkpoint cycle by asynchronously checking for checkpoint completion.
 */
bool
FirstCallSinceLastCheckpoint(void)
{
	static int	ckpt_done = 0;
	int			new_done;
	bool		FirstCall = false;

	SpinLockAcquire(&CheckpointerShmem->ckpt_lck);
	new_done = CheckpointerShmem->ckpt_done;
	SpinLockRelease(&CheckpointerShmem->ckpt_lck);

	if (new_done != ckpt_done)
		FirstCall = true;

	ckpt_done = new_done;

	return FirstCall;
}