PostgreSQL Source Code git master
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Pages
checkpointer.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * checkpointer.c
4 *
5 * The checkpointer is new as of Postgres 9.2. It handles all checkpoints.
6 * Checkpoints are automatically dispatched after a certain amount of time has
7 * elapsed since the last one, and it can be signaled to perform requested
8 * checkpoints as well. (The GUC parameter that mandates a checkpoint every
9 * so many WAL segments is implemented by having backends signal when they
10 * fill WAL segments; the checkpointer itself doesn't watch for the
11 * condition.)
12 *
13 * The normal termination sequence is that checkpointer is instructed to
14 * execute the shutdown checkpoint by SIGINT. After that checkpointer waits
15 * to be terminated via SIGUSR2, which instructs the checkpointer to exit(0).
16 * All backends must be stopped before SIGINT or SIGUSR2 is issued!
17 *
18 * Emergency termination is by SIGQUIT; like any backend, the checkpointer
19 * will simply abort and exit on SIGQUIT.
20 *
21 * If the checkpointer exits unexpectedly, the postmaster treats that the same
22 * as a backend crash: shared memory may be corrupted, so remaining backends
23 * should be killed by SIGQUIT and then a recovery cycle started. (Even if
24 * shared memory isn't corrupted, we have lost information about which
25 * files need to be fsync'd for the next checkpoint, and so a system
26 * restart needs to be forced.)
27 *
28 *
29 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
30 *
31 *
32 * IDENTIFICATION
33 * src/backend/postmaster/checkpointer.c
34 *
35 *-------------------------------------------------------------------------
36 */
37#include "postgres.h"
38
39#include <sys/time.h>
40#include <time.h>
41
42#include "access/xlog.h"
44#include "access/xlogrecovery.h"
45#include "libpq/pqsignal.h"
46#include "miscadmin.h"
47#include "pgstat.h"
49#include "postmaster/bgwriter.h"
51#include "replication/syncrep.h"
52#include "storage/aio_subsys.h"
53#include "storage/bufmgr.h"
55#include "storage/fd.h"
56#include "storage/ipc.h"
57#include "storage/lwlock.h"
58#include "storage/pmsignal.h"
59#include "storage/proc.h"
60#include "storage/procsignal.h"
61#include "storage/shmem.h"
62#include "storage/smgr.h"
63#include "storage/spin.h"
64#include "utils/guc.h"
65#include "utils/memutils.h"
66#include "utils/resowner.h"
67
68
69/*----------
70 * Shared memory area for communication between checkpointer and backends
71 *
72 * The ckpt counters allow backends to watch for completion of a checkpoint
73 * request they send. Here's how it works:
74 * * At start of a checkpoint, checkpointer reads (and clears) the request
75 * flags and increments ckpt_started, while holding ckpt_lck.
76 * * On completion of a checkpoint, checkpointer sets ckpt_done to
77 * equal ckpt_started.
78 * * On failure of a checkpoint, checkpointer increments ckpt_failed
79 * and sets ckpt_done to equal ckpt_started.
80 *
81 * The algorithm for backends is:
82 * 1. Record current values of ckpt_failed and ckpt_started, and
83 * set request flags, while holding ckpt_lck.
84 * 2. Send signal to request checkpoint.
85 * 3. Sleep until ckpt_started changes. Now you know a checkpoint has
86 * begun since you started this algorithm (although *not* that it was
87 * specifically initiated by your signal), and that it is using your flags.
88 * 4. Record new value of ckpt_started.
89 * 5. Sleep until ckpt_done >= saved value of ckpt_started. (Use modulo
90 * arithmetic here in case counters wrap around.) Now you know a
91 * checkpoint has started and completed, but not whether it was
92 * successful.
93 * 6. If ckpt_failed is different from the originally saved value,
94 * assume request failed; otherwise it was definitely successful.
95 *
96 * ckpt_flags holds the OR of the checkpoint request flags sent by all
97 * requesting backends since the last checkpoint start. The flags are
98 * chosen so that OR'ing is the correct way to combine multiple requests.
99 *
100 * The requests array holds fsync requests sent by backends and not yet
101 * absorbed by the checkpointer.
102 *
103 * Unlike the checkpoint fields, requests related fields are protected by
104 * CheckpointerCommLock.
105 *----------
106 */
/*
 * NOTE(review): this listing was extracted from a rendered (Doxygen) page;
 * lines that carried hyperlinks were lost.  In this section the closing
 * "} CheckpointerRequest;" / "} ...ShmemStruct;" typedef lines, the
 * requests[] flexible array member (referenced later via
 * offsetof(CheckpointerShmemStruct, requests)), the GUC variable
 * definitions under "GUC parameters" (e.g. CheckPointTimeout, used below),
 * and several static variables under "Private state" are missing — restore
 * from the upstream source before compiling.
 */
107typedef struct
108{
109 SyncRequestType type; /* request type */
110 FileTag ftag; /* file identifier */
112
113typedef struct
114{
115 pid_t checkpointer_pid; /* PID (0 if not started) */
116
117 slock_t ckpt_lck; /* protects all the ckpt_* fields */
118
119 int ckpt_started; /* advances when checkpoint starts */
120 int ckpt_done; /* advances when checkpoint done */
121 int ckpt_failed; /* advances when checkpoint fails */
122
123 int ckpt_flags; /* checkpoint flags, as defined in xlog.h */
124
125 ConditionVariable start_cv; /* signaled when ckpt_started advances */
126 ConditionVariable done_cv; /* signaled when ckpt_done advances */
127
128 int num_requests; /* current # of requests */
129 int max_requests; /* allocated array size */
132
134
135/* interval for calling AbsorbSyncRequests in CheckpointWriteDelay */
136#define WRITES_PER_ABSORB 1000
137
138/*
139 * GUC parameters
140 */
144
145/*
146 * Private state
147 */
148static bool ckpt_active = false;
149static volatile sig_atomic_t ShutdownXLOGPending = false;
150
151/* these values are valid when ckpt_active is true: */
155
158
159/* Prototypes for private functions */
160
161static void ProcessCheckpointerInterrupts(void);
162static void CheckArchiveTimeout(void);
163static bool IsCheckpointOnSchedule(double progress);
164static bool ImmediateCheckpointRequested(void);
165static bool CompactCheckpointerRequestQueue(void);
166static void UpdateSharedMemoryConfig(void);
167
168/* Signal handlers */
169static void ReqShutdownXLOG(SIGNAL_ARGS);
171
172/*
173 * Main entry point for checkpointer process
174 *
175 * This is invoked from AuxiliaryProcessMain, which has already created the
176 * basic execution environment, but not enabled signals yet.
177 */
178void
179CheckpointerMain(const void *startup_data, size_t startup_data_len)
180{
/*
 * NOTE(review): numeric gaps in this listing (e.g. original lines 186-189,
 * 199, 205-206, 216-217, 229-230, 334-335, 340-341, 362-366, ...) are lines
 * lost when the page was scraped — the stripped lines appear to be the ones
 * that carried hyperlinks (calls to other PostgreSQL functions, additional
 * pqsignal() registrations, latch/CV operations).  Verify against the
 * upstream file before relying on this listing.
 */
181 sigjmp_buf local_sigjmp_buf;
182 MemoryContext checkpointer_context;
183
184 Assert(startup_data_len == 0);
185
188
190
191 /*
192 * Properly accept or ignore signals the postmaster might send us
193 *
194 * Note: we deliberately ignore SIGTERM, because during a standard Unix
195 * system shutdown cycle, init will SIGTERM all processes at once. We
196 * want to wait for the backends to exit, whereupon the postmaster will
197 * tell us it's okay to shut down (via SIGUSR2).
198 */
200 pqsignal(SIGINT, ReqShutdownXLOG);
201 pqsignal(SIGTERM, SIG_IGN); /* ignore SIGTERM */
202 /* SIGQUIT handler was already set up by InitPostmasterChild */
203 pqsignal(SIGALRM, SIG_IGN);
204 pqsignal(SIGPIPE, SIG_IGN);
207
208 /*
209 * Reset some signals that are accepted by postmaster but not here
210 */
211 pqsignal(SIGCHLD, SIG_DFL);
212
213 /*
214 * Initialize so that first time-driven event happens at the correct time.
215 */
217
218 /*
219 * Write out stats after shutdown. This needs to be called by exactly one
220 * process during a normal shutdown, and since checkpointer is shut down
221 * very late...
222 *
223 * While e.g. walsenders are active after the shutdown checkpoint has been
224 * written (and thus could produce more stats), checkpointer stays around
225 * after the shutdown checkpoint has been written. postmaster will only
226 * signal checkpointer to exit after all processes that could emit stats
227 * have been shut down.
228 */
230
231 /*
232 * Create a memory context that we will do all our work in. We do this so
233 * that we can reset the context during error recovery and thereby avoid
234 * possible memory leaks. Formerly this code just ran in
235 * TopMemoryContext, but resetting that would be a really bad idea.
236 */
237 checkpointer_context = AllocSetContextCreate(TopMemoryContext,
238 "Checkpointer",
240 MemoryContextSwitchTo(checkpointer_context);
241
242 /*
243 * If an exception is encountered, processing resumes here.
244 *
245 * You might wonder why this isn't coded as an infinite loop around a
246 * PG_TRY construct. The reason is that this is the bottom of the
247 * exception stack, and so with PG_TRY there would be no exception handler
248 * in force at all during the CATCH part. By leaving the outermost setjmp
249 * always active, we have at least some chance of recovering from an error
250 * during error recovery. (If we get into an infinite loop thereby, it
251 * will soon be stopped by overflow of elog.c's internal state stack.)
252 *
253 * Note that we use sigsetjmp(..., 1), so that the prevailing signal mask
254 * (to wit, BlockSig) will be restored when longjmp'ing to here. Thus,
255 * signals other than SIGQUIT will be blocked until we complete error
256 * recovery. It might seem that this policy makes the HOLD_INTERRUPTS()
257 * call redundant, but it is not since InterruptPending might be set
258 * already.
259 */
260 if (sigsetjmp(local_sigjmp_buf, 1) != 0)
261 {
262 /* Since not using PG_TRY, must reset error stack by hand */
263 error_context_stack = NULL;
264
265 /* Prevent interrupts while cleaning up */
267
268 /* Report the error to the server log */
270
271 /*
272 * These operations are really just a minimal subset of
273 * AbortTransaction(). We don't have very many resources to worry
274 * about in checkpointer, but we do have LWLocks, buffers, and temp
275 * files.
276 */
283 AtEOXact_Buffers(false);
285 AtEOXact_Files(false);
286 AtEOXact_HashTables(false);
287
288 /* Warn any waiting backends that the checkpoint failed. */
289 if (ckpt_active)
290 {
/* NOTE(review): lines 291-297 (presumably the ckpt_failed/ckpt_done update
 * under ckpt_lck and the done_cv broadcast) are missing from this listing. */
295
297
298 ckpt_active = false;
299 }
300
301 /*
302 * Now return to normal top-level context and clear ErrorContext for
303 * next time.
304 */
305 MemoryContextSwitchTo(checkpointer_context);
307
308 /* Flush any leaked data in the top-level context */
309 MemoryContextReset(checkpointer_context);
310
311 /* Now we can allow interrupts again */
313
314 /*
315 * Sleep at least 1 second after any error. A write error is likely
316 * to be repeated, and we don't want to be filling the error logs as
317 * fast as we can.
318 */
319 pg_usleep(1000000L);
320 }
321
322 /* We can now handle ereport(ERROR) */
323 PG_exception_stack = &local_sigjmp_buf;
324
325 /*
326 * Unblock signals (they were blocked when the postmaster forked us)
327 */
328 sigprocmask(SIG_SETMASK, &UnBlockSig, NULL);
329
330 /*
331 * Ensure all shared memory values are set correctly for the config. Doing
332 * this here ensures no race conditions from other concurrent updaters.
333 */
335
336 /*
337 * Advertise our proc number that backends can use to wake us up while
338 * we're sleeping.
339 */
341
342 /*
343 * Loop until we've been asked to write the shutdown checkpoint or
344 * terminate.
345 */
346 for (;;)
347 {
348 bool do_checkpoint = false;
349 int flags = 0;
351 int elapsed_secs;
352 int cur_timeout;
353 bool chkpt_or_rstpt_requested = false;
354 bool chkpt_or_rstpt_timed = false;
355
356 /* Clear any already-pending wakeups */
358
359 /*
360 * Process any requests or signals received recently.
361 */
363
/* NOTE(review): the loop-exit condition (lines 364-365, presumably testing
 * ShutdownXLOGPending / shutdown request) is missing before this break. */
366 break;
367
368 /*
369 * Detect a pending checkpoint request by checking whether the flags
370 * word in shared memory is nonzero. We shouldn't need to acquire the
371 * ckpt_lck for this.
372 */
373 if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
374 {
375 do_checkpoint = true;
376 chkpt_or_rstpt_requested = true;
377 }
378
379 /*
380 * Force a checkpoint if too much time has elapsed since the last one.
381 * Note that we count a timed checkpoint in stats only when this
382 * occurs without an external request, but we set the CAUSE_TIME flag
383 * bit even if there is also an external request.
384 */
385 now = (pg_time_t) time(NULL);
386 elapsed_secs = now - last_checkpoint_time;
387 if (elapsed_secs >= CheckPointTimeout)
388 {
389 if (!do_checkpoint)
390 chkpt_or_rstpt_timed = true;
391 do_checkpoint = true;
392 flags |= CHECKPOINT_CAUSE_TIME;
393 }
394
395 /*
396 * Do a checkpoint if requested.
397 */
398 if (do_checkpoint)
399 {
400 bool ckpt_performed = false;
401 bool do_restartpoint;
402
403 /* Check if we should perform a checkpoint or a restartpoint. */
404 do_restartpoint = RecoveryInProgress();
405
406 /*
407 * Atomically fetch the request flags to figure out what kind of a
408 * checkpoint we should perform, and increase the started-counter
409 * to acknowledge that we've started a new checkpoint.
410 */
416
418
419 /*
420 * The end-of-recovery checkpoint is a real checkpoint that's
421 * performed while we're still in recovery.
422 */
423 if (flags & CHECKPOINT_END_OF_RECOVERY)
424 do_restartpoint = false;
425
426 if (chkpt_or_rstpt_timed)
427 {
428 chkpt_or_rstpt_timed = false;
429 if (do_restartpoint)
431 else
433 }
434
435 if (chkpt_or_rstpt_requested)
436 {
437 chkpt_or_rstpt_requested = false;
438 if (do_restartpoint)
440 else
442 }
443
444 /*
445 * We will warn if (a) too soon since last checkpoint (whatever
446 * caused it) and (b) somebody set the CHECKPOINT_CAUSE_XLOG flag
447 * since the last checkpoint start. Note in particular that this
448 * implementation will not generate warnings caused by
449 * CheckPointTimeout < CheckPointWarning.
450 */
451 if (!do_restartpoint &&
452 (flags & CHECKPOINT_CAUSE_XLOG) &&
453 elapsed_secs < CheckPointWarning)
454 ereport(LOG,
455 (errmsg_plural("checkpoints are occurring too frequently (%d second apart)",
456 "checkpoints are occurring too frequently (%d seconds apart)",
457 elapsed_secs,
458 elapsed_secs),
459 errhint("Consider increasing the configuration parameter \"%s\".", "max_wal_size")));
460
461 /*
462 * Initialize checkpointer-private variables used during
463 * checkpoint.
464 */
465 ckpt_active = true;
466 if (do_restartpoint)
468 else
472
473 /*
474 * Do the checkpoint.
475 */
476 if (!do_restartpoint)
477 ckpt_performed = CreateCheckPoint(flags);
478 else
479 ckpt_performed = CreateRestartPoint(flags);
480
481 /*
482 * After any checkpoint, free all smgr objects. Otherwise we
483 * would never do so for dropped relations, as the checkpointer
484 * does not process shared invalidation messages or call
485 * AtEOXact_SMgr().
486 */
488
489 /*
490 * Indicate checkpoint completion to any waiting backends.
491 */
495
497
498 if (!do_restartpoint)
499 {
500 /*
501 * Note we record the checkpoint start time not end time as
502 * last_checkpoint_time. This is so that time-driven
503 * checkpoints happen at a predictable spacing.
504 */
506
507 if (ckpt_performed)
509 }
510 else
511 {
512 if (ckpt_performed)
513 {
514 /*
515 * The same as for checkpoint. Please see the
516 * corresponding comment.
517 */
519
521 }
522 else
523 {
524 /*
525 * We were not able to perform the restartpoint
526 * (checkpoints throw an ERROR in case of error). Most
527 * likely because we have not received any new checkpoint
528 * WAL records since the last restartpoint. Try again in
529 * 15 s.
530 */
532 }
533 }
534
535 ckpt_active = false;
536
537 /*
538 * We may have received an interrupt during the checkpoint and the
539 * latch might have been reset (e.g. in CheckpointWriteDelay).
540 */
543 break;
544 }
545
546 /* Check for archive_timeout and switch xlog files if necessary. */
548
549 /* Report pending statistics to the cumulative stats system */
551 pgstat_report_wal(true);
552
553 /*
554 * If any checkpoint flags have been set, redo the loop to handle the
555 * checkpoint without sleeping.
556 */
557 if (((volatile CheckpointerShmemStruct *) CheckpointerShmem)->ckpt_flags)
558 continue;
559
560 /*
561 * Sleep until we are signaled or it's time for another checkpoint or
562 * xlog file switch.
563 */
564 now = (pg_time_t) time(NULL);
565 elapsed_secs = now - last_checkpoint_time;
566 if (elapsed_secs >= CheckPointTimeout)
567 continue; /* no sleep for us ... */
568 cur_timeout = CheckPointTimeout - elapsed_secs;
570 {
571 elapsed_secs = now - last_xlog_switch_time;
572 if (elapsed_secs >= XLogArchiveTimeout)
573 continue; /* no sleep for us ... */
574 cur_timeout = Min(cur_timeout, XLogArchiveTimeout - elapsed_secs);
575 }
576
577 (void) WaitLatch(MyLatch,
579 cur_timeout * 1000L /* convert to ms */ ,
580 WAIT_EVENT_CHECKPOINTER_MAIN);
581 }
582
583 /*
584 * From here on, elog(ERROR) should end with exit(1), not send control
585 * back to the sigsetjmp block above.
586 */
587 ExitOnAnyError = true;
588
/* NOTE(review): line 589 is missing — presumably the guard
 * "if (ShutdownXLOGPending)", since the flag is cleared at line 607 below
 * after the shutdown checkpoint; confirm against upstream. */
590 {
591 /*
592 * Close down the database.
593 *
594 * Since ShutdownXLOG() creates restartpoint or checkpoint, and
595 * updates the statistics, increment the checkpoint request and flush
596 * out pending statistic.
597 */
599 ShutdownXLOG(0, 0);
601 pgstat_report_wal(true);
602
603 /*
604 * Tell postmaster that we're done.
605 */
607 ShutdownXLOGPending = false;
608 }
609
610 /*
611 * Wait until we're asked to shut down. By separating the writing of the
612 * shutdown checkpoint from checkpointer exiting, checkpointer can perform
613 * some should-be-as-late-as-possible work like writing out stats.
614 */
615 for (;;)
616 {
617 /* Clear any already-pending wakeups */
619
621
623 break;
624
625 (void) WaitLatch(MyLatch,
627 0,
628 WAIT_EVENT_CHECKPOINTER_SHUTDOWN);
629 }
630
631 /* Normal exit from the checkpointer is here */
632 proc_exit(0); /* done */
633}
634
635/*
636 * Process any new interrupts.
637 */
638static void
/* NOTE(review): line 639, the function-name line
 * "ProcessCheckpointerInterrupts(void)", was stripped by the scrape. */
640{
643
/* NOTE(review): line 644 is missing — presumably
 * "if (ConfigReloadPending)", since the flag is cleared just below. */
645 {
646 ConfigReloadPending = false;
648
649 /*
650 * Checkpointer is the last process to shut down, so we ask it to hold
651 * the keys for a range of other tasks required most of which have
652 * nothing to do with checkpointing at all.
653 *
654 * For various reasons, some config values can change dynamically so
655 * the primary copy of them is held in shared memory to make sure all
656 * backends see the same value. We make Checkpointer responsible for
657 * updating the shared memory copy if the parameter setting changes
658 * because of SIGHUP.
659 */
661 }
662
663 /* Perform logging of memory contexts of this process */
666
667 /* Publish memory contexts of this process */
670}
671
672/*
673 * CheckArchiveTimeout -- check for archive_timeout and switch xlog files
674 *
675 * This will switch to a new WAL file and force an archive file write if
676 * meaningful activity is recorded in the current WAL file. This includes most
677 * writes, including just a single checkpoint record, but excludes WAL records
678 * that were inserted with the XLOG_MARK_UNIMPORTANT flag being set (like
679 * snapshots of running transactions). Such records, depending on
680 * configuration, occur on regular intervals and don't contain important
681 * information. This avoids generating archives with a few unimportant
682 * records.
683 */
684static void
/* NOTE(review): line 685 ("CheckArchiveTimeout(void)") and several other
 * hyperlinked lines (687-ish declaration of "now", the XLogArchiveTimeout
 * early-return tests at 691-692/697-698/709, and the
 * last_xlog_switch_time updates at 706-707/736-737) were stripped by the
 * scrape — restore from upstream. */
686{
688 pg_time_t last_time;
689 XLogRecPtr last_switch_lsn;
690
692 return;
693
694 now = (pg_time_t) time(NULL);
695
696 /* First we do a quick check using possibly-stale local state. */
698 return;
699
700 /*
701 * Update local state ... note that last_xlog_switch_time is the last time
702 * a switch was performed *or requested*.
703 */
704 last_time = GetLastSegSwitchData(&last_switch_lsn);
705
707
708 /* Now we can do the real checks */
710 {
711 /*
712 * Switch segment only when "important" WAL has been logged since the
713 * last segment switch (last_switch_lsn points to end of segment
714 * switch occurred in).
715 */
716 if (GetLastImportantRecPtr() > last_switch_lsn)
717 {
718 XLogRecPtr switchpoint;
719
720 /* mark switch as unimportant, avoids triggering checkpoints */
721 switchpoint = RequestXLogSwitch(true);
722
723 /*
724 * If the returned pointer points exactly to a segment boundary,
725 * assume nothing happened.
726 */
727 if (XLogSegmentOffset(switchpoint, wal_segment_size) != 0)
728 elog(DEBUG1, "write-ahead log switch forced (\"archive_timeout\"=%d)",
730 }
731
732 /*
733 * Update state in any case, so we don't retry constantly when the
734 * system is idle.
735 */
737 }
738}
739
740/*
741 * Returns true if an immediate checkpoint request is pending. (Note that
742 * this does not check the *current* checkpoint's IMMEDIATE flag, but whether
743 * there is one pending behind it.)
744 */
745static bool
/* NOTE(review): line 746 ("ImmediateCheckpointRequested(void)") and the
 * actual flag test on line 754 (presumably reading
 * CheckpointerShmem->ckpt_flags for CHECKPOINT_IMMEDIATE) were stripped
 * by the scrape. */
747{
749
750 /*
751 * We don't need to acquire the ckpt_lck in this case because we're only
752 * looking at a single flag bit.
753 */
755 return true;
756 return false;
757}
758
759/*
760 * CheckpointWriteDelay -- control rate of checkpoint
761 *
762 * This function is called after each page write performed by BufferSync().
763 * It is responsible for throttling BufferSync()'s write rate to hit
764 * checkpoint_completion_target.
765 *
766 * The checkpoint request flags should be passed in; currently the only one
767 * examined is CHECKPOINT_IMMEDIATE, which disables delays between writes.
768 *
769 * 'progress' is an estimate of how much of the work has been done, as a
770 * fraction between 0.0 meaning none, and 1.0 meaning all done.
771 */
772void
/* NOTE(review): line 773, the signature line
 * "CheckpointWriteDelay(int flags, double progress)" per the prototype-style
 * comment above, was stripped; also missing are the AmCheckpointerProcess
 * guard (778), the remaining conditions of the big if (786-789), and the
 * hyperlinked calls inside it (AbsorbSyncRequests, WaitLatch, etc.). */
774{
775 static int absorb_counter = WRITES_PER_ABSORB;
776
777 /* Do nothing if checkpoint is being executed by non-checkpointer process */
779 return;
780
781 /*
782 * Perform the usual duties and take a nap, unless we're behind schedule,
783 * in which case we just try to catch up as quickly as possible.
784 */
785 if (!(flags & CHECKPOINT_IMMEDIATE) &&
790 {
792 {
793 ConfigReloadPending = false;
795 /* update shmem copies of config variables */
797 }
798
800 absorb_counter = WRITES_PER_ABSORB;
801
803
804 /* Report interim statistics to the cumulative stats system */
806
807 /*
808 * This sleep used to be connected to bgwriter_delay, typically 200ms.
809 * That resulted in more frequent wakeups if not much work to do.
810 * Checkpointer and bgwriter are no longer related so take the Big
811 * Sleep.
812 */
814 100,
815 WAIT_EVENT_CHECKPOINT_WRITE_DELAY);
817 }
818 else if (--absorb_counter <= 0)
819 {
820 /*
821 * Absorb pending fsync requests after each WRITES_PER_ABSORB write
822 * operations even when we don't sleep, to prevent overflow of the
823 * fsync request queue.
824 */
826 absorb_counter = WRITES_PER_ABSORB;
827 }
828
829 /* Check for barrier events. */
832}
833
834/*
835 * IsCheckpointOnSchedule -- are we on schedule to finish this checkpoint
836 * (or restartpoint) in time?
837 *
838 * Compares the current progress against the time/segments elapsed since last
839 * checkpoint, and returns true if the progress we've made this far is greater
840 * than the elapsed time/segments.
841 */
842static bool
/* NOTE(review): line 843 ("IsCheckpointOnSchedule(double progress)") and
 * several hyperlinked lines (the elapsed_time declarator tail at 848, the
 * CheckPointCompletionTarget scaling at 853-854, the cached-value test at
 * 861, the WAL-segment divisor at 889, and the elapsed_time cache update
 * around 904-907) were stripped by the scrape. */
844{
845 XLogRecPtr recptr;
846 struct timeval now;
847 double elapsed_xlogs,
849
851
852 /* Scale progress according to checkpoint_completion_target. */
854
855 /*
856 * Check against the cached value first. Only do the more expensive
857 * calculations once we reach the target previously calculated. Since
858 * neither time or WAL insert pointer moves backwards, a freshly
859 * calculated value can only be greater than or equal to the cached value.
860 */
862 return false;
863
864 /*
865 * Check progress against WAL segments written and CheckPointSegments.
866 *
867 * We compare the current WAL insert location against the location
868 * computed before calling CreateCheckPoint. The code in XLogInsert that
869 * actually triggers a checkpoint when CheckPointSegments is exceeded
870 * compares against RedoRecPtr, so this is not completely accurate.
871 * However, it's good enough for our purposes, we're only calculating an
872 * estimate anyway.
873 *
874 * During recovery, we compare last replayed WAL record's location with
875 * the location computed before calling CreateRestartPoint. That maintains
876 * the same pacing as we have during checkpoints in normal operation, but
877 * we might exceed max_wal_size by a fair amount. That's because there can
878 * be a large gap between a checkpoint's redo-pointer and the checkpoint
879 * record itself, and we only start the restartpoint after we've seen the
880 * checkpoint record. (The gap is typically up to CheckPointSegments *
881 * checkpoint_completion_target where checkpoint_completion_target is the
882 * value that was in effect when the WAL was generated).
883 */
884 if (RecoveryInProgress())
885 recptr = GetXLogReplayRecPtr(NULL);
886 else
887 recptr = GetInsertRecPtr();
888 elapsed_xlogs = (((double) (recptr - ckpt_start_recptr)) /
890
891 if (progress < elapsed_xlogs)
892 {
893 ckpt_cached_elapsed = elapsed_xlogs;
894 return false;
895 }
896
897 /*
898 * Check progress against time elapsed and checkpoint_timeout.
899 */
900 gettimeofday(&now, NULL);
901 elapsed_time = ((double) ((pg_time_t) now.tv_sec - ckpt_start_time) +
902 now.tv_usec / 1000000.0) / CheckPointTimeout;
903
905 {
907 return false;
908 }
909
910 /* It looks like we're on schedule. */
911 return true;
912}
913
914
915/* --------------------------------
916 * signal handler routines
917 * --------------------------------
918 */
919
920/* SIGINT: set flag to trigger writing of shutdown checkpoint */
921static void
/* NOTE(review): line 922 ("ReqShutdownXLOG(SIGNAL_ARGS)" per the prototype
 * above) and line 925 (presumably SetLatch(MyLatch) to wake the main loop)
 * were stripped by the scrape. */
923{
924 ShutdownXLOGPending = true;
926}
927
928
929/* --------------------------------
930 * communication with backends
931 * --------------------------------
932 */
933
934/*
935 * CheckpointerShmemSize
936 * Compute space needed for checkpointer-related shared memory
937 */
938Size
/* NOTE(review): line 939 ("CheckpointerShmemSize(void)") was stripped by
 * the scrape. */
940{
941 Size size;
942
943 /*
944 * Currently, the size of the requests[] array is arbitrarily set equal to
945 * NBuffers. This may prove too large or small ...
946 */
947 size = offsetof(CheckpointerShmemStruct, requests);
948 size = add_size(size, mul_size(NBuffers, sizeof(CheckpointerRequest)));
949
950 return size;
951}
952
953/*
954 * CheckpointerShmemInit
955 * Allocate and initialize checkpointer-related shared memory
956 */
957void
/* NOTE(review): line 958 ("CheckpointerShmemInit(void)"), the size
 * declaration at 960, the CheckpointerShmem assignment at 963, and the
 * first-time field initialisation at 976-979 (presumably SpinLockInit on
 * ckpt_lck, max_requests, and the two ConditionVariableInit calls) were
 * stripped by the scrape. */
959{
961 bool found;
962
964 ShmemInitStruct("Checkpointer Data",
965 size,
966 &found);
967
968 if (!found)
969 {
970 /*
971 * First time through, so initialize. Note that we zero the whole
972 * requests array; this is so that CompactCheckpointerRequestQueue can
973 * assume that any pad bytes in the request structs are zeroes.
974 */
975 MemSet(CheckpointerShmem, 0, size);
980 }
981}
982
983/*
984 * RequestCheckpoint
985 * Called in backend processes to request a checkpoint
986 *
987 * flags is a bitwise OR of the following:
988 * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
989 * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
990 * CHECKPOINT_IMMEDIATE: finish the checkpoint ASAP,
991 * ignoring checkpoint_completion_target parameter.
992 * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
993 * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
994 * CHECKPOINT_END_OF_RECOVERY).
995 * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
996 * just signal checkpointer to do it, and return).
997 * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
998 * (This affects logging, and in particular enables CheckPointWarning.)
999 */
1000void
/* NOTE(review): line 1001 ("RequestCheckpoint(int flags)" per the header
 * comment above) and other hyperlinked lines — the standalone-backend guard
 * at 1010, the direct CreateCheckPoint call at 1016, the smgr cleanup at
 * 1019, the spinlock acquire/release around the flag update (1033, 1037,
 * 1039), and the ConditionVariable prepare/sleep/cancel calls in the wait
 * loops — were stripped by the scrape. */
1002{
1003 int ntries;
1004 int old_failed,
1005 old_started;
1006
1007 /*
1008 * If in a standalone backend, just do it ourselves.
1009 */
1011 {
1012 /*
1013 * There's no point in doing slow checkpoints in a standalone backend,
1014 * because there's no other backends the checkpoint could disrupt.
1015 */
1017
1018 /* Free all smgr objects, as CheckpointerMain() normally would. */
1020
1021 return;
1022 }
1023
1024 /*
1025 * Atomically set the request flags, and take a snapshot of the counters.
1026 * When we see ckpt_started > old_started, we know the flags we set here
1027 * have been seen by checkpointer.
1028 *
1029 * Note that we OR the flags with any existing flags, to avoid overriding
1030 * a "stronger" request by another backend. The flag senses must be
1031 * chosen to make this work!
1032 */
1034
1035 old_failed = CheckpointerShmem->ckpt_failed;
1036 old_started = CheckpointerShmem->ckpt_started;
1038
1040
1041 /*
1042 * Set checkpointer's latch to request checkpoint. It's possible that the
1043 * checkpointer hasn't started yet, so we will retry a few times if
1044 * needed. (Actually, more than a few times, since on slow or overloaded
1045 * buildfarm machines, it's been observed that the checkpointer can take
1046 * several seconds to start.) However, if not told to wait for the
1047 * checkpoint to occur, we consider failure to set the latch to be
1048 * nonfatal and merely LOG it. The checkpointer should see the request
1049 * when it does start, with or without the SetLatch().
1050 */
1051#define MAX_SIGNAL_TRIES 600 /* max wait 60.0 sec */
1052 for (ntries = 0;; ntries++)
1053 {
1054 volatile PROC_HDR *procglobal = ProcGlobal;
1055 ProcNumber checkpointerProc = procglobal->checkpointerProc;
1056
1057 if (checkpointerProc == INVALID_PROC_NUMBER)
1058 {
1059 if (ntries >= MAX_SIGNAL_TRIES || !(flags & CHECKPOINT_WAIT))
1060 {
1061 elog((flags & CHECKPOINT_WAIT) ? ERROR : LOG,
1062 "could not notify checkpoint: checkpointer is not running");
1063 break;
1064 }
1065 }
1066 else
1067 {
1068 SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1069 /* notified successfully */
1070 break;
1071 }
1072
1074 pg_usleep(100000L); /* wait 0.1 sec, then retry */
1075 }
1076
1077 /*
1078 * If requested, wait for completion. We detect completion according to
1079 * the algorithm given above.
1080 */
1081 if (flags & CHECKPOINT_WAIT)
1082 {
1083 int new_started,
1084 new_failed;
1085
1086 /* Wait for a new checkpoint to start. */
1088 for (;;)
1089 {
1091 new_started = CheckpointerShmem->ckpt_started;
1093
1094 if (new_started != old_started)
1095 break;
1096
1098 WAIT_EVENT_CHECKPOINT_START);
1099 }
1101
1102 /*
1103 * We are waiting for ckpt_done >= new_started, in a modulo sense.
1104 */
1106 for (;;)
1107 {
1108 int new_done;
1109
1111 new_done = CheckpointerShmem->ckpt_done;
1112 new_failed = CheckpointerShmem->ckpt_failed;
1114
1115 if (new_done - new_started >= 0)
1116 break;
1118
1119 WAIT_EVENT_CHECKPOINT_DONE);
1120 }
1122
1123 if (new_failed != old_failed)
1124 ereport(ERROR,
1125 (errmsg("checkpoint request failed"),
1126 errhint("Consult recent messages in the server log for details.")));
1127 }
1128}
1129
1130/*
1131 * ForwardSyncRequest
1132 * Forward a file-fsync request from a backend to the checkpointer
1133 *
1134 * Whenever a backend is compelled to write directly to a relation
1135 * (which should be seldom, if the background writer is getting its job done),
1136 * the backend calls this routine to pass over knowledge that the relation
1137 * is dirty and must be fsync'd before next checkpoint. We also use this
1138 * opportunity to count such writes for statistical purposes.
1139 *
1140 * To avoid holding the lock for longer than necessary, we normally write
1141 * to the requests[] queue without checking for duplicates. The checkpointer
1142 * will have to eliminate dups internally anyway. However, if we discover
1143 * that the queue is full, we make a pass over the entire queue to compact
1144 * it. This is somewhat expensive, but the alternative is for the backend
1145 * to perform its own fsync, which is far more expensive in practice. It
1146 * is theoretically possible a backend fsync might still be necessary, if
1147 * the queue is full and contains no duplicate entries. In that case, we
1148 * let the backend know by returning false.
1149 */
1150bool
/* NOTE(review): line 1151 (the signature, per sync-request forwarding
 * convention presumably "ForwardSyncRequest(const FileTag *ftag,
 * SyncRequestType type)" — confirm upstream), the checkpointer-process
 * guard at 1159, the queue-full/compaction condition at 1169-1171, the
 * request-slot assignment at 1178, and the half-full threshold expression
 * at 1184 were stripped by the scrape. */
1152{
1153 CheckpointerRequest *request;
1154 bool too_full;
1155
1156 if (!IsUnderPostmaster)
1157 return false; /* probably shouldn't even get here */
1158
1160 elog(ERROR, "ForwardSyncRequest must not be called in checkpointer");
1161
1162 LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1163
1164 /*
1165 * If the checkpointer isn't running or the request queue is full, the
1166 * backend will have to perform its own fsync request. But before forcing
1167 * that to happen, we can try to compact the request queue.
1168 */
1172 {
1173 LWLockRelease(CheckpointerCommLock);
1174 return false;
1175 }
1176
1177 /* OK, insert request */
1179 request->ftag = *ftag;
1180 request->type = type;
1181
1182 /* If queue is more than half full, nudge the checkpointer to empty it */
1183 too_full = (CheckpointerShmem->num_requests >=
1185
1186 LWLockRelease(CheckpointerCommLock);
1187
1188 /* ... but not till after we release the lock */
1189 if (too_full)
1190 {
1191 volatile PROC_HDR *procglobal = ProcGlobal;
1192 ProcNumber checkpointerProc = procglobal->checkpointerProc;
1193
1194 if (checkpointerProc != INVALID_PROC_NUMBER)
1195 SetLatch(&GetPGProcByNumber(checkpointerProc)->procLatch);
1196 }
1197
1198 return true;
1199}
1200
1201/*
1202 * CompactCheckpointerRequestQueue
1203 * Remove duplicates from the request queue to avoid backend fsyncs.
1204 * Returns "true" if any entries were removed.
1205 *
1206 * Although a full fsync request queue is not common, it can lead to severe
1207 * performance problems when it does happen. So far, this situation has
1208 * only been observed to occur when the system is under heavy write load,
1209 * and especially during the "sync" phase of a checkpoint. Without this
1210 * logic, each backend begins doing an fsync for every block written, which
1211 * gets very expensive and can slow down the whole system.
1212 *
1213 * Trying to do this every time the queue is full could lose if there
1214 * aren't any removable entries. But that should be vanishingly rare in
1215 * practice: there's one queue entry per shared buffer.
1216 */
1217static bool
1219{
1220 struct CheckpointerSlotMapping
1221 {
1222 CheckpointerRequest request;
1223 int slot;
1224 };
1225
1226 int n,
1227 preserve_count;
1228 int num_skipped = 0;
1229 HASHCTL ctl;
1230 HTAB *htab;
1231 bool *skip_slot;
1232
1233 /* must hold CheckpointerCommLock in exclusive mode */
1234 Assert(LWLockHeldByMe(CheckpointerCommLock));
1235
1236 /* Avoid memory allocations in a critical section. */
1237 if (CritSectionCount > 0)
1238 return false;
1239
1240 /* Initialize skip_slot array */
1241 skip_slot = palloc0(sizeof(bool) * CheckpointerShmem->num_requests);
1242
1243 /* Initialize temporary hash table */
1244 ctl.keysize = sizeof(CheckpointerRequest);
1245 ctl.entrysize = sizeof(struct CheckpointerSlotMapping);
1247
1248 htab = hash_create("CompactCheckpointerRequestQueue",
1250 &ctl,
1252
1253 /*
1254 * The basic idea here is that a request can be skipped if it's followed
1255 * by a later, identical request. It might seem more sensible to work
1256 * backwards from the end of the queue and check whether a request is
1257 * *preceded* by an earlier, identical request, in the hopes of doing less
1258 * copying. But that might change the semantics, if there's an
1259 * intervening SYNC_FORGET_REQUEST or SYNC_FILTER_REQUEST, so we do it
1260 * this way. It would be possible to be even smarter if we made the code
1261 * below understand the specific semantics of such requests (it could blow
1262 * away preceding entries that would end up being canceled anyhow), but
1263 * it's not clear that the extra complexity would buy us anything.
1264 */
1265 for (n = 0; n < CheckpointerShmem->num_requests; n++)
1266 {
1267 CheckpointerRequest *request;
1268 struct CheckpointerSlotMapping *slotmap;
1269 bool found;
1270
1271 /*
1272 * We use the request struct directly as a hashtable key. This
1273 * assumes that any padding bytes in the structs are consistently the
1274 * same, which should be okay because we zeroed them in
1275 * CheckpointerShmemInit. Note also that RelFileLocator had better
1276 * contain no pad bytes.
1277 */
1278 request = &CheckpointerShmem->requests[n];
1279 slotmap = hash_search(htab, request, HASH_ENTER, &found);
1280 if (found)
1281 {
1282 /* Duplicate, so mark the previous occurrence as skippable */
1283 skip_slot[slotmap->slot] = true;
1284 num_skipped++;
1285 }
1286 /* Remember slot containing latest occurrence of this request value */
1287 slotmap->slot = n;
1288 }
1289
1290 /* Done with the hash table. */
1291 hash_destroy(htab);
1292
1293 /* If no duplicates, we're out of luck. */
1294 if (!num_skipped)
1295 {
1296 pfree(skip_slot);
1297 return false;
1298 }
1299
1300 /* We found some duplicates; remove them. */
1301 preserve_count = 0;
1302 for (n = 0; n < CheckpointerShmem->num_requests; n++)
1303 {
1304 if (skip_slot[n])
1305 continue;
1306 CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
1307 }
1309 (errmsg_internal("compacted fsync request queue from %d entries to %d entries",
1310 CheckpointerShmem->num_requests, preserve_count)));
1311 CheckpointerShmem->num_requests = preserve_count;
1312
1313 /* Cleanup. */
1314 pfree(skip_slot);
1315 return true;
1316}
1317
1318/*
1319 * AbsorbSyncRequests
1320 * Retrieve queued sync requests and pass them to sync mechanism.
1321 *
1322 * This is exported because it must be called during CreateCheckPoint;
1323 * we have to be sure we have accepted all pending requests just before
1324 * we start fsync'ing. Since CreateCheckPoint sometimes runs in
1325 * non-checkpointer processes, do nothing if not checkpointer.
1326 */
1327void
1329{
1330 CheckpointerRequest *requests = NULL;
1331 CheckpointerRequest *request;
1332 int n;
1333
1334 if (!AmCheckpointerProcess())
1335 return;
1336
1337 LWLockAcquire(CheckpointerCommLock, LW_EXCLUSIVE);
1338
1339 /*
1340 * We try to avoid holding the lock for a long time by copying the request
1341 * array, and processing the requests after releasing the lock.
1342 *
1343 * Once we have cleared the requests from shared memory, we have to PANIC
1344 * if we then fail to absorb them (eg, because our hashtable runs out of
1345 * memory). This is because the system cannot run safely if we are unable
1346 * to fsync what we have been told to fsync. Fortunately, the hashtable
1347 * is so small that the problem is quite unlikely to arise in practice.
1348 */
1350 if (n > 0)
1351 {
1352 requests = (CheckpointerRequest *) palloc(n * sizeof(CheckpointerRequest));
1353 memcpy(requests, CheckpointerShmem->requests, n * sizeof(CheckpointerRequest));
1354 }
1355
1357
1359
1360 LWLockRelease(CheckpointerCommLock);
1361
1362 for (request = requests; n > 0; request++, n--)
1363 RememberSyncRequest(&request->ftag, request->type);
1364
1366
1367 if (requests)
1368 pfree(requests);
1369}
1370
1371/*
1372 * Update any shared memory configurations based on config parameters
1373 */
1374static void
1376{
1377 /* update global shmem state for sync rep */
1379
1380 /*
1381 * If full_page_writes has been changed by SIGHUP, we update it in shared
1382 * memory and write an XLOG_FPW_CHANGE record.
1383 */
1385
1386 elog(DEBUG2, "checkpointer updated shared memory configuration values");
1387}
1388
1389/*
1390 * FirstCallSinceLastCheckpoint allows a process to take an action once
1391 * per checkpoint cycle by asynchronously checking for checkpoint completion.
1392 */
1393bool
1395{
1396 static int ckpt_done = 0;
1397 int new_done;
1398 bool FirstCall = false;
1399
1401 new_done = CheckpointerShmem->ckpt_done;
1403
1404 if (new_done != ckpt_done)
1405 FirstCall = true;
1406
1407 ckpt_done = new_done;
1408
1409 return FirstCall;
1410}
void pgaio_error_cleanup(void)
Definition: aio.c:1062
void AuxiliaryProcessMainCommon(void)
Definition: auxprocess.c:39
sigset_t UnBlockSig
Definition: pqsignal.c:22
Datum now(PG_FUNCTION_ARGS)
Definition: timestamp.c:1609
void AtEOXact_Buffers(bool isCommit)
Definition: bufmgr.c:3989
void UnlockBuffers(void)
Definition: bufmgr.c:5509
#define Min(x, y)
Definition: c.h:975
#define Max(x, y)
Definition: c.h:969
#define SIGNAL_ARGS
Definition: c.h:1320
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:434
#define MemSet(start, val, len)
Definition: c.h:991
size_t Size
Definition: c.h:576
static void UpdateSharedMemoryConfig(void)
static XLogRecPtr ckpt_start_recptr
Definition: checkpointer.c:153
static bool ImmediateCheckpointRequested(void)
Definition: checkpointer.c:746
static bool IsCheckpointOnSchedule(double progress)
Definition: checkpointer.c:843
bool ForwardSyncRequest(const FileTag *ftag, SyncRequestType type)
static void ReqShutdownXLOG(SIGNAL_ARGS)
Definition: checkpointer.c:922
static void CheckArchiveTimeout(void)
Definition: checkpointer.c:685
static double ckpt_cached_elapsed
Definition: checkpointer.c:154
static bool ckpt_active
Definition: checkpointer.c:148
void CheckpointerMain(const void *startup_data, size_t startup_data_len)
Definition: checkpointer.c:179
static bool CompactCheckpointerRequestQueue(void)
static void ProcessCheckpointerInterrupts(void)
Definition: checkpointer.c:639
static volatile sig_atomic_t ShutdownXLOGPending
Definition: checkpointer.c:149
#define MAX_SIGNAL_TRIES
void AbsorbSyncRequests(void)
#define WRITES_PER_ABSORB
Definition: checkpointer.c:136
double CheckPointCompletionTarget
Definition: checkpointer.c:143
static pg_time_t last_xlog_switch_time
Definition: checkpointer.c:157
int CheckPointWarning
Definition: checkpointer.c:142
void CheckpointerShmemInit(void)
Definition: checkpointer.c:958
bool FirstCallSinceLastCheckpoint(void)
static CheckpointerShmemStruct * CheckpointerShmem
Definition: checkpointer.c:133
int CheckPointTimeout
Definition: checkpointer.c:141
void RequestCheckpoint(int flags)
static pg_time_t last_checkpoint_time
Definition: checkpointer.c:156
void CheckpointWriteDelay(int flags, double progress)
Definition: checkpointer.c:773
static pg_time_t ckpt_start_time
Definition: checkpointer.c:152
Size CheckpointerShmemSize(void)
Definition: checkpointer.c:939
bool ConditionVariableCancelSleep(void)
void ConditionVariableBroadcast(ConditionVariable *cv)
void ConditionVariablePrepareToSleep(ConditionVariable *cv)
void ConditionVariableInit(ConditionVariable *cv)
void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:955
void AtEOXact_HashTables(bool isCommit)
Definition: dynahash.c:1912
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:865
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:352
int errmsg_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...)
Definition: elog.c:1181
int errmsg_internal(const char *fmt,...)
Definition: elog.c:1158
void EmitErrorReport(void)
Definition: elog.c:1709
ErrorContextCallback * error_context_stack
Definition: elog.c:95
void FlushErrorState(void)
Definition: elog.c:1889
int errhint(const char *fmt,...)
Definition: elog.c:1318
int errmsg(const char *fmt,...)
Definition: elog.c:1071
sigjmp_buf * PG_exception_stack
Definition: elog.c:97
#define LOG
Definition: elog.h:31
#define DEBUG2
Definition: elog.h:29
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:149
static double elapsed_time(instr_time *starttime)
Definition: explain.c:1179
void AtEOXact_Files(bool isCommit)
Definition: fd.c:3229
volatile sig_atomic_t LogMemoryContextPending
Definition: globals.c:41
volatile sig_atomic_t ProcSignalBarrierPending
Definition: globals.c:40
int NBuffers
Definition: globals.c:143
int MyProcPid
Definition: globals.c:48
ProcNumber MyProcNumber
Definition: globals.c:91
bool IsUnderPostmaster
Definition: globals.c:121
volatile uint32 CritSectionCount
Definition: globals.c:46
bool ExitOnAnyError
Definition: globals.c:124
bool IsPostmasterEnvironment
Definition: globals.c:120
volatile sig_atomic_t PublishMemoryContextPending
Definition: globals.c:42
struct Latch * MyLatch
Definition: globals.c:64
void ProcessConfigFile(GucContext context)
Definition: guc-file.l:120
@ PGC_SIGHUP
Definition: guc.h:75
Assert(PointerIsAligned(start, uint64))
@ HASH_ENTER
Definition: hsearch.h:114
#define HASH_CONTEXT
Definition: hsearch.h:102
#define HASH_ELEM
Definition: hsearch.h:95
#define HASH_BLOBS
Definition: hsearch.h:97
void SignalHandlerForShutdownRequest(SIGNAL_ARGS)
Definition: interrupt.c:109
volatile sig_atomic_t ShutdownRequestPending
Definition: interrupt.c:28
volatile sig_atomic_t ConfigReloadPending
Definition: interrupt.c:27
void SignalHandlerForConfigReload(SIGNAL_ARGS)
Definition: interrupt.c:65
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition: ipc.c:337
void proc_exit(int code)
Definition: ipc.c:104
void SetLatch(Latch *latch)
Definition: latch.c:288
void ResetLatch(Latch *latch)
Definition: latch.c:372
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition: latch.c:172
bool LWLockHeldByMe(LWLock *lock)
Definition: lwlock.c:1970
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1182
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1902
void LWLockReleaseAll(void)
Definition: lwlock.c:1953
@ LW_EXCLUSIVE
Definition: lwlock.h:114
void MemoryContextReset(MemoryContext context)
Definition: mcxt.c:414
void pfree(void *pointer)
Definition: mcxt.c:2147
void * palloc0(Size size)
Definition: mcxt.c:1970
void ProcessGetMemoryContextInterrupt(void)
Definition: mcxt.c:1432
MemoryContext TopMemoryContext
Definition: mcxt.c:165
void * palloc(Size size)
Definition: mcxt.c:1940
MemoryContext CurrentMemoryContext
Definition: mcxt.c:159
void ProcessLogMemoryContextInterrupt(void)
Definition: mcxt.c:1380
#define AllocSetContextCreate
Definition: memutils.h:149
#define ALLOCSET_DEFAULT_SIZES
Definition: memutils.h:180
#define AmCheckpointerProcess()
Definition: miscadmin.h:389
#define RESUME_INTERRUPTS()
Definition: miscadmin.h:136
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:123
#define HOLD_INTERRUPTS()
Definition: miscadmin.h:134
@ B_CHECKPOINTER
Definition: miscadmin.h:363
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
BackendType MyBackendType
Definition: miscinit.c:64
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition: palloc.h:124
static int progress
Definition: pgbench.c:262
void pgstat_before_server_shutdown(int code, Datum arg)
Definition: pgstat.c:559
void pgstat_report_checkpointer(void)
PgStat_CheckpointerStats PendingCheckpointerStats
void pgstat_report_wal(bool force)
Definition: pgstat_wal.c:46
int64 pg_time_t
Definition: pgtime.h:23
void SendPostmasterSignal(PMSignalReason reason)
Definition: pmsignal.c:165
@ PMSIGNAL_XLOG_IS_SHUTDOWN
Definition: pmsignal.h:44
#define pqsignal
Definition: port.h:531
#define GetPGProcByNumber(n)
Definition: proc.h:424
#define INVALID_PROC_NUMBER
Definition: procnumber.h:26
int ProcNumber
Definition: procnumber.h:24
void ProcessProcSignalBarrier(void)
Definition: procsignal.c:498
void procsignal_sigusr1_handler(SIGNAL_ARGS)
Definition: procsignal.c:673
tree ctl
Definition: radixtree.h:1838
void ReleaseAuxProcessResources(bool isCommit)
Definition: resowner.c:1019
Size add_size(Size s1, Size s2)
Definition: shmem.c:493
Size mul_size(Size s1, Size s2)
Definition: shmem.c:510
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:387
void pg_usleep(long microsec)
Definition: signal.c:53
void smgrdestroyall(void)
Definition: smgr.c:386
void AtEOXact_SMgr(void)
Definition: smgr.c:1008
#define SpinLockInit(lock)
Definition: spin.h:57
#define SpinLockRelease(lock)
Definition: spin.h:61
#define SpinLockAcquire(lock)
Definition: spin.h:59
PROC_HDR * ProcGlobal
Definition: proc.c:79
SyncRequestType type
Definition: checkpointer.c:109
ConditionVariable done_cv
Definition: checkpointer.c:126
ConditionVariable start_cv
Definition: checkpointer.c:125
CheckpointerRequest requests[FLEXIBLE_ARRAY_MEMBER]
Definition: checkpointer.c:130
Definition: sync.h:51
Definition: dynahash.c:220
Definition: proc.h:370
ProcNumber checkpointerProc
Definition: proc.h:409
PgStat_Counter restartpoints_requested
Definition: pgstat.h:259
PgStat_Counter num_requested
Definition: pgstat.h:256
PgStat_Counter num_performed
Definition: pgstat.h:257
PgStat_Counter restartpoints_timed
Definition: pgstat.h:258
PgStat_Counter num_timed
Definition: pgstat.h:255
PgStat_Counter restartpoints_performed
Definition: pgstat.h:260
void RememberSyncRequest(const FileTag *ftag, SyncRequestType type)
Definition: sync.c:487
SyncRequestType
Definition: sync.h:24
void SyncRepUpdateSyncStandbysDefined(void)
Definition: syncrep.c:964
static void pgstat_report_wait_end(void)
Definition: wait_event.h:101
const char * type
#define WL_TIMEOUT
Definition: waiteventset.h:37
#define WL_EXIT_ON_PM_DEATH
Definition: waiteventset.h:39
#define WL_LATCH_SET
Definition: waiteventset.h:34
#define SIGCHLD
Definition: win32_port.h:168
#define SIGHUP
Definition: win32_port.h:158
#define SIGPIPE
Definition: win32_port.h:163
#define SIGUSR1
Definition: win32_port.h:170
#define SIGALRM
Definition: win32_port.h:164
#define SIGUSR2
Definition: win32_port.h:171
int gettimeofday(struct timeval *tp, void *tzp)
void UpdateFullPageWrites(void)
Definition: xlog.c:8354
bool RecoveryInProgress(void)
Definition: xlog.c:6522
XLogRecPtr RequestXLogSwitch(bool mark_unimportant)
Definition: xlog.c:8248
bool CreateRestartPoint(int flags)
Definition: xlog.c:7779
XLogRecPtr GetInsertRecPtr(void)
Definition: xlog.c:6670
int wal_segment_size
Definition: xlog.c:143
void ShutdownXLOG(int code, Datum arg)
Definition: xlog.c:6790
int XLogArchiveTimeout
Definition: xlog.c:118
pg_time_t GetLastSegSwitchData(XLogRecPtr *lastSwitchLSN)
Definition: xlog.c:6773
XLogRecPtr GetLastImportantRecPtr(void)
Definition: xlog.c:6744
bool CreateCheckPoint(int flags)
Definition: xlog.c:7077
int CheckPointSegments
Definition: xlog.c:156
#define CHECKPOINT_CAUSE_XLOG
Definition: xlog.h:148
#define CHECKPOINT_END_OF_RECOVERY
Definition: xlog.h:140
#define CHECKPOINT_CAUSE_TIME
Definition: xlog.h:149
#define CHECKPOINT_REQUESTED
Definition: xlog.h:146
#define CHECKPOINT_WAIT
Definition: xlog.h:145
#define CHECKPOINT_IMMEDIATE
Definition: xlog.h:141
#define XLogSegmentOffset(xlogptr, wal_segsz_bytes)
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)