PostgreSQL Source Code git master
Loading...
Searching...
No Matches
xlog.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * xlog.c
4 * PostgreSQL write-ahead log manager
5 *
6 * The Write-Ahead Log (WAL) functionality is split into several source
7 * files, in addition to this one:
8 *
9 * xloginsert.c - Functions for constructing WAL records
10 * xlogrecovery.c - WAL recovery and standby code
11 * xlogreader.c - Facility for reading WAL files and parsing WAL records
12 * xlogutils.c - Helper functions for WAL redo routines
13 *
14 * This file contains functions for coordinating database startup and
15 * checkpointing, and managing the write-ahead log buffers when the
16 * system is running.
17 *
18 * StartupXLOG() is the main entry point of the startup process. It
19 * coordinates database startup, performing WAL recovery, and the
20 * transition from WAL recovery into normal operations.
21 *
22 * XLogInsertRecord() inserts a WAL record into the WAL buffers. Most
23 * callers should not call this directly, but use the functions in
24 * xloginsert.c to construct the WAL record. XLogFlush() can be used
25 * to force the WAL to disk.
26 *
27 * In addition to those, there are many other functions for interrogating
28 * the current system state, and for starting/stopping backups.
29 *
30 *
31 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
32 * Portions Copyright (c) 1994, Regents of the University of California
33 *
34 * src/backend/access/transam/xlog.c
35 *
36 *-------------------------------------------------------------------------
37 */
38
39#include "postgres.h"
40
41#include <ctype.h>
42#include <math.h>
43#include <time.h>
44#include <fcntl.h>
45#include <sys/stat.h>
46#include <sys/time.h>
47#include <unistd.h>
48
49#include "access/clog.h"
50#include "access/commit_ts.h"
51#include "access/heaptoast.h"
52#include "access/multixact.h"
53#include "access/rewriteheap.h"
54#include "access/subtrans.h"
55#include "access/timeline.h"
56#include "access/transam.h"
57#include "access/twophase.h"
58#include "access/xact.h"
60#include "access/xlogarchive.h"
61#include "access/xloginsert.h"
62#include "access/xlogreader.h"
63#include "access/xlogrecovery.h"
64#include "access/xlogutils.h"
65#include "access/xlogwait.h"
66#include "backup/basebackup.h"
67#include "catalog/catversion.h"
68#include "catalog/pg_control.h"
69#include "catalog/pg_database.h"
71#include "common/file_utils.h"
72#include "executor/instrument.h"
73#include "miscadmin.h"
74#include "pg_trace.h"
75#include "pgstat.h"
76#include "port/atomics.h"
77#include "postmaster/bgwriter.h"
79#include "postmaster/startup.h"
82#include "replication/origin.h"
83#include "replication/slot.h"
88#include "storage/bufmgr.h"
89#include "storage/fd.h"
90#include "storage/ipc.h"
92#include "storage/latch.h"
93#include "storage/predicate.h"
94#include "storage/proc.h"
95#include "storage/procarray.h"
96#include "storage/procsignal.h"
97#include "storage/reinit.h"
98#include "storage/spin.h"
99#include "storage/subsystems.h"
100#include "storage/sync.h"
101#include "utils/guc_hooks.h"
102#include "utils/guc_tables.h"
105#include "utils/ps_status.h"
106#include "utils/relmapper.h"
107#include "utils/snapmgr.h"
108#include "utils/timeout.h"
109#include "utils/timestamp.h"
110#include "utils/varlena.h"
111#include "utils/wait_event.h"
112
113#ifdef WAL_DEBUG
114#include "utils/memutils.h"
115#endif
116
/* timeline ID to be used when bootstrapping */
#define BootstrapTimeLineID 1

/*
 * User-settable parameters (GUCs).
 *
 * NOTE(review): the extraction's numbering skips several lines here, so
 * some declarations are probably missing -- verify against the upstream
 * file before relying on this list being complete.
 */
int			max_wal_size_mb = 1024; /* soft limit on total WAL size, in MB (1 GB) */
int			min_wal_size_mb = 80;	/* floor kept when shrinking WAL, in MB (80 MB) */
int			XLOGbuffers = -1;	/* # of WAL buffers; -1 means auto-size at startup */
bool		EnableHotStandby = false;	/* allow read-only queries during recovery */
bool		fullPageWrites = true;	/* write full page images after checkpoints */
bool		wal_log_hints = false;	/* also WAL-log hint-bit updates */
bool		wal_init_zero = true;	/* zero-fill newly created WAL segments */
bool		wal_recycle = true; /* recycle old WAL segments instead of removing */
bool		log_checkpoints = true; /* emit server log messages for checkpoints */
int			CommitDelay = 0;	/* precommit delay in microseconds */
int			CommitSiblings = 5; /* # concurrent xacts needed to sleep */
int			wal_decode_buffer_size = 512 * 1024;	/* WAL decode readahead, in bytes */

#ifdef WAL_DEBUG
bool		XLOG_DEBUG = false; /* runtime switch for verbose WAL tracing */
#endif
151
152/*
153 * Number of WAL insertion locks to use. A higher value allows more insertions
154 * to happen concurrently, but adds some CPU overhead to flushing the WAL,
155 * which needs to iterate all the locks.
156 */
157#define NUM_XLOGINSERT_LOCKS 8
158
159/*
160 * Max distance from last checkpoint, before triggering a new xlog-based
161 * checkpoint.
162 */
164
165/* Estimated distance between checkpoints, in bytes */
167static double PrevCheckPointDistance = 0;
168
169/*
170 * Track whether there were any deferred checks for custom resource managers
171 * specified in wal_consistency_checking.
172 */
174
/*
 * GUC support
 *
 * NOTE(review): the array declaration line (presumably
 * "const struct config_enum_entry wal_sync_method_options[] = {") is
 * missing from this extraction; restore it from the upstream file.
 * Entries conditional on HAVE_FSYNC_WRITETHROUGH / O_SYNC / O_DSYNC are
 * only offered on platforms providing those facilities.
 */
	{"fsync", WAL_SYNC_METHOD_FSYNC, false},
#ifdef HAVE_FSYNC_WRITETHROUGH
	{"fsync_writethrough", WAL_SYNC_METHOD_FSYNC_WRITETHROUGH, false},
#endif
	{"fdatasync", WAL_SYNC_METHOD_FDATASYNC, false},
#ifdef O_SYNC
	{"open_sync", WAL_SYNC_METHOD_OPEN, false},
#endif
#ifdef O_DSYNC
	{"open_datasync", WAL_SYNC_METHOD_OPEN_DSYNC, false},
#endif
	{NULL, 0, false}			/* list terminator */
};
192

/*
 * Although only "on", "off", and "always" are documented,
 * we accept all the likely variants of "on" and "off".
 *
 * NOTE(review): the array declaration line (presumably
 * "const struct config_enum_entry archive_mode_options[] = {") is missing
 * from this extraction; restore it from the upstream file.
 */
	{"always", ARCHIVE_MODE_ALWAYS, false},
	{"on", ARCHIVE_MODE_ON, false},
	{"off", ARCHIVE_MODE_OFF, false},
	{"true", ARCHIVE_MODE_ON, true},
	{"false", ARCHIVE_MODE_OFF, true},
	{"yes", ARCHIVE_MODE_ON, true},
	{"no", ARCHIVE_MODE_OFF, true},
	{"1", ARCHIVE_MODE_ON, true},
	{"0", ARCHIVE_MODE_OFF, true},
	{NULL, 0, false}			/* list terminator */
};
210
211/*
212 * Statistics for current checkpoint are collected in this global struct.
213 * Because only the checkpointer or a stand-alone backend can perform
214 * checkpoints, this will be unused in normal backends.
215 */
217
218/*
219 * During recovery, lastFullPageWrites keeps track of full_page_writes that
220 * the replayed WAL records indicate. It's initialized with full_page_writes
221 * that the recovery starting checkpoint record indicates, and then updated
222 * each time XLOG_FPW_CHANGE record is replayed.
223 */
225
/*
 * Local copy of the state tracked by SharedRecoveryState in shared memory.
 * It is false if SharedRecoveryState is RECOVERY_STATE_DONE. True actually
 * means "not known, need to check the shared state", so starting out true
 * forces the first check to consult shared memory.
 */
static bool LocalRecoveryInProgress = true;
232
233/*
234 * Local state for XLogInsertAllowed():
235 * 1: unconditionally allowed to insert XLOG
236 * 0: unconditionally not allowed to insert XLOG
237 * -1: must check RecoveryInProgress(); disallow until it is false
238 * Most processes start with -1 and transition to 1 after seeing that recovery
239 * is not in progress. But we can also force the value for special cases.
240 * The coding in XLogInsertAllowed() depends on the first two of these states
241 * being numerically the same as bool true and false.
242 */
244
245/*
246 * ProcLastRecPtr points to the start of the last XLOG record inserted by the
247 * current backend. It is updated for all inserts. XactLastRecEnd points to
248 * end+1 of the last record, and is reset when we end a top-level transaction,
249 * or start a new one; so it can be used to tell if the current transaction has
250 * created any XLOG records.
251 *
252 * While in parallel mode, this may not be fully up to date. When committing,
253 * a transaction can assume this covers all xlog records written either by the
254 * user backend or by any parallel worker which was present at any point during
255 * the transaction. But when aborting, or when still in parallel mode, other
256 * parallel backends may have written WAL records at later LSNs than the value
257 * stored here. The parallel leader advances its own copy, when necessary,
258 * in WaitForParallelWorkersToFinish.
259 */
263
264/*
265 * RedoRecPtr is this backend's local copy of the REDO record pointer
266 * (which is almost but not quite the same as a pointer to the most recent
267 * CHECKPOINT record). We update this from the shared-memory copy,
268 * XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
269 * hold an insertion lock). See XLogInsertRecord for details. We are also
270 * allowed to update from XLogCtl->RedoRecPtr if we hold the info_lck;
271 * see GetRedoRecPtr.
272 *
273 * NB: Code that uses this variable must be prepared not only for the
274 * possibility that it may be arbitrarily out of date, but also for the
275 * possibility that it might be set to InvalidXLogRecPtr. We used to
276 * initialize it as a side effect of the first call to RecoveryInProgress(),
277 * which meant that most code that might use it could assume that it had a
278 * real if perhaps stale value. That's no longer the case.
279 */
281
/*
 * doPageWrites is this backend's local copy of (fullPageWrites ||
 * runningBackups > 0). It is used together with RedoRecPtr to decide whether
 * a full-page image of a page needs to be taken.
 *
 * NB: Initially this is false, and there's no guarantee that it will be
 * initialized to any other value before it is first used. Any code that
 * makes use of it must recheck the value after obtaining a WALInsertLock,
 * and respond appropriately if it turns out that the previous value wasn't
 * accurate.
 */
static bool doPageWrites;
294
295/*----------
296 * Shared-memory data structures for XLOG control
297 *
298 * LogwrtRqst indicates a byte position that we need to write and/or fsync
299 * the log up to (all records before that point must be written or fsynced).
300 * The positions already written/fsynced are maintained in logWriteResult
301 * and logFlushResult using atomic access.
302 * In addition to the shared variable, each backend has a private copy of
303 * both in LogwrtResult, which is updated when convenient.
304 *
305 * The request bookkeeping is simpler: there is a shared XLogCtl->LogwrtRqst
306 * (protected by info_lck), but we don't need to cache any copies of it.
307 *
308 * info_lck is only held long enough to read/update the protected variables,
309 * so it's a plain spinlock. The other locks are held longer (potentially
310 * over I/O operations), so we use LWLocks for them. These locks are:
311 *
312 * WALBufMappingLock: must be held to replace a page in the WAL buffer cache.
313 * It is only held while initializing and changing the mapping. If the
314 * contents of the buffer being replaced haven't been written yet, the mapping
315 * lock is released while the write is done, and reacquired afterwards.
316 *
317 * WALWriteLock: must be held to write WAL buffers to disk (XLogWrite or
318 * XLogFlush).
319 *
320 * ControlFileLock: must be held to read/update control file or create
321 * new log file.
322 *
323 *----------
324 */
325
326typedef struct XLogwrtRqst
327{
328 XLogRecPtr Write; /* last byte + 1 to write out */
329 XLogRecPtr Flush; /* last byte + 1 to flush */
331
332typedef struct XLogwrtResult
333{
334 XLogRecPtr Write; /* last byte + 1 written out */
335 XLogRecPtr Flush; /* last byte + 1 flushed */
337
338/*
339 * Inserting to WAL is protected by a small fixed number of WAL insertion
340 * locks. To insert to the WAL, you must hold one of the locks - it doesn't
341 * matter which one. To lock out other concurrent insertions, you must hold
342 * of them. Each WAL insertion lock consists of a lightweight lock, plus an
343 * indicator of how far the insertion has progressed (insertingAt).
344 *
345 * The insertingAt values are read when a process wants to flush WAL from
346 * the in-memory buffers to disk, to check that all the insertions to the
347 * region the process is about to write out have finished. You could simply
348 * wait for all currently in-progress insertions to finish, but the
349 * insertingAt indicator allows you to ignore insertions to later in the WAL,
350 * so that you only wait for the insertions that are modifying the buffers
351 * you're about to write out.
352 *
353 * This isn't just an optimization. If all the WAL buffers are dirty, an
354 * inserter that's holding a WAL insert lock might need to evict an old WAL
355 * buffer, which requires flushing the WAL. If it's possible for an inserter
356 * to block on another inserter unnecessarily, deadlock can arise when two
357 * inserters holding a WAL insert lock wait for each other to finish their
358 * insertion.
359 *
360 * Small WAL records that don't cross a page boundary never update the value,
361 * the WAL record is just copied to the page and the lock is released. But
362 * to avoid the deadlock-scenario explained above, the indicator is always
363 * updated before sleeping while holding an insertion lock.
364 *
365 * lastImportantAt contains the LSN of the last important WAL record inserted
366 * using a given lock. This value is used to detect if there has been
367 * important WAL activity since the last time some action, like a checkpoint,
368 * was performed - allowing to not repeat the action if not. The LSN is
369 * updated for all insertions, unless the XLOG_MARK_UNIMPORTANT flag was
370 * set. lastImportantAt is never cleared, only overwritten by the LSN of newer
371 * records. Tracking the WAL activity directly in WALInsertLock has the
372 * advantage of not needing any additional locks to update the value.
373 */
380
381/*
382 * All the WAL insertion locks are allocated as an array in shared memory. We
383 * force the array stride to be a power of 2, which saves a few cycles in
384 * indexing, but more importantly also ensures that individual slots don't
385 * cross cache line boundaries. (Of course, we have to also ensure that the
386 * array start address is suitably aligned.)
387 */
393
394/*
395 * Session status of running backup, used for sanity checks in SQL-callable
396 * functions to start and stop backups.
397 */
399
/*
 * Shared state data for WAL insertion.
 *
 * NOTE(review): several member declarations are missing from this
 * extraction (the CurrBytePos/PrevBytePos fields described below, the
 * cache-line padding, fullPageWrites, runningBackups/lastBackupStart, the
 * WAL insertion lock array, and the struct's closing brace).  Restore them
 * from the upstream file before compiling.
 */
typedef struct XLogCtlInsert
{
	slock_t		insertpos_lck;	/* protects CurrBytePos and PrevBytePos */

	/*
	 * CurrBytePos is the end of reserved WAL. The next record will be
	 * inserted at that position. PrevBytePos is the start position of the
	 * previously inserted (or rather, reserved) record - it is copied to the
	 * prev-link of the next record. These are stored as "usable byte
	 * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()).
	 */

	/*
	 * Make sure the above heavily-contended spinlock and byte positions are
	 * on their own cache line. In particular, the RedoRecPtr and full page
	 * write variables below should be on a different cache line. They are
	 * read on every WAL insertion, but updated rarely, and we don't want
	 * those reads to steal the cache line containing Curr/PrevBytePos.
	 */

	/*
	 * fullPageWrites is the authoritative value used by all backends to
	 * determine whether to write full-page image to WAL. This shared value,
	 * instead of the process-local fullPageWrites, is required because, when
	 * full_page_writes is changed by SIGHUP, we must WAL-log it before it
	 * actually affects WAL-logging by backends. Checkpointer sets at startup
	 * or after SIGHUP.
	 *
	 * To read these fields, you must hold an insertion lock. To modify them,
	 * you must hold ALL the locks.
	 */
	XLogRecPtr	RedoRecPtr;		/* current redo point for insertions */

	/*
	 * runningBackups is a counter indicating the number of backups currently
	 * in progress. lastBackupStart is the latest checkpoint redo location
	 * used as a starting point for an online backup.
	 */

	/*
	 * WAL insertion locks.
	 */

/*
 * Total shared-memory state for XLOG.
 *
 * NOTE(review): a number of member declarations are missing from this
 * extraction (including the embedded XLogCtlInsert, the fields described
 * by several orphaned comments below, and the closing brace).  Restore
 * them from the upstream file before compiling.
 */
typedef struct XLogCtlData
{

	/* Protected by info_lck: */
	XLogRecPtr	RedoRecPtr;		/* a recent copy of Insert->RedoRecPtr */
	XLogRecPtr	asyncXactLSN;	/* LSN of newest async commit/abort */
	XLogRecPtr	replicationSlotMinLSN;	/* oldest LSN needed by any slot */

	XLogSegNo	lastRemovedSegNo;	/* latest removed/recycled XLOG segment */

	/* Fake LSN counter, for unlogged relations. */

	/* Time and LSN of last xlog segment switch. Protected by WALWriteLock. */

	/* These are accessed using atomics -- info_lck not needed */
	pg_atomic_uint64 logInsertResult;	/* last byte + 1 inserted to buffers */
	pg_atomic_uint64 logWriteResult;	/* last byte + 1 written out */
	pg_atomic_uint64 logFlushResult;	/* last byte + 1 flushed */

	/*
	 * Latest initialized page in the cache (last byte position + 1).
	 *
	 * To change the identity of a buffer (and InitializedUpTo), you need to
	 * hold WALBufMappingLock. To change the identity of a buffer that's
	 * still dirty, the old page needs to be written out first, and for that
	 * you need WALWriteLock, and you need to ensure that there are no
	 * in-progress insertions to the page by calling
	 * WaitXLogInsertionsToFinish().
	 */

	/*
	 * These values do not change after startup, although the pointed-to pages
	 * and xlblocks values certainly do. xlblocks values are protected by
	 * WALBufMappingLock.
	 */
	char	   *pages;			/* buffers for unwritten XLOG pages */
	pg_atomic_uint64 *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */
	int			XLogCacheBlck;	/* highest allocated xlog buffer index */

	/*
	 * InsertTimeLineID is the timeline into which new WAL is being inserted
	 * and flushed. It is zero during recovery, and does not change once set.
	 *
	 * If we create a new timeline when the system was started up,
	 * PrevTimeLineID is the old timeline's ID that we forked off from.
	 * Otherwise it's equal to InsertTimeLineID.
	 *
	 * We set these fields while holding info_lck. Most that reads these
	 * values knows that recovery is no longer in progress and so can safely
	 * read the value without a lock, but code that could be run either during
	 * or after recovery can take info_lck while reading these values.
	 */

	/*
	 * SharedRecoveryState indicates if we're still in crash or archive
	 * recovery. Protected by info_lck.
	 */

	/*
	 * InstallXLogFileSegmentActive indicates whether the checkpointer should
	 * arrange for future segments by recycling and/or PreallocXlogFiles().
	 * Protected by ControlFileLock. Only the startup process changes it. If
	 * true, anyone can use InstallXLogFileSegment(). If false, the startup
	 * process owns the exclusive right to install segments, by reading from
	 * the archive and possibly replacing existing files.
	 */

	/*
	 * WalWriterSleeping indicates whether the WAL writer is currently in
	 * low-power mode (and hence should be nudged if an async commit occurs).
	 * Protected by info_lck.
	 */

	/*
	 * During recovery, we keep a copy of the latest checkpoint record here.
	 * lastCheckPointRecPtr points to start of checkpoint record and
	 * lastCheckPointEndPtr points to end+1 of checkpoint record. Used by the
	 * checkpointer when it wants to create a restartpoint.
	 *
	 * Protected by info_lck.
	 */

	/*
	 * lastFpwDisableRecPtr points to the start of the last replayed
	 * XLOG_FPW_CHANGE record that instructs full_page_writes is disabled.
	 */

	/* last data_checksum_version we've seen */

	slock_t		info_lck;		/* locks shared variables shown above */

565/*
566 * Classification of XLogInsertRecord operations.
567 */
574
576
577/* a private copy of XLogCtl->Insert.WALInsertLocks, for convenience */
579
580/*
581 * We maintain an image of pg_control in shared memory.
582 */
585
/* Shared-memory lifecycle callbacks for the XLOG subsystem. */
static void XLOGShmemRequest(void *arg);
static void XLOGShmemInit(void *arg);
static void XLOGShmemAttach(void *arg);

/*
 * NOTE(review): the declaration line of this registration struct (its name
 * and, to judge from the fields below, a .request_fn initializer) is
 * missing from this extraction; restore it from the upstream file.
 */
	.init_fn = XLOGShmemInit,
	.attach_fn = XLOGShmemAttach,
};
595
/*
 * Calculate the amount of space left on the page after 'endptr'. Beware
 * multiple evaluation!  ('endptr' is expanded twice, so pass an expression
 * free of side effects.)
 */
#define INSERT_FREESPACE(endptr) \
	(((endptr) % XLOG_BLCKSZ == 0) ? 0 : (XLOG_BLCKSZ - (endptr) % XLOG_BLCKSZ))
602
/* Macro to advance to next buffer index, wrapping to 0 after the last one. */
#define NextBufIdx(idx) \
	(((idx) == XLogCtl->XLogCacheBlck) ? 0 : ((idx) + 1))
606
/*
 * XLogRecPtrToBufIdx returns the index of the WAL buffer that holds, or
 * would hold if it was in cache, the page containing 'recptr'.  The buffer
 * cache is direct-mapped: page number modulo the cache size
 * (XLogCacheBlck is the highest index, so the size is XLogCacheBlck + 1).
 */
#define XLogRecPtrToBufIdx(recptr) \
	(((recptr) / XLOG_BLCKSZ) % (XLogCtl->XLogCacheBlck + 1))
613
/*
 * These are the number of bytes in a WAL page usable for WAL data, i.e.
 * excluding the (short) page header.
 */
#define UsableBytesInPage (XLOG_BLCKSZ - SizeOfXLogShortPHD)
618
/*
 * Convert values of GUCs measured in megabytes to equiv. segment count.
 * Rounds down; see XLogMBVarToSegs() for the arithmetic.
 */
#define ConvertToXSegs(x, segsize) XLogMBVarToSegs((x), (segsize))
624
625/* The number of bytes in a WAL segment usable for WAL data. */
627
/*
 * Private, possibly out-of-date copy of shared LogwrtResult.
 * See discussion above.
 *
 * NOTE(review): the LogwrtResult variable declaration itself appears to be
 * missing from this extraction.
 */

/*
 * Update local copy of shared XLogCtl->log{Write,Flush}Result
 *
 * It's critical that Flush always trails Write, so the order of the reads is
 * important, as is the barrier: Flush is read first and a read barrier
 * prevents reordering, so we can never observe a Flush ahead of the
 * corresponding Write. See also XLogWrite.
 */
#define RefreshXLogWriteResult(_target) \
	do { \
		_target.Flush = pg_atomic_read_u64(&XLogCtl->logFlushResult); \
		pg_read_barrier(); \
		_target.Write = pg_atomic_read_u64(&XLogCtl->logWriteResult); \
	} while (0)
646
/*
 * openLogFile is -1 or a kernel FD for an open log file segment.
 * openLogSegNo identifies the segment, and openLogTLI the corresponding TLI.
 * These variables are only used to write the XLOG, and so will normally refer
 * to the active segment.
 * (NOTE(review): the openLogSegNo/openLogTLI declarations are missing from
 * this extraction.)
 *
 * Note: call Reserve/ReleaseExternalFD to track consumption of this FD.
 */
static int	openLogFile = -1;
/*
 * Local copies of equivalent fields in the control file. When running
 * crash recovery, LocalMinRecoveryPoint is set to InvalidXLogRecPtr as we
 * expect to replay all the WAL available, and updateMinRecoveryPoint is
 * switched to false to prevent any updates while replaying records.
 * Those values are kept consistent as long as crash recovery runs.
 * (NOTE(review): the LocalMinRecoveryPoint declaration itself is missing
 * from this extraction.)
 */
static bool updateMinRecoveryPoint = true;
669
670/*
671 * Local state for ControlFile data_checksum_version. After initialization
672 * this is only updated when absorbing a procsignal barrier during interrupt
673 * processing. The reason for keeping a copy in backend-private memory is to
674 * avoid locking for interrogating the data checksum state. Possible values
675 * are the data checksum versions defined in storage/checksum.h.
676 */
678
679/*
680 * Variable backing the GUC, keep it in sync with LocalDataChecksumState.
681 * See SetLocalDataChecksumState().
682 */
684
/* For WALInsertLockAcquire/Release functions */
static int	MyLockNo = 0;		/* index of the insertion lock we hold */
static bool holdingAllLocks = false;	/* true if we hold ALL insertion locks */
688
689#ifdef WAL_DEBUG
691#endif
692
/*
 * Forward declarations for static helpers defined later in this file.
 *
 * NOTE(review): several prototypes here are truncated by the extraction
 * (continuation lines of multi-line signatures are missing), so this list
 * does not compile as-is; restore it from the upstream file.
 */
static void CheckRequiredParameterValues(void);
static void XLogReportParameters(void);
static int	LocalSetXLogInsertAllowed(void);
static void CreateEndOfRecoveryRecord(void);
static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags);

							bool opportunistic);
static void XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible);
static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
								   TimeLineID tli);
static void XLogFileClose(void);
static void PreallocXlogFiles(XLogRecPtr endptr, TimeLineID tli);
static void RemoveTempXlogFiles(void);
static void RemoveXlogFile(const struct dirent *segment_de,
static void UpdateLastRemovedPtr(char *filename);
static void ValidateXLOGDirectoryStructure(void);
static void CleanupBackupHistory(void);
static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force);
static bool PerformRecoveryXLogAction(void);
static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version);
static void WriteControlFile(void);
static void ReadControlFile(void);
static void UpdateControlFile(void);
static char *str_time(pg_time_t tnow, char *buf, size_t bufsize);

static int	get_sync_bit(int method);

static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch,
								TimeLineID tli);
static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos,
static char *GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli);

static void WALInsertLockAcquire(void);
static void WALInsertLockAcquireExclusive(void);
static void WALInsertLockRelease(void);
static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt);

static void XLogChecksums(uint32 new_type);
753
754/*
755 * Insert an XLOG record represented by an already-constructed chain of data
756 * chunks. This is a low-level routine; to construct the WAL record header
757 * and data, use the higher-level routines in xloginsert.c.
758 *
759 * If 'fpw_lsn' is valid, it is the oldest LSN among the pages that this
760 * WAL record applies to, that were not included in the record as full page
761 * images. If fpw_lsn <= RedoRecPtr, the function does not perform the
762 * insertion and returns InvalidXLogRecPtr. The caller can then recalculate
763 * which pages need a full-page image, and retry. If fpw_lsn is invalid, the
764 * record is always inserted.
765 *
766 * 'flags' gives more in-depth control on the record being inserted. See
767 * XLogSetRecordFlags() for details.
768 *
769 * 'topxid_included' tells whether the top-transaction id is logged along with
770 * current subtransaction. See XLogRecordAssemble().
771 *
772 * The first XLogRecData in the chain must be for the record header, and its
773 * data must be MAXALIGNed. XLogInsertRecord fills in the xl_prev and
774 * xl_crc fields in the header, the rest of the header must already be filled
775 * by the caller.
776 *
777 * Returns XLOG pointer to end of record (beginning of next record).
778 * This can be used as LSN for data pages affected by the logged action.
779 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
780 * before the data page can be written out. This implements the basic
781 * WAL rule "write the log before the data".)
782 */
786 uint8 flags,
787 int num_fpi,
789 bool topxid_included)
790{
793 bool inserted;
794 XLogRecord *rechdr = (XLogRecord *) rdata->data;
795 uint8 info = rechdr->xl_info & ~XLR_INFO_MASK;
801
802 /* Does this record type require special handling? */
803 if (unlikely(rechdr->xl_rmid == RM_XLOG_ID))
804 {
805 if (info == XLOG_SWITCH)
807 else if (info == XLOG_CHECKPOINT_REDO)
809 }
810
811 /* we assume that all of the record header is in the first chunk */
813
814 /* cross-check on whether we should be here or not */
815 if (!XLogInsertAllowed())
816 elog(ERROR, "cannot make new WAL entries during recovery");
817
818 /*
819 * Given that we're not in recovery, InsertTimeLineID is set and can't
820 * change, so we can read it without a lock.
821 */
823
824 /*----------
825 *
826 * We have now done all the preparatory work we can without holding a
827 * lock or modifying shared state. From here on, inserting the new WAL
828 * record to the shared WAL buffer cache is a two-step process:
829 *
830 * 1. Reserve the right amount of space from the WAL. The current head of
831 * reserved space is kept in Insert->CurrBytePos, and is protected by
832 * insertpos_lck.
833 *
834 * 2. Copy the record to the reserved WAL space. This involves finding the
835 * correct WAL buffer containing the reserved space, and copying the
836 * record in place. This can be done concurrently in multiple processes.
837 *
838 * To keep track of which insertions are still in-progress, each concurrent
839 * inserter acquires an insertion lock. In addition to just indicating that
840 * an insertion is in progress, the lock tells others how far the inserter
841 * has progressed. There is a small fixed number of insertion locks,
842 * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
843 * boundary, it updates the value stored in the lock to the how far it has
844 * inserted, to allow the previous buffer to be flushed.
845 *
846 * Holding onto an insertion lock also protects RedoRecPtr and
847 * fullPageWrites from changing until the insertion is finished.
848 *
849 * Step 2 can usually be done completely in parallel. If the required WAL
850 * page is not initialized yet, you have to grab WALBufMappingLock to
851 * initialize it, but the WAL writer tries to do that ahead of insertions
852 * to avoid that from happening in the critical path.
853 *
854 *----------
855 */
857
858 if (likely(class == WALINSERT_NORMAL))
859 {
861
862 /*
863 * Check to see if my copy of RedoRecPtr is out of date. If so, may
864 * have to go back and have the caller recompute everything. This can
865 * only happen just after a checkpoint, so it's better to be slow in
866 * this case and fast otherwise.
867 *
868 * Also check to see if fullPageWrites was just turned on or there's a
869 * running backup (which forces full-page writes); if we weren't
870 * already doing full-page writes then go back and recompute.
871 *
872 * If we aren't doing full-page writes then RedoRecPtr doesn't
873 * actually affect the contents of the XLOG record, so we'll update
874 * our local copy but not force a recomputation. (If doPageWrites was
875 * just turned off, we could recompute the record without full pages,
876 * but we choose not to bother.)
877 */
878 if (RedoRecPtr != Insert->RedoRecPtr)
879 {
881 RedoRecPtr = Insert->RedoRecPtr;
882 }
883 doPageWrites = (Insert->fullPageWrites || Insert->runningBackups > 0);
884
885 if (doPageWrites &&
888 {
889 /*
890 * Oops, some buffer now needs to be backed up that the caller
891 * didn't back up. Start over.
892 */
895 return InvalidXLogRecPtr;
896 }
897
898 /*
899 * Reserve space for the record in the WAL. This also sets the xl_prev
900 * pointer.
901 */
903 &rechdr->xl_prev);
904
905 /* Normal records are always inserted. */
906 inserted = true;
907 }
908 else if (class == WALINSERT_SPECIAL_SWITCH)
909 {
910 /*
911 * In order to insert an XLOG_SWITCH record, we need to hold all of
912 * the WAL insertion locks, not just one, so that no one else can
913 * begin inserting a record until we've figured out how much space
914 * remains in the current WAL segment and claimed all of it.
915 *
916 * Nonetheless, this case is simpler than the normal cases handled
917 * below, which must check for changes in doPageWrites and RedoRecPtr.
918 * Those checks are only needed for records that can contain buffer
919 * references, and an XLOG_SWITCH record never does.
920 */
924 }
925 else
926 {
928
929 /*
930 * We need to update both the local and shared copies of RedoRecPtr,
931 * which means that we need to hold all the WAL insertion locks.
932 * However, there can't be any buffer references, so as above, we need
933 * not check RedoRecPtr before inserting the record; we just need to
934 * update it afterwards.
935 */
939 &rechdr->xl_prev);
940 RedoRecPtr = Insert->RedoRecPtr = StartPos;
941 inserted = true;
942 }
943
944 if (inserted)
945 {
946 /*
947 * Now that xl_prev has been filled in, calculate CRC of the record
948 * header.
949 */
950 rdata_crc = rechdr->xl_crc;
953 rechdr->xl_crc = rdata_crc;
954
955 /*
956 * All the record data, including the header, is now ready to be
957 * inserted. Copy the record in the space reserved.
958 */
959 CopyXLogRecordToWAL(rechdr->xl_tot_len,
962
963 /*
964 * Unless record is flagged as not important, update LSN of last
965 * important record in the current slot. When holding all locks, just
966 * update the first one.
967 */
968 if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
969 {
970 int lockno = holdingAllLocks ? 0 : MyLockNo;
971
973 }
974 }
975 else
976 {
977 /*
978 * This was an xlog-switch record, but the current insert location was
979 * already exactly at the beginning of a segment, so there was no need
980 * to do anything.
981 */
982 }
983
984 /*
985 * Done! Let others know that we're finished.
986 */
988
990
992
993 /*
994 * Mark top transaction id is logged (if needed) so that we should not try
995 * to log it again with the next WAL record in the current subtransaction.
996 */
997 if (topxid_included)
999
1000 /*
1001 * Update shared LogwrtRqst.Write, if we crossed page boundary.
1002 */
1004 {
1006 /* advance global request to include new block(s) */
1011 }
1012
1013 /*
1014 * If this was an XLOG_SWITCH record, flush the record and the empty
1015 * padding space that fills the rest of the segment, and perform
1016 * end-of-segment actions (eg, notifying archiver).
1017 */
1018 if (class == WALINSERT_SPECIAL_SWITCH)
1019 {
1022
1023 /*
1024 * Even though we reserved the rest of the segment for us, which is
1025 * reflected in EndPos, we return a pointer to just the end of the
1026 * xlog-switch record.
1027 */
1028 if (inserted)
1029 {
1032 {
1034
1035 if (offset == EndPos % XLOG_BLCKSZ)
1037 else
1039 }
1040 }
1041 }
1042
1043#ifdef WAL_DEBUG
1044 if (XLOG_DEBUG)
1045 {
1047 XLogRecord *record;
1051 char *errormsg = NULL;
1053
1055
1057 appendStringInfo(&buf, "INSERT @ %X/%08X: ", LSN_FORMAT_ARGS(EndPos));
1058
1059 /*
1060 * We have to piece together the WAL record data from the XLogRecData
1061 * entries, so that we can pass it to the rm_desc function as one
1062 * contiguous chunk.
1063 */
1065 for (; rdata != NULL; rdata = rdata->next)
1067
1068 /* We also need temporary space to decode the record. */
1069 record = (XLogRecord *) recordBuf.data;
1072
1073 if (!debug_reader)
1075 XL_ROUTINE(.page_read = NULL,
1076 .segment_open = NULL,
1077 .segment_close = NULL),
1078 NULL);
1079 if (!debug_reader)
1080 {
1081 appendStringInfoString(&buf, "error decoding record: out of memory while allocating a WAL reading processor");
1082 }
1084 decoded,
1085 record,
1086 EndPos,
1087 &errormsg))
1088 {
1089 appendStringInfo(&buf, "error decoding record: %s",
1090 errormsg ? errormsg : "no error message");
1091 }
1092 else
1093 {
1094 appendStringInfoString(&buf, " - ");
1095
1096 debug_reader->record = decoded;
1098 debug_reader->record = NULL;
1099 }
1100 elog(LOG, "%s", buf.data);
1101
1102 pfree(decoded);
1103 pfree(buf.data);
1104 pfree(recordBuf.data);
1106 }
1107#endif
1108
1109 /*
1110 * Update our global variables
1111 */
1114
1115 /* Report WAL traffic to the instrumentation. */
1116 if (inserted)
1117 {
1118 pgWalUsage.wal_bytes += rechdr->xl_tot_len;
1122
1123 /* Required for the flush of pending stats WAL data */
1124 pgstat_report_fixed = true;
1125 }
1126
1127 return EndPos;
1128}
1129
1130/*
 1131 * Reserves the right amount of space for a record of given size from the WAL.
 1132 * *StartPos is set to the beginning of the reserved section, *EndPos to
 1133 * its end+1. *PrevPtr is set to the beginning of the previous record; it is
 1134 * used to set the xl_prev of this record.
 1135 *
 1136 * This is the performance critical part of XLogInsert that must be serialized
 1137 * across backends. The rest can happen mostly in parallel. Try to keep this
 1138 * section as short as possible, insertpos_lck can be heavily contended on a
 1139 * busy system.
 1140 *
 1141 * NB: The space calculation here must match the code in CopyXLogRecordToWAL,
 1142 * where we actually copy the record to the reserved space.
 1143 *
 1144 * NB: Testing shows that XLogInsertRecord runs faster if this code is inlined;
 1145 * however, because there are two call sites, the compiler is reluctant to
 1146 * inline. We use pg_attribute_always_inline here to try to convince it.
 1147 */
1151{
 1156
 1157 size = MAXALIGN(size);
 1158
 1159 /* All (non xlog-switch) records should contain data. */
 1160 Assert(size > SizeOfXLogRecord);
 1161
 1162 /*
 1163 * The duration the spinlock needs to be held is minimized by minimizing
 1164 * the calculations that have to be done while holding the lock. The
 1165 * current tip of reserved WAL is kept in CurrBytePos, as a byte position
 1166 * that only counts "usable" bytes in WAL, that is, it excludes all WAL
 1167 * page headers. The mapping between "usable" byte positions and physical
 1168 * positions (XLogRecPtrs) can be done outside the locked region, and
 1169 * because the usable byte position doesn't include any headers, reserving
 1170 * X bytes from WAL is almost as simple as "CurrBytePos += X".
 1171 */
 1172 SpinLockAcquire(&Insert->insertpos_lck);
 1173
 1174 startbytepos = Insert->CurrBytePos;
 1175 endbytepos = startbytepos + size;
 1176 prevbytepos = Insert->PrevBytePos;
 1177 Insert->CurrBytePos = endbytepos;
 1178 Insert->PrevBytePos = startbytepos;
 1179
 1180 SpinLockRelease(&Insert->insertpos_lck);
 1181
 /* Translate usable byte positions to physical XLogRecPtrs; per the comment
  * above this is safe to do without holding the spinlock. */
 1185
 1186 /*
 1187 * Check that the conversions between "usable byte positions" and
 1188 * XLogRecPtrs work consistently in both directions.
 1189 */
 1193}
1194
1195/*
 1196 * Like ReserveXLogInsertLocation(), but for an xlog-switch record.
 1197 *
 1198 * A log-switch record is handled slightly differently. The rest of the
 1199 * segment will be reserved for this insertion, as indicated by the returned
 1200 * *EndPos value. However, if we are already at the beginning of the current
 1201 * segment, *StartPos and *EndPos are set to the current location without
 1202 * reserving any space, and the function returns false.
 1203*/
1204static bool
1206{
 1212 XLogRecPtr ptr;
 1214
 1215 /*
 1216 * These calculations are a bit heavy-weight to be done while holding a
 1217 * spinlock, but since we're holding all the WAL insertion locks, there
 1218 * are no other inserters competing for it. GetXLogInsertRecPtr() does
 1219 * compete for it, but that's not called very frequently.
 1220 */
 1221 SpinLockAcquire(&Insert->insertpos_lck);
 1222
 1223 startbytepos = Insert->CurrBytePos;
 1224
 /* Already exactly at a segment boundary: no switch record is needed. */
 1226 if (XLogSegmentOffset(ptr, wal_segment_size) == 0)
 1227 {
 1228 SpinLockRelease(&Insert->insertpos_lck);
 1229 *EndPos = *StartPos = ptr;
 1230 return false;
 1231 }
 1232
 1233 endbytepos = startbytepos + size;
 1234 prevbytepos = Insert->PrevBytePos;
 1235
 1238
 1241 {
 1242 /* consume the rest of the segment */
 1243 *EndPos += segleft;
 1245 }
 /* Publish the new reservation tip while still holding the spinlock. */
 1246 Insert->CurrBytePos = endbytepos;
 1247 Insert->PrevBytePos = startbytepos;
 1248
 1249 SpinLockRelease(&Insert->insertpos_lck);
 1250
 1252
 1257
 1258 return true;
 1259}
1260
1261/*
 1262 * Subroutine of XLogInsertRecord. Copies a WAL record to an already-reserved
 1263 * area in the WAL.
 1264 */
1265static void
1268{
 1269 char *currpos;
 1270 int freespace;
 1271 int written;
 1274
 1275 /*
 1276 * Get a pointer to the right place in the right WAL buffer to start
 1277 * inserting to.
 1278 */
 1279 CurrPos = StartPos;
 1280 currpos = GetXLogBuffer(CurrPos, tli);
 1281 freespace = INSERT_FREESPACE(CurrPos);
 1282
 1283 /*
 1284 * there should be enough space for at least the first field (xl_tot_len)
 1285 * on this page.
 1286 */
 1287 Assert(freespace >= sizeof(uint32));
 1288
 1289 /* Copy record data */
 /* 'written' tracks the record bytes copied so far; continuation page
  * headers record the remainder as write_len - written in xlp_rem_len. */
 1290 written = 0;
 1291 while (rdata != NULL)
 1292 {
 1293 const char *rdata_data = rdata->data;
 1294 int rdata_len = rdata->len;
 1295
 1296 while (rdata_len > freespace)
 1297 {
 1298 /*
 1299 * Write what fits on this page, and continue on the next page.
 1300 */
 1301 Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || freespace == 0);
 1302 memcpy(currpos, rdata_data, freespace);
 1303 rdata_data += freespace;
 1304 rdata_len -= freespace;
 1305 written += freespace;
 1306 CurrPos += freespace;
 1307
 1308 /*
 1309 * Get pointer to beginning of next page, and set the xlp_rem_len
 1310 * in the page header. Set XLP_FIRST_IS_CONTRECORD.
 1311 *
 1312 * It's safe to set the contrecord flag and xlp_rem_len without a
 1313 * lock on the page. All the other flags were already set when the
 1314 * page was initialized, in AdvanceXLInsertBuffer, and we're the
 1315 * only backend that needs to set the contrecord flag.
 1316 */
 1317 currpos = GetXLogBuffer(CurrPos, tli);
 1318 pagehdr = (XLogPageHeader) currpos;
 1319 pagehdr->xlp_rem_len = write_len - written;
 1320 pagehdr->xlp_info |= XLP_FIRST_IS_CONTRECORD;
 1321
 1322 /* skip over the page header */
 1324 {
 1326 currpos += SizeOfXLogLongPHD;
 1327 }
 1328 else
 1329 {
 1331 currpos += SizeOfXLogShortPHD;
 1332 }
 1333 freespace = INSERT_FREESPACE(CurrPos);
 1334 }
 1335
 /* Remaining chunk fits on the current page. */
 1336 Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || rdata_len == 0);
 1337 memcpy(currpos, rdata_data, rdata_len);
 1338 currpos += rdata_len;
 1339 CurrPos += rdata_len;
 1340 freespace -= rdata_len;
 1341 written += rdata_len;
 1342
 1343 rdata = rdata->next;
 1344 }
 1346
 1347 /*
 1348 * If this was an xlog-switch, it's not enough to write the switch record,
 1349 * we also have to consume all the remaining space in the WAL segment. We
 1350 * have already reserved that space, but we need to actually fill it.
 1351 */
 1353 {
 1354 /* An xlog-switch record doesn't contain any data besides the header */
 1356
 1357 /* Assert that we did reserve the right amount of space */
 1359
 1360 /* Use up all the remaining space on the current page */
 1361 CurrPos += freespace;
 1362
 1363 /*
 1364 * Cause all remaining pages in the segment to be flushed, leaving the
 1365 * XLog position where it should be, at the start of the next segment.
 1366 * We do this one page at a time, to make sure we don't deadlock
 1367 * against ourselves if wal_buffers < wal_segment_size.
 1368 */
 1369 while (CurrPos < EndPos)
 1370 {
 1371 /*
 1372 * The minimal action to flush the page would be to call
 1373 * WALInsertLockUpdateInsertingAt(CurrPos) followed by
 1374 * AdvanceXLInsertBuffer(...). The page would be left initialized
 1375 * mostly to zeros, except for the page header (always the short
 1376 * variant, as this is never a segment's first page).
 1377 *
 1378 * The large vistas of zeros are good for compressibility, but the
 1379 * headers interrupting them every XLOG_BLCKSZ (with values that
 1380 * differ from page to page) are not. The effect varies with
 1381 * compression tool, but bzip2 for instance compresses about an
 1382 * order of magnitude worse if those headers are left in place.
 1383 *
 1384 * Rather than complicating AdvanceXLInsertBuffer itself (which is
 1385 * called in heavily-loaded circumstances as well as this lightly-
 1386 * loaded one) with variant behavior, we just use GetXLogBuffer
 1387 * (which itself calls the two methods we need) to get the pointer
 1388 * and zero most of the page. Then we just zero the page header.
 1389 */
 1390 currpos = GetXLogBuffer(CurrPos, tli);
 1391 MemSet(currpos, 0, SizeOfXLogShortPHD);
 1392
 1394 }
 1395 }
 1396 else
 1397 {
 1398 /* Align the end position, so that the next record starts aligned */
 1400 }
 1401
 /* Cross-check against the space reserved by ReserveXLogInsertLocation. */
 1402 if (CurrPos != EndPos)
 1403 ereport(PANIC,
 1405 errmsg_internal("space reserved for WAL record does not match what was written"));
 1406}
1407
1408/*
 1409 * Acquire a WAL insertion lock, for inserting to WAL.
 1410 */
1411static void
1413{
 /* True if the lock was obtained without having to wait. */
 1414 bool immed;
 1415
 1416 /*
 1417 * It doesn't matter which of the WAL insertion locks we acquire, so try
 1418 * the one we used last time. If the system isn't particularly busy, it's
 1419 * a good bet that it's still available, and it's good to have some
 1420 * affinity to a particular lock so that you don't unnecessarily bounce
 1421 * cache lines between processes when there's no contention.
 1422 *
 1423 * If this is the first time through in this backend, pick a lock
 1424 * (semi-)randomly. This allows the locks to be used evenly if you have a
 1425 * lot of very short connections.
 1426 */
 1427 static int lockToTry = -1;
 1428
 1429 if (lockToTry == -1)
 1432
 1433 /*
 1434 * The insertingAt value is initially set to 0, as we don't know our
 1435 * insert location yet.
 1436 */
 1438 if (!immed)
 1439 {
 1440 /*
 1441 * If we couldn't get the lock immediately, try another lock next
 1442 * time. On a system with more insertion locks than concurrent
 1443 * inserters, this causes all the inserters to eventually migrate to a
 1444 * lock that no-one else is using. On a system with more inserters
 1445 * than locks, it still helps to distribute the inserters evenly
 1446 * across the locks.
 1447 */
 1449 }
 1450}
1451
1452/*
 1453 * Acquire all WAL insertion locks, to prevent other backends from inserting
 1454 * to WAL.
 1455 */
1456static void
1458{
 1459 int i;
 1460
 1461 /*
 1462 * When holding all the locks, all but the last lock's insertingAt
 1463 * indicator is set to 0xFFFFFFFFFFFFFFFF, which is higher than any real
 1464 * XLogRecPtr value, to make sure that no-one blocks waiting on those.
 1465 */
 1466 for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
 1467 {
 1472 }
 1473 /* Variable value reset to 0 at release */
 1475
 /* Consulted by WALInsertLockRelease() and the insertingAt updaters. */
 1476 holdingAllLocks = true;
 1477}
1478
1479/*
 1480 * Release our insertion lock (or locks, if we're holding them all).
 1481 *
 1482 * NB: Reset all variables to 0, so they cause LWLockWaitForVar to block the
 1483 * next time the lock is acquired.
 1484 */
1485static void
1487{
 1488 if (holdingAllLocks)
 1489 {
 1490 int i;
 1491
 /* Release every insertion lock, resetting each insertingAt to 0. */
 1492 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
 1495 0);
 1496
 1497 holdingAllLocks = false;
 1498 }
 1499 else
 1500 {
 /* Only the single lock acquired by WALInsertLockAcquire is held. */
 1503 0);
 1504 }
 1505}
1506
1507/*
 1508 * Update our insertingAt value, to let others know that we've finished
 1509 * inserting up to that point.
 1510 */
1511static void
1513{
 /* Publish 'insertingAt' on whichever insertion lock(s) we hold. */
 1514 if (holdingAllLocks)
 1515 {
 1516 /*
 1517 * We use the last lock to mark our actual position, see comments in
 1518 * WALInsertLockAcquireExclusive.
 1519 */
 1522 insertingAt);
 1523 }
 1524 else
 1527 insertingAt);
 1528}
1529
1530/*
 1531 * Wait for any WAL insertions < upto to finish.
 1532 *
 1533 * Returns the location of the oldest insertion that is still in-progress.
 1534 * Any WAL prior to that point has been fully copied into WAL buffers, and
 1535 * can be flushed out to disk. Because this waits for any insertions older
 1536 * than 'upto' to finish, the return value is always >= 'upto'.
 1537 *
 1538 * Note: When you are about to write out WAL, you must call this function
 1539 * *before* acquiring WALWriteLock, to avoid deadlocks. This function might
 1540 * need to wait for an insertion to finish (or at least advance to next
 1541 * uninitialized page), and the inserter might need to evict an old WAL buffer
 1542 * to make room for a new one, which in turn requires WALWriteLock.
 1543 */
1544static XLogRecPtr
1546{
 1552 int i;
 1553
 1554 if (MyProc == NULL)
 1555 elog(PANIC, "cannot wait without a PGPROC structure");
 1556
 1557 /*
 1558 * Check if there's any work to do. Use a barrier to ensure we get the
 1559 * freshest value.
 1560 */
 /* Fast exit: everything up to 'upto' is already known to be inserted. */
 1562 if (upto <= inserted)
 1563 return inserted;
 1564
 1565 /* Read the current insert position */
 1566 SpinLockAcquire(&Insert->insertpos_lck);
 1567 bytepos = Insert->CurrBytePos;
 1568 SpinLockRelease(&Insert->insertpos_lck);
 1570
 1571 /*
 1572 * No-one should request to flush a piece of WAL that hasn't even been
 1573 * reserved yet. However, it can happen if there is a block with a bogus
 1574 * LSN on disk, for example. XLogFlush checks for that situation and
 1575 * complains, but only after the flush. Here we just assume that to mean
 1576 * that all WAL that has been reserved needs to be finished. In this
 1577 * corner-case, the return value can be smaller than 'upto' argument.
 1578 */
 1579 if (upto > reservedUpto)
 1580 {
 1581 ereport(LOG,
 1582 errmsg("request to flush past end of generated WAL; request %X/%08X, current position %X/%08X",
 1585 }
 1586
 1587 /*
 1588 * Loop through all the locks, sleeping on any in-progress insert older
 1589 * than 'upto'.
 1590 *
 1591 * finishedUpto is our return value, indicating the point upto which all
 1592 * the WAL insertions have been finished. Initialize it to the head of
 1593 * reserved WAL, and as we iterate through the insertion locks, back it
 1594 * out for any insertion that's still in progress.
 1595 */
 1597 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
 1598 {
 1600
 1601 do
 1602 {
 1603 /*
 1604 * See if this insertion is in progress. LWLockWaitForVar will
 1605 * wait for the lock to be released, or for the 'value' to be set
 1606 * by a LWLockUpdateVar call. When a lock is initially acquired,
 1607 * its value is 0 (InvalidXLogRecPtr), which means that we don't
 1608 * know where it's inserting yet. We will have to wait for it. If
 1609 * it's a small insertion, the record will most likely fit on the
 1610 * same page and the inserter will release the lock without ever
 1611 * calling LWLockUpdateVar. But if it has to sleep, it will
 1612 * advertise the insertion point with LWLockUpdateVar before
 1613 * sleeping.
 1614 *
 1615 * In this loop we are only waiting for insertions that started
 1616 * before WaitXLogInsertionsToFinish was called. The lack of
 1617 * memory barriers in the loop means that we might see locks as
 1618 * "unused" that have since become used. This is fine because
 1619 * they only can be used for later insertions that we would not
 1620 * want to wait on anyway. Not taking a lock to acquire the
 1621 * current insertingAt value means that we might see older
 1622 * insertingAt values. This is also fine, because if we read a
 1623 * value too old, we will add ourselves to the wait queue, which
 1624 * contains atomic operations.
 1625 */
 1626 if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
 1629 {
 1630 /* the lock was free, so no insertion in progress */
 1632 break;
 1633 }
 1634
 1635 /*
 1636 * This insertion is still in progress. Have to wait, unless the
 1637 * inserter has proceeded past 'upto'.
 1638 */
 1639 } while (insertingat < upto);
 1640
 1643 }
 1644
 1645 /*
 1646 * Advance the limit we know to have been inserted and return the freshest
 1647 * value we know of, which might be beyond what we requested if somebody
 1648 * is concurrently doing this with an 'upto' pointer ahead of us.
 1649 */
 1651 finishedUpto);
 1652
 1653 return finishedUpto;
 1654}
1655
1656/*
 1657 * Get a pointer to the right location in the WAL buffer containing the
 1658 * given XLogRecPtr.
 1659 *
 1660 * If the page is not initialized yet, it is initialized. That might require
 1661 * evicting an old dirty buffer from the buffer cache, which means I/O.
 1662 *
 1663 * The caller must ensure that the page containing the requested location
 1664 * isn't evicted yet, and won't be evicted. The way to ensure that is to
 1665 * hold onto a WAL insertion lock with the insertingAt position set to
 1666 * something <= ptr. GetXLogBuffer() will update insertingAt if it needs
 1667 * to evict an old page from the buffer. (This means that once you call
 1668 * GetXLogBuffer() with a given 'ptr', you must not access anything before
 1669 * that point anymore, and must not call GetXLogBuffer() with an older 'ptr'
 1670 * later, because older buffers might be recycled already)
 1671 */
1672static char *
1674{
 1675 int idx;
 1676 XLogRecPtr endptr;
 /* Backend-local one-entry cache of the most recently looked-up page. */
 1677 static uint64 cachedPage = 0;
 1678 static char *cachedPos = NULL;
 1680
 1681 /*
 1682 * Fast path for the common case that we need to access again the same
 1683 * page as last time.
 1684 */
 1685 if (ptr / XLOG_BLCKSZ == cachedPage)
 1686 {
 1688 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
 1689 return cachedPos + ptr % XLOG_BLCKSZ;
 1690 }
 1691
 1692 /*
 1693 * The XLog buffer cache is organized so that a page is always loaded to a
 1694 * particular buffer. That way we can easily calculate the buffer a given
 1695 * page must be loaded into, from the XLogRecPtr alone.
 1696 */
 1697 idx = XLogRecPtrToBufIdx(ptr);
 1698
 1699 /*
 1700 * See what page is loaded in the buffer at the moment. It could be the
 1701 * page we're looking for, or something older. It can't be anything newer
 1702 * - that would imply the page we're looking for has already been written
 1703 * out to disk and evicted, and the caller is responsible for making sure
 1704 * that doesn't happen.
 1705 *
 1706 * We don't hold a lock while we read the value. If someone is just about
 1707 * to initialize or has just initialized the page, it's possible that we
 1708 * get InvalidXLogRecPtr. That's ok, we'll grab the mapping lock (in
 1709 * AdvanceXLInsertBuffer) and retry if we see anything other than the page
 1710 * we're looking for.
 1711 */
 1712 expectedEndPtr = ptr;
 1714
 1716 if (expectedEndPtr != endptr)
 1717 {
 1719
 1720 /*
 1721 * Before calling AdvanceXLInsertBuffer(), which can block, let others
 1722 * know how far we're finished with inserting the record.
 1723 *
 1724 * NB: If 'ptr' points to just after the page header, advertise a
 1725 * position at the beginning of the page rather than 'ptr' itself. If
 1726 * there are no other insertions running, someone might try to flush
 1727 * up to our advertised location. If we advertised a position after
 1728 * the page header, someone might try to flush the page header, even
 1729 * though page might actually not be initialized yet. As the first
 1730 * inserter on the page, we are effectively responsible for making
 1731 * sure that it's initialized, before we let insertingAt to move past
 1732 * the page header.
 1733 */
 1734 if (ptr % XLOG_BLCKSZ == SizeOfXLogShortPHD &&
 1737 else if (ptr % XLOG_BLCKSZ == SizeOfXLogLongPHD &&
 1740 else
 1741 initializedUpto = ptr;
 1742
 1744
 1745 AdvanceXLInsertBuffer(ptr, tli, false);
 1747
 /* After initialization the buffer must hold our page; anything else
  * indicates it was evicted, which the caller promised to prevent. */
 1748 if (expectedEndPtr != endptr)
 1749 elog(PANIC, "could not find WAL buffer for %X/%08X",
 1750 LSN_FORMAT_ARGS(ptr));
 1751 }
 1752 else
 1753 {
 1754 /*
 1755 * Make sure the initialization of the page is visible to us, and
 1756 * won't arrive later to overwrite the WAL data we write on the page.
 1757 */
 1759 }
 1760
 1761 /*
 1762 * Found the buffer holding this page. Return a pointer to the right
 1763 * offset within the page.
 1764 */
 1765 cachedPage = ptr / XLOG_BLCKSZ;
 1767
 1769 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
 1770
 1771 return cachedPos + ptr % XLOG_BLCKSZ;
 1772}
1773
1774/*
 1775 * Read WAL data directly from WAL buffers, if available. Returns the number
 1776 * of bytes read successfully.
 1777 *
 1778 * Fewer than 'count' bytes may be read if some of the requested WAL data has
 1779 * already been evicted.
 1780 *
 1781 * No locks are taken.
 1782 *
 1783 * Caller should ensure that it reads no further than LogwrtResult.Write
 1784 * (which should have been updated by the caller when determining how far to
 1785 * read). The 'tli' argument is only used as a convenient safety check so that
 1786 * callers do not read from WAL buffers on a historical timeline.
 1787 */
1788Size
 1790 TimeLineID tli)
1791{
 1792 char *pdst = dstbuf;
 1793 XLogRecPtr recptr = startptr;
 1795 Size nbytes = count;
 1796
 1798 return 0;
 1799
 1800 Assert(XLogRecPtrIsValid(startptr));
 1801
 1802 /*
 1803 * Caller should ensure that the requested data has been inserted into WAL
 1804 * buffers before we try to read it.
 1805 */
 1807 if (startptr + count > inserted)
 1808 ereport(ERROR,
 1809 errmsg("cannot read past end of generated WAL: requested %X/%08X, current position %X/%08X",
 1810 LSN_FORMAT_ARGS(startptr + count),
 1812
 1813 /*
 1814 * Loop through the buffers without a lock. For each buffer, atomically
 1815 * read and verify the end pointer, then copy the data out, and finally
 1816 * re-read and re-verify the end pointer.
 1817 *
 1818 * Once a page is evicted, it never returns to the WAL buffers, so if the
 1819 * end pointer matches the expected end pointer before and after we copy
 1820 * the data, then the right page must have been present during the data
 1821 * copy. Read barriers are necessary to ensure that the data copy actually
 1822 * happens between the two verification steps.
 1823 *
 1824 * If either verification fails, we simply terminate the loop and return
 1825 * with the data that had been already copied out successfully.
 1826 */
 1827 while (nbytes > 0)
 1828 {
 1829 uint32 offset = recptr % XLOG_BLCKSZ;
 1832 XLogRecPtr endptr;
 1833 const char *page;
 1834 const char *psrc;
 1836
 1837 /*
 1838 * Calculate the end pointer we expect in the xlblocks array if the
 1839 * correct page is present.
 1840 */
 1841 expectedEndPtr = recptr + (XLOG_BLCKSZ - offset);
 1842
 1843 /*
 1844 * First verification step: check that the correct page is present in
 1845 * the WAL buffers.
 1846 */
 1848 if (expectedEndPtr != endptr)
 1849 break;
 1850
 1851 /*
 1852 * The correct page is present (or was at the time the endptr was
 1853 * read; must re-verify later). Calculate pointer to source data and
 1854 * determine how much data to read from this page.
 1855 */
 1856 page = XLogCtl->pages + idx * (Size) XLOG_BLCKSZ;
 1857 psrc = page + offset;
 1858 npagebytes = Min(nbytes, XLOG_BLCKSZ - offset);
 1859
 1860 /*
 1861 * Ensure that the data copy and the first verification step are not
 1862 * reordered.
 1863 */
 1865
 1866 /* data copy */
 1868
 1869 /*
 1870 * Ensure that the data copy and the second verification step are not
 1871 * reordered.
 1872 */
 1874
 1875 /*
 1876 * Second verification step: check that the page we read from wasn't
 1877 * evicted while we were copying the data.
 1878 */
 1880 if (expectedEndPtr != endptr)
 1881 break;
 1882
 1883 pdst += npagebytes;
 1884 recptr += npagebytes;
 1885 nbytes -= npagebytes;
 1886 }
 1887
 /* We never copy more than the caller requested. */
 1888 Assert(pdst - dstbuf <= count);
 1889
 1890 return pdst - dstbuf;
 1891}
1892
1893/*
 1894 * Converts a "usable byte position" to XLogRecPtr. A usable byte position
 1895 * is the position starting from the beginning of WAL, excluding all WAL
 1896 * page headers.
 1897 */
1898static XLogRecPtr
1900{
 /* Split the byte position into whole segments plus leftover usable
  * bytes, then map the leftover onto pages, re-adding page header sizes. */
 1906
 1909
 1911 {
 1912 /* fits on first page of segment */
 1914 }
 1915 else
 1916 {
 1917 /* account for the first page on segment with long header */
 1920
 1923
 1925 }
 1926
 1928
 1929 return result;
 1930}
1931
1932/*
 1933 * Like XLogBytePosToRecPtr, but if the position is at a page boundary,
 1934 * returns a pointer to the beginning of the page (ie. before page header),
 1935 * not to where the first xlog record on that page would go to. This is used
 1936 * when converting a pointer to the end of a record.
 1937 */
1938static XLogRecPtr
1940{
 1946
 1949
 1951 {
 1952 /* fits on first page of segment */
 /* Exactly at a page boundary: point before the header, per the
  * function's header comment. */
 1953 if (bytesleft == 0)
 1954 seg_offset = 0;
 1955 else
 1957 }
 1958 else
 1959 {
 1960 /* account for the first page on segment with long header */
 1963
 1966
 1967 if (bytesleft == 0)
 1969 else
 1971 }
 1972
 1974
 1975 return result;
 1976}
1977
1978/*
 1979 * Convert an XLogRecPtr to a "usable byte position".
 1980 */
1981static uint64
1983{
 1986 uint32 offset;
 1987 uint64 result;
 1988
 1990
 1992 offset = ptr % XLOG_BLCKSZ;
 1993
 /* Position lies on the segment's first page, which has a long header. */
 1994 if (fullpages == 0)
 1995 {
 1997 if (offset > 0)
 1998 {
 1999 Assert(offset >= SizeOfXLogLongPHD);
 2000 result += offset - SizeOfXLogLongPHD;
 2001 }
 2002 }
 2003 else
 2004 {
 /* Later pages of the segment carry the short header. */
 2006 (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
 2007 (fullpages - 1) * UsableBytesInPage; /* full pages */
 2008 if (offset > 0)
 2009 {
 2010 Assert(offset >= SizeOfXLogShortPHD);
 2011 result += offset - SizeOfXLogShortPHD;
 2012 }
 2013 }
 2014
 2015 return result;
 2016}
2017
2018/*
 2019 * Initialize XLOG buffers, writing out old buffers if they still contain
 2020 * unwritten data, upto the page containing 'upto'. Or if 'opportunistic' is
 2021 * true, initialize as many pages as we can without having to write out
 2022 * unwritten data. Any new pages are initialized to zeros, with pages headers
 2023 * initialized properly.
 2024 */
2025static void
2027{
 2028 int nextidx;
 2034
 /* Pages initialized by this call; only used for WAL_DEBUG output below. */
 2034 int npages pg_attribute_unused() = 0;
 2035
 2037
 2038 /*
 2039 * Now that we have the lock, check if someone initialized the page
 2040 * already.
 2041 */
 2043 {
 2045
 2046 /*
 2047 * Get ending-offset of the buffer page we need to replace (this may
 2048 * be zero if the buffer hasn't been used yet). Fall through if it's
 2049 * already written out.
 2050 */
 2053 {
 2054 /*
 2055 * Nope, got work to do. If we just want to pre-initialize as much
 2056 * as we can without flushing, give up now.
 2057 */
 2058 if (opportunistic)
 2059 break;
 2060
 2061 /* Advance shared memory write request position */
 2066
 2067 /*
 2068 * Acquire an up-to-date LogwrtResult value and see if we still
 2069 * need to write it or if someone else already did.
 2070 */
 2073 {
 2074 /*
 2075 * Must acquire write lock. Release WALBufMappingLock first,
 2076 * to make sure that all insertions that we need to wait for
 2077 * can finish (up to this same position). Otherwise we risk
 2078 * deadlock.
 2079 */
 2081
 2083
 2085
 2088 {
 2089 /* OK, someone wrote it already */
 2091 }
 2092 else
 2093 {
 2094 /* Have to write it ourselves */
 2096 WriteRqst.Write = OldPageRqstPtr;
 2098 XLogWrite(WriteRqst, tli, false);
 2102
 2103 /*
 2104 * Required for the flush of pending stats WAL data, per
 2105 * update of pgWalUsage.
 2106 */
 2107 pgstat_report_fixed = true;
 2108 }
 2109 /* Re-acquire WALBufMappingLock and retry */
 2111 continue;
 2112 }
 2113 }
 2114
 2115 /*
 2116 * Now the next buffer slot is free and we can set it up to be the
 2117 * next output page.
 2118 */
 2121
 2123
 2125
 2126 /*
 2127 * Mark the xlblock with InvalidXLogRecPtr and issue a write barrier
 2128 * before initializing. Otherwise, the old page may be partially
 2129 * zeroed but look valid.
 2130 */
 2133
 2134 /*
 2135 * Be sure to re-zero the buffer so that bytes beyond what we've
 2136 * written will look like zeroes and not valid XLOG records...
 2137 */
 2139
 2140 /*
 2141 * Fill the new page's header
 2142 */
 2143 NewPage->xlp_magic = XLOG_PAGE_MAGIC;
 2144
 2145 /* NewPage->xlp_info = 0; */ /* done by memset */
 2146 NewPage->xlp_tli = tli;
 2147 NewPage->xlp_pageaddr = NewPageBeginPtr;
 2148
 2149 /* NewPage->xlp_rem_len = 0; */ /* done by memset */
 2150
 2151 /*
 2152 * If first page of an XLOG segment file, make it a long header.
 2153 */
 2154 if ((XLogSegmentOffset(NewPage->xlp_pageaddr, wal_segment_size)) == 0)
 2155 {
 2157
 2159 NewLongPage->xlp_seg_size = wal_segment_size;
 2160 NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
 2161 NewPage->xlp_info |= XLP_LONG_HEADER;
 2162 }
 2163
 2164 /*
 2165 * Make sure the initialization of the page becomes visible to others
 2166 * before the xlblocks update. GetXLogBuffer() reads xlblocks without
 2167 * holding a lock.
 2168 */
 2170
 2173
 2174 npages++;
 2175 }
 2177
2178#ifdef WAL_DEBUG
 2179 if (XLOG_DEBUG && npages > 0)
 2180 {
 2181 elog(DEBUG1, "initialized %d pages, up to %X/%08X",
 2183 }
2184#endif
2185}
2186
2187/*
 2188 * Calculate CheckPointSegments based on max_wal_size_mb and
 2189 * checkpoint_completion_target.
 2190 */
2191static void
2193{
 2194 double target;
 2195
 2196 /*-------
 2197 * Calculate the distance at which to trigger a checkpoint, to avoid
 2198 * exceeding max_wal_size_mb. This is based on two assumptions:
 2199 *
 2200 * a) we keep WAL for only one checkpoint cycle (prior to PG11 we kept
 2201 * WAL for two checkpoint cycles to allow us to recover from the
 2202 * secondary checkpoint if the first checkpoint failed, though we
 2203 * only did this on the primary anyway, not on standby. Keeping just
 2204 * one checkpoint simplifies processing and reduces disk space in
 2205 * many smaller databases.)
 2206 * b) during checkpoint, we consume checkpoint_completion_target *
 2207 * number of segments consumed between checkpoints.
 2208 *-------
 2209 */
 2212
 2213 /* round down */
 2214 CheckPointSegments = (int) target;
 2215
 /* Enforce a lower bound: never trigger below one segment's distance. */
 2216 if (CheckPointSegments < 1)
 2218}
2219
2220void
2226
2227void
2233
/*
 * GUC check hook validating a proposed WAL segment size; the accepted
 * range is stated in the errdetail below. (NOTE(review): the hook's
 * signature line is not visible here — presumably the standard
 * (int *newval, void **extra, GucSource source) form; confirm upstream.)
 */
2234bool
2236{
 2238 {
 2239 GUC_check_errdetail("The WAL segment size must be a power of two between 1 MB and 1 GB.");
 2240 return false;
 2241 }
 2242
 2243 return true;
 2244}
2245
2246/*
 2247 * At a checkpoint, how many WAL segments to recycle as preallocated future
 2248 * XLOG segments? Returns the highest segment that should be preallocated.
 2249 */
2250static XLogSegNo
2252{
 2255 double distance;
 2257
 2258 /*
 2259 * Calculate the segment numbers that min_wal_size_mb and max_wal_size_mb
 2260 * correspond to. Always recycle enough segments to meet the minimum, and
 2261 * remove enough segments to stay below the maximum.
 2262 */
 2267
 2268 /*
 2269 * Between those limits, recycle enough segments to get us through to the
 2270 * estimated end of next checkpoint.
 2271 *
 2272 * To estimate where the next checkpoint will finish, assume that the
 2273 * system runs steadily consuming CheckPointDistanceEstimate bytes between
 2274 * every checkpoint.
 2275 */
 2277 /* add 10% for good measure. */
 2278 distance *= 1.10;
 2279
 2280 recycleSegNo = (XLogSegNo) ceil(((double) lastredoptr + distance) /
 2282
 /* Clamp the estimate into the [minSegNo, maxSegNo] range. */
 2283 if (recycleSegNo < minSegNo)
 2285 if (recycleSegNo > maxSegNo)
 2287
 2288 return recycleSegNo;
 2289}
2290
2291/*
2292 * Check whether we've consumed enough xlog space that a checkpoint is needed.
2293 *
2294 * new_segno indicates a log file that has just been filled up (or read
2295 * during recovery). We measure the distance from RedoRecPtr to new_segno
2296 * and see if that exceeds CheckPointSegments.
2297 *
2298 * Note: it is caller's responsibility that RedoRecPtr is up-to-date.
2299 */
2300bool
2302{
2304
2306
2308 return true;
2309 return false;
2310}
2311
2312/*
2313 * Write and/or fsync the log at least as far as WriteRqst indicates.
2314 *
2315 * If flexible == true, we don't have to write as far as WriteRqst, but
2316 * may stop at any convenient boundary (such as a cache or logfile boundary).
2317 * This option allows us to avoid uselessly issuing multiple writes when a
2318 * single one would do.
2319 *
2320 * Must be called with WALWriteLock held. WaitXLogInsertionsToFinish(WriteRqst)
2321 * must be called before grabbing the lock, to make sure the data is ready to
2322 * write.
2323 */
2324static void
2326{
2327 bool ispartialpage;
2328 bool last_iteration;
2329 bool finishing_seg;
2330 int curridx;
2331 int npages;
2332 int startidx;
2334
2335 /* We should always be inside a critical section here */
2337
2338 /*
2339 * Update local LogwrtResult (caller probably did this already, but...)
2340 */
2342
2343 /*
2344 * Since successive pages in the xlog cache are consecutively allocated,
2345 * we can usually gather multiple pages together and issue just one
2346 * write() call. npages is the number of pages we have determined can be
2347 * written together; startidx is the cache block index of the first one,
2348 * and startoffset is the file offset at which it should go. The latter
2349 * two variables are only valid when npages > 0, but we must initialize
2350 * all of them to keep the compiler quiet.
2351 */
2352 npages = 0;
2353 startidx = 0;
2354 startoffset = 0;
2355
2356 /*
2357 * Within the loop, curridx is the cache block index of the page to
2358 * consider writing. Begin at the buffer containing the next unwritten
2359 * page, or last partially written page.
2360 */
2362
2363 while (LogwrtResult.Write < WriteRqst.Write)
2364 {
2365 /*
2366 * Make sure we're not ahead of the insert process. This could happen
2367 * if we're passed a bogus WriteRqst.Write that is past the end of the
2368 * last page that's been initialized by AdvanceXLInsertBuffer.
2369 */
2371
2372 if (LogwrtResult.Write >= EndPtr)
2373 elog(PANIC, "xlog write request %X/%08X is past end of log %X/%08X",
2376
2377 /* Advance LogwrtResult.Write to end of current buffer page */
2380
2383 {
2384 /*
2385 * Switch to new logfile segment. We cannot have any pending
2386 * pages here (since we dump what we have at segment end).
2387 */
2388 Assert(npages == 0);
2389 if (openLogFile >= 0)
2390 XLogFileClose();
2393 openLogTLI = tli;
2394
2395 /* create/use new log file */
2398 }
2399
2400 /* Make sure we have the current logfile open */
2401 if (openLogFile < 0)
2402 {
2405 openLogTLI = tli;
2408 }
2409
2410 /* Add current page to the set of pending pages-to-dump */
2411 if (npages == 0)
2412 {
2413 /* first of group */
2414 startidx = curridx;
2417 }
2418 npages++;
2419
2420 /*
2421 * Dump the set if this will be the last loop iteration, or if we are
2422 * at the last page of the cache area (since the next page won't be
2423 * contiguous in memory), or if we are at the end of the logfile
2424 * segment.
2425 */
2427
2430
2431 if (last_iteration ||
2434 {
2435 char *from;
2436 Size nbytes;
2437 Size nleft;
2440
2441 /* OK to write the page(s) */
2442 from = XLogCtl->pages + startidx * (Size) XLOG_BLCKSZ;
2443 nbytes = npages * (Size) XLOG_BLCKSZ;
2444 nleft = nbytes;
2445 do
2446 {
2447 errno = 0;
2448
2449 /*
2450 * Measure I/O timing to write WAL data, for pg_stat_io.
2451 */
2453
2457
2459 IOOP_WRITE, start, 1, written);
2460
2461 if (written <= 0)
2462 {
2463 char xlogfname[MAXFNAMELEN];
2464 int save_errno;
2465
2466 if (errno == EINTR)
2467 continue;
2468
2469 save_errno = errno;
2472 errno = save_errno;
2473 ereport(PANIC,
2475 errmsg("could not write to log file \"%s\" at offset %u, length %zu: %m",
2477 }
2478 nleft -= written;
2479 from += written;
2481 } while (nleft > 0);
2482
2483 npages = 0;
2484
2485 /*
2486 * If we just wrote the whole last page of a logfile segment,
2487 * fsync the segment immediately. This avoids having to go back
2488 * and re-open prior segments when an fsync request comes along
2489 * later. Doing it here ensures that one and only one backend will
2490 * perform this fsync.
2491 *
2492 * This is also the right place to notify the Archiver that the
2493 * segment is ready to copy to archival storage, and to update the
2494 * timer for archive_timeout, and to signal for a checkpoint if
2495 * too many logfile segments have been used since the last
2496 * checkpoint.
2497 */
2498 if (finishing_seg)
2499 {
2501
2502 /* signal that we need to wakeup walsenders later */
2504
2505 LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
2506
2507 if (XLogArchivingActive())
2509
2512
2513 /*
2514 * Request a checkpoint if we've consumed too much xlog since
2515 * the last one. For speed, we first check using the local
2516 * copy of RedoRecPtr, which might be out of date; if it looks
2517 * like a checkpoint is needed, forcibly update RedoRecPtr and
2518 * recheck.
2519 */
2521 {
2522 (void) GetRedoRecPtr();
2525 }
2526 }
2527 }
2528
2529 if (ispartialpage)
2530 {
2531 /* Only asked to write a partial page */
2533 break;
2534 }
2536
2537 /* If flexible, break out of loop as soon as we wrote something */
2538 if (flexible && npages == 0)
2539 break;
2540 }
2541
2542 Assert(npages == 0);
2543
2544 /*
2545 * If asked to flush, do so
2546 */
2547 if (LogwrtResult.Flush < WriteRqst.Flush &&
2549 {
2550 /*
2551 * Could get here without iterating above loop, in which case we might
2552 * have no open file or the wrong one. However, we do not need to
2553 * fsync more than one file.
2554 */
2557 {
2558 if (openLogFile >= 0 &&
2561 XLogFileClose();
2562 if (openLogFile < 0)
2563 {
2566 openLogTLI = tli;
2569 }
2570
2572 }
2573
2574 /* signal that we need to wakeup walsenders later */
2576
2578 }
2579
2580 /*
2581 * Update shared-memory status
2582 *
2583 * We make sure that the shared 'request' values do not fall behind the
2584 * 'result' values. This is not absolutely essential, but it saves some
2585 * code in a couple of places.
2586 */
2593
2594 /*
2595 * We write Write first, bar, then Flush. When reading, the opposite must
2596 * be done (with a matching barrier in between), so that we always see a
2597 * Flush value that trails behind the Write value seen.
2598 */
2602
2603#ifdef USE_ASSERT_CHECKING
2604 {
2608
2614
2615 /* WAL written to disk is always ahead of WAL flushed */
2616 Assert(Write >= Flush);
2617
2618 /* WAL inserted to buffers is always ahead of WAL written */
2619 Assert(Insert >= Write);
2620 }
2621#endif
2622}
2623
2624/*
2625 * Record the LSN for an asynchronous transaction commit/abort
2626 * and nudge the WALWriter if there is work for it to do.
2627 * (This should not be called for synchronous commits.)
2628 */
2629void
2631{
2632 XLogRecPtr WriteRqstPtr = asyncXactLSN;
2633 bool sleeping;
2634 bool wakeup = false;
2636
2640 if (XLogCtl->asyncXactLSN < asyncXactLSN)
2641 XLogCtl->asyncXactLSN = asyncXactLSN;
2643
2644 /*
2645 * If somebody else already called this function with a more aggressive
2646 * LSN, they will have done what we needed (and perhaps more).
2647 */
2648 if (asyncXactLSN <= prevAsyncXactLSN)
2649 return;
2650
2651 /*
2652 * If the WALWriter is sleeping, kick it to make it come out of low-power
2653 * mode, so that this async commit will reach disk within the expected
2654 * amount of time. Otherwise, determine whether it has enough WAL
2655 * available to flush, the same way that XLogBackgroundFlush() does.
2656 */
2657 if (sleeping)
2658 wakeup = true;
2659 else
2660 {
2661 int flushblocks;
2662
2664
2665 flushblocks =
2667
2669 wakeup = true;
2670 }
2671
2672 if (wakeup)
2673 {
2674 volatile PROC_HDR *procglobal = ProcGlobal;
2675 ProcNumber walwriterProc = procglobal->walwriterProc;
2676
2677 if (walwriterProc != INVALID_PROC_NUMBER)
2678 SetLatch(&GetPGProcByNumber(walwriterProc)->procLatch);
2679 }
2680}
2681
2682/*
2683 * Record the LSN up to which we can remove WAL because it's not required by
2684 * any replication slot.
2685 */
2686void
2693
2694
2695/*
2696 * Return the oldest LSN we must retain to satisfy the needs of some
2697 * replication slot.
2698 */
2701{
2702 XLogRecPtr retval;
2703
2707
2708 return retval;
2709}
2710
2711/*
2712 * Advance minRecoveryPoint in control file.
2713 *
2714 * If we crash during recovery, we must reach this point again before the
2715 * database is consistent.
2716 *
2717 * If 'force' is true, 'lsn' argument is ignored. Otherwise, minRecoveryPoint
2718 * is only updated if it's not already greater than or equal to 'lsn'.
2719 */
2720static void
2722{
2723 /* Quick check using our local copy of the variable */
2724 if (!updateMinRecoveryPoint || (!force && lsn <= LocalMinRecoveryPoint))
2725 return;
2726
2727 /*
2728 * An invalid minRecoveryPoint means that we need to recover all the WAL,
2729 * i.e., we're doing crash recovery. We never modify the control file's
2730 * value in that case, so we can short-circuit future checks here too. The
2731 * local values of minRecoveryPoint and minRecoveryPointTLI should not be
2732 * updated until crash recovery finishes. We only do this for the startup
2733 * process as it should not update its own reference of minRecoveryPoint
2734 * until it has finished crash recovery to make sure that all WAL
2735 * available is replayed in this case. This also saves from extra locks
2736 * taken on the control file from the startup process.
2737 */
2739 {
2740 updateMinRecoveryPoint = false;
2741 return;
2742 }
2743
2745
2746 /* update local copy */
2749
2751 updateMinRecoveryPoint = false;
2752 else if (force || LocalMinRecoveryPoint < lsn)
2753 {
2756
2757 /*
2758 * To avoid having to update the control file too often, we update it
2759 * all the way to the last record being replayed, even though 'lsn'
2760 * would suffice for correctness. This also allows the 'force' case
2761 * to not need a valid 'lsn' value.
2762 *
2763 * Another important reason for doing it this way is that the passed
2764 * 'lsn' value could be bogus, i.e., past the end of available WAL, if
2765 * the caller got it from a corrupted heap page. Accepting such a
2766 * value as the min recovery point would prevent us from coming up at
2767 * all. Instead, we just log a warning and continue with recovery.
2768 * (See also the comments about corrupt LSNs in XLogFlush.)
2769 */
2771 if (!force && newMinRecoveryPoint < lsn)
2772 elog(WARNING,
2773 "xlog min recovery request %X/%08X is past current point %X/%08X",
2775
2776 /* update control file */
2778 {
2784
2786 errmsg_internal("updated min recovery point to %X/%08X on timeline %u",
2789 }
2790 }
2792}
2793
2794/*
2795 * Ensure that all XLOG data through the given position is flushed to disk.
2796 *
2797 * NOTE: this differs from XLogWrite mainly in that the WALWriteLock is not
2798 * already held, and we try to avoid acquiring it if possible.
2799 */
2800void
2802{
2806
2807 /*
2808 * During REDO, we are reading not writing WAL. Therefore, instead of
2809 * trying to flush the WAL, we should update minRecoveryPoint instead. We
2810 * test XLogInsertAllowed(), not InRecovery, because we need checkpointer
2811 * to act this way too, and because when it tries to write the
2812 * end-of-recovery checkpoint, it should indeed flush.
2813 */
2814 if (!XLogInsertAllowed())
2815 {
2816 UpdateMinRecoveryPoint(record, false);
2817 return;
2818 }
2819
2820 /* Quick exit if already known flushed */
2821 if (record <= LogwrtResult.Flush)
2822 return;
2823
2824#ifdef WAL_DEBUG
2825 if (XLOG_DEBUG)
2826 elog(LOG, "xlog flush request %X/%08X; write %X/%08X; flush %X/%08X",
2827 LSN_FORMAT_ARGS(record),
2830#endif
2831
2833
2834 /*
2835 * Since fsync is usually a horribly expensive operation, we try to
2836 * piggyback as much data as we can on each fsync: if we see any more data
2837 * entered into the xlog buffer, we'll write and fsync that too, so that
2838 * the final value of LogwrtResult.Flush is as large as possible. This
2839 * gives us some chance of avoiding another fsync immediately after.
2840 */
2841
2842 /* initialize to given target; may increase below */
2843 WriteRqstPtr = record;
2844
2845 /*
2846 * Now wait until we get the write lock, or someone else does the flush
2847 * for us.
2848 */
2849 for (;;)
2850 {
2852
2853 /* done already? */
2855 if (record <= LogwrtResult.Flush)
2856 break;
2857
2858 /*
2859 * Before actually performing the write, wait for all in-flight
2860 * insertions to the pages we're about to write to finish.
2861 */
2863 if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
2867
2868 /*
2869 * Try to get the write lock. If we can't get it immediately, wait
2870 * until it's released, and recheck if we still need to do the flush
2871 * or if the backend that held the lock did it for us already. This
2872 * helps to maintain a good rate of group committing when the system
2873 * is bottlenecked by the speed of fsyncing.
2874 */
2876 {
2877 /*
2878 * The lock is now free, but we didn't acquire it yet. Before we
2879 * do, loop back to check if someone else flushed the record for
2880 * us already.
2881 */
2882 continue;
2883 }
2884
2885 /* Got the lock; recheck whether request is satisfied */
2887 if (record <= LogwrtResult.Flush)
2888 {
2890 break;
2891 }
2892
2893 /*
2894 * Sleep before flush! By adding a delay here, we may give further
2895 * backends the opportunity to join the backlog of group commit
2896 * followers; this can significantly improve transaction throughput,
2897 * at the risk of increasing transaction latency.
2898 *
2899 * We do not sleep if enableFsync is not turned on, nor if there are
2900 * fewer than CommitSiblings other backends with active transactions.
2901 */
2902 if (CommitDelay > 0 && enableFsync &&
2904 {
2908
2909 /*
2910 * Re-check how far we can now flush the WAL. It's generally not
2911 * safe to call WaitXLogInsertionsToFinish while holding
2912 * WALWriteLock, because an in-progress insertion might need to
2913 * also grab WALWriteLock to make progress. But we know that all
2914 * the insertions up to insertpos have already finished, because
2915 * that's what the earlier WaitXLogInsertionsToFinish() returned.
2916 * We're only calling it again to allow insertpos to be moved
2917 * further forward, not to actually wait for anyone.
2918 */
2920 }
2921
2922 /* try to write/flush later additions to XLOG as well */
2923 WriteRqst.Write = insertpos;
2924 WriteRqst.Flush = insertpos;
2925
2926 XLogWrite(WriteRqst, insertTLI, false);
2927
2929 /* done */
2930 break;
2931 }
2932
2934
2935 /* wake up walsenders now that we've released heavily contended locks */
2937
2938 /*
2939 * If we flushed an LSN that someone was waiting for, notify the waiters.
2940 */
2941 if (waitLSNState &&
2945
2946 /*
2947 * If we still haven't flushed to the request point then we have a
2948 * problem; most likely, the requested flush point is past end of XLOG.
2949 * This has been seen to occur when a disk page has a corrupted LSN.
2950 *
2951 * Formerly we treated this as a PANIC condition, but that hurts the
2952 * system's robustness rather than helping it: we do not want to take down
2953 * the whole system due to corruption on one data page. In particular, if
2954 * the bad page is encountered again during recovery then we would be
2955 * unable to restart the database at all! (This scenario actually
2956 * happened in the field several times with 7.1 releases.) As of 8.4, bad
2957 * LSNs encountered during recovery are UpdateMinRecoveryPoint's problem;
2958 * the only time we can reach here during recovery is while flushing the
2959 * end-of-recovery checkpoint record, and we don't expect that to have a
2960 * bad LSN.
2961 *
2962 * Note that for calls from xact.c, the ERROR will be promoted to PANIC
2963 * since xact.c calls this routine inside a critical section. However,
2964 * calls from bufmgr.c are not within critical sections and so we will not
2965 * force a restart for a bad LSN on a data page.
2966 */
2967 if (LogwrtResult.Flush < record)
2968 elog(ERROR,
2969 "xlog flush request %X/%08X is not satisfied --- flushed only to %X/%08X",
2970 LSN_FORMAT_ARGS(record),
2972
2973 /*
2974 * Cross-check XLogNeedsFlush(). Some of the checks of XLogFlush() and
2975 * XLogNeedsFlush() are duplicated, and this assertion ensures that these
2976 * remain consistent.
2977 */
2978 Assert(!XLogNeedsFlush(record));
2979}
2980
2981/*
2982 * Write & flush xlog, but without specifying exactly where to.
2983 *
2984 * We normally write only completed blocks; but if there is nothing to do on
2985 * that basis, we check for unwritten async commits in the current incomplete
2986 * block, and write through the latest one of those. Thus, if async commits
2987 * are not being used, we will write complete blocks only.
2988 *
2989 * If, based on the above, there's anything to write we do so immediately. But
2990 * to avoid calling fsync, fdatasync et. al. at a rate that'd impact
2991 * concurrent IO, we only flush WAL every wal_writer_delay ms, or if there's
2992 * more than wal_writer_flush_after unflushed blocks.
2993 *
2994 * We can guarantee that async commits reach disk after at most three
2995 * wal_writer_delay cycles. (When flushing complete blocks, we allow XLogWrite
2996 * to write "flexibly", meaning it can stop at the end of the buffer ring;
2997 * this makes a difference only with very high load or long wal_writer_delay,
2998 * but imposes one extra cycle for the worst case for async commits.)
2999 *
3000 * This routine is invoked periodically by the background walwriter process.
3001 *
3002 * Returns true if there was any work to do, even if we skipped flushing due
3003 * to wal_writer_delay/wal_writer_flush_after.
3004 */
3005bool
3007{
3009 bool flexible = true;
3010 static TimestampTz lastflush;
3012 int flushblocks;
3014
3015 /* XLOG doesn't need flushing during recovery */
3016 if (RecoveryInProgress())
3017 return false;
3018
3019 /*
3020 * Since we're not in recovery, InsertTimeLineID is set and can't change,
3021 * so we can read it without a lock.
3022 */
3024
3025 /* read updated LogwrtRqst */
3029
3030 /* back off to last completed page boundary */
3031 WriteRqst.Write -= WriteRqst.Write % XLOG_BLCKSZ;
3032
3033 /* if we have already flushed that far, consider async commit records */
3035 if (WriteRqst.Write <= LogwrtResult.Flush)
3036 {
3040 flexible = false; /* ensure it all gets written */
3041 }
3042
3043 /*
3044 * If already known flushed, we're done. Just need to check if we are
3045 * holding an open file handle to a logfile that's no longer in use,
3046 * preventing the file from being deleted.
3047 */
3048 if (WriteRqst.Write <= LogwrtResult.Flush)
3049 {
3050 if (openLogFile >= 0)
3051 {
3054 {
3055 XLogFileClose();
3056 }
3057 }
3058 return false;
3059 }
3060
3061 /*
3062 * Determine how far to flush WAL, based on the wal_writer_delay and
3063 * wal_writer_flush_after GUCs.
3064 *
3065 * Note that XLogSetAsyncXactLSN() performs similar calculation based on
3066 * wal_writer_flush_after, to decide when to wake us up. Make sure the
3067 * logic is the same in both places if you change this.
3068 */
3070 flushblocks =
3072
3073 if (WalWriterFlushAfter == 0 || lastflush == 0)
3074 {
3075 /* first call, or block based limits disabled */
3076 WriteRqst.Flush = WriteRqst.Write;
3077 lastflush = now;
3078 }
3080 {
3081 /*
3082 * Flush the writes at least every WalWriterDelay ms. This is
3083 * important to bound the amount of time it takes for an asynchronous
3084 * commit to hit disk.
3085 */
3086 WriteRqst.Flush = WriteRqst.Write;
3087 lastflush = now;
3088 }
3089 else if (flushblocks >= WalWriterFlushAfter)
3090 {
3091 /* exceeded wal_writer_flush_after blocks, flush */
3092 WriteRqst.Flush = WriteRqst.Write;
3093 lastflush = now;
3094 }
3095 else
3096 {
3097 /* no flushing, this time round */
3099 }
3100
3101#ifdef WAL_DEBUG
3102 if (XLOG_DEBUG)
3103 elog(LOG, "xlog bg flush request write %X/%08X; flush: %X/%08X, current is write %X/%08X; flush %X/%08X",
3108#endif
3109
3111
3112 /* now wait for any in-progress insertions to finish and get write lock */
3116 if (WriteRqst.Write > LogwrtResult.Write ||
3118 {
3120 }
3122
3124
3125 /* wake up walsenders now that we've released heavily contended locks */
3127
3128 /*
3129 * If we flushed an LSN that someone was waiting for, notify the waiters.
3130 */
3131 if (waitLSNState &&
3135
3136 /*
3137 * Great, done. To take some work off the critical path, try to initialize
3138 * as many of the no-longer-needed WAL buffers for future use as we can.
3139 */
3141
3142 /*
3143 * If we determined that we need to write data, but somebody else
3144 * wrote/flushed already, it should be considered as being active, to
3145 * avoid hibernating too early.
3146 */
3147 return true;
3148}
3149
3150/*
3151 * Test whether XLOG data has been flushed up to (at least) the given
3152 * position, or whether the minimum recovery point has been updated past
3153 * the given position.
3154 *
3155 * Returns true if a flush is still needed, or if the minimum recovery point
3156 * must be updated.
3157 *
3158 * It is possible that someone else is already in the process of flushing
3159 * that far, or has updated the minimum recovery point up to the given
3160 * position.
3161 */
3162bool
3164{
3165 /*
3166 * During recovery, we don't flush WAL but update minRecoveryPoint
3167 * instead. So "needs flush" is taken to mean whether minRecoveryPoint
3168 * would need to be updated.
3169 *
3170 * Using XLogInsertAllowed() rather than RecoveryInProgress() matters for
3171 * the case of an end-of-recovery checkpoint, where WAL data is flushed.
3172 * This check should be consistent with the one in XLogFlush().
3173 */
3174 if (!XLogInsertAllowed())
3175 {
3176 /* Quick exit if already known to be updated or cannot be updated */
3178 return false;
3179
3180 /*
3181 * An invalid minRecoveryPoint means that we need to recover all the
3182 * WAL, i.e., we're doing crash recovery. We never modify the control
3183 * file's value in that case, so we can short-circuit future checks
3184 * here too. This triggers a quick exit path for the startup process,
3185 * which cannot update its local copy of minRecoveryPoint as long as
3186 * it has not replayed all WAL available when doing crash recovery.
3187 */
3189 {
3190 updateMinRecoveryPoint = false;
3191 return false;
3192 }
3193
3194 /*
3195 * Update local copy of minRecoveryPoint. But if the lock is busy,
3196 * just return a conservative guess.
3197 */
3199 return true;
3203
3204 /*
3205 * Check minRecoveryPoint for any other process than the startup
3206 * process doing crash recovery, which should not update the control
3207 * file value if crash recovery is still running.
3208 */
3210 updateMinRecoveryPoint = false;
3211
3212 /* check again */
3214 return false;
3215 else
3216 return true;
3217 }
3218
3219 /* Quick exit if already known flushed */
3220 if (record <= LogwrtResult.Flush)
3221 return false;
3222
3223 /* read LogwrtResult and update local state */
3225
3226 /* check again */
3227 if (record <= LogwrtResult.Flush)
3228 return false;
3229
3230 return true;
3231}
3232
3233/*
3234 * Try to make a given XLOG file segment exist.
3235 *
3236 * logsegno: identify segment.
3237 *
3238 * *added: on return, true if this call raised the number of extant segments.
3239 *
3240 * path: on return, this char[MAXPGPATH] has the path to the logsegno file.
3241 *
3242 * Returns -1 or FD of opened file. A -1 here is not an error; a caller
3243 * wanting an open segment should attempt to open "path", which usually will
3244 * succeed. (This is weird, but it's efficient for the callers.)
3245 */
3246static int
3248 bool *added, char *path)
3249{
3250 char tmppath[MAXPGPATH];
3253 int fd;
3254 int save_errno;
3257
3258 Assert(logtli != 0);
3259
3261
3262 /*
3263 * Try to use existent file (checkpoint maker may have created it already)
3264 */
3265 *added = false;
3268 if (fd < 0)
3269 {
3270 if (errno != ENOENT)
3271 ereport(ERROR,
3273 errmsg("could not open file \"%s\": %m", path)));
3274 }
3275 else
3276 return fd;
3277
3278 /*
3279 * Initialize an empty (all zeroes) segment. NOTE: it is possible that
3280 * another process is doing the same thing. If so, we will end up
3281 * pre-creating an extra log segment. That seems OK, and better than
3282 * holding the lock throughout this lengthy process.
3283 */
3284 elog(DEBUG2, "creating and filling new WAL file");
3285
3286 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3287
3288 unlink(tmppath);
3289
3292
3293 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3295 if (fd < 0)
3296 ereport(ERROR,
3298 errmsg("could not create file \"%s\": %m", tmppath)));
3299
3300 /* Measure I/O timing when initializing segment */
3302
3304 save_errno = 0;
3305 if (wal_init_zero)
3306 {
3307 ssize_t rc;
3308
3309 /*
3310 * Zero-fill the file. With this setting, we do this the hard way to
3311 * ensure that all the file space has really been allocated. On
3312 * platforms that allow "holes" in files, just seeking to the end
3313 * doesn't allocate intermediate space. This way, we know that we
3314 * have all the space and (after the fsync below) that all the
3315 * indirect blocks are down on disk. Therefore, fdatasync(2) or
3316 * O_DSYNC will be sufficient to sync future writes to the log file.
3317 */
3319
3320 if (rc < 0)
3321 save_errno = errno;
3322 }
3323 else
3324 {
3325 /*
3326 * Otherwise, seeking to the end and writing a solitary byte is
3327 * enough.
3328 */
3329 errno = 0;
3330 if (pg_pwrite(fd, "\0", 1, wal_segment_size - 1) != 1)
3331 {
3332 /* if write didn't set errno, assume no disk space */
3334 }
3335 }
3337
3338 /*
3339 * A full segment worth of data is written when using wal_init_zero. One
3340 * byte is written when not using it.
3341 */
3343 io_start, 1,
3345
3346 if (save_errno)
3347 {
3348 /*
3349 * If we fail to make the file, delete it to release disk space
3350 */
3351 unlink(tmppath);
3352
3353 close(fd);
3354
3355 errno = save_errno;
3356
3357 ereport(ERROR,
3359 errmsg("could not write to file \"%s\": %m", tmppath)));
3360 }
3361
3362 /* Measure I/O timing when flushing segment */
3364
3366 if (pg_fsync(fd) != 0)
3367 {
3368 save_errno = errno;
3369 close(fd);
3370 errno = save_errno;
3371 ereport(ERROR,
3373 errmsg("could not fsync file \"%s\": %m", tmppath)));
3374 }
3376
3378 IOOP_FSYNC, io_start, 1, 0);
3379
3380 if (close(fd) != 0)
3381 ereport(ERROR,
3383 errmsg("could not close file \"%s\": %m", tmppath)));
3384
3385 /*
3386 * Now move the segment into place with its final name. Cope with
3387 * possibility that someone else has created the file while we were
3388 * filling ours: if so, use ours to pre-create a future log segment.
3389 */
3391
3392 /*
3393 * XXX: What should we use as max_segno? We used to use XLOGfileslop when
3394 * that was a constant, but that was always a bit dubious: normally, at a
3395 * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
3396 * here, it was the offset from the insert location. We can't do the
3397 * normal XLOGfileslop calculation here because we don't have access to
3398 * the prior checkpoint's redo location. So somewhat arbitrarily, just use
3399 * CheckPointSegments.
3400 */
3403 logtli))
3404 {
3405 *added = true;
3406 elog(DEBUG2, "done creating and filling new WAL file");
3407 }
3408 else
3409 {
3410 /*
3411 * No need for any more future segments, or InstallXLogFileSegment()
3412 * failed to rename the file into place. If the rename failed, a
3413 * caller opening the file may fail.
3414 */
3415 unlink(tmppath);
3416 elog(DEBUG2, "abandoned new WAL file");
3417 }
3418
3419 return -1;
3420}
3421
3422/*
3423 * Create a new XLOG file segment, or open a pre-existing one.
3424 *
3425 * logsegno: identify segment to be created/opened.
3426 *
3427 * Returns FD of opened file.
3428 *
3429 * Note: errors here are ERROR not PANIC because we might or might not be
3430 * inside a critical section (eg, during checkpoint there is no reason to
3431 * take down the system on failure). They will promote to PANIC if we are
3432 * in a critical section.
3433 */
3434int
3436{
3437 bool ignore_added;
3438 char path[MAXPGPATH];
3439 int fd;
3440
3441 Assert(logtli != 0);
3442
3444 if (fd >= 0)
3445 return fd;
3446
3447 /* Now open original target segment (might not be file I just made) */
3450 if (fd < 0)
3451 ereport(ERROR,
3453 errmsg("could not open file \"%s\": %m", path)));
3454 return fd;
3455}
3456
3457/*
3458 * Create a new XLOG file segment by copying a pre-existing one.
3459 *
3460 * destsegno: identify segment to be created.
3461 *
3462 * srcTLI, srcsegno: identify segment to be copied (could be from
3463 * a different timeline)
3464 *
3465 * upto: how much of the source file to copy (the rest is filled with
3466 * zeros)
3467 *
3468 * Currently this is only used during recovery, and so there are no locking
3469 * considerations. But we should be just as tense as XLogFileInit to avoid
3470 * emplacing a bogus file.
3471 */
3472static void
3475 int upto)
3476{
3477 char path[MAXPGPATH];
3478 char tmppath[MAXPGPATH];
3479 PGAlignedXLogBlock buffer;
3480 int srcfd;
3481 int fd;
3482 int nbytes;
3483
3484 /*
3485 * Open the source file
3486 */
3489 if (srcfd < 0)
3490 ereport(ERROR,
3492 errmsg("could not open file \"%s\": %m", path)));
3493
3494 /*
3495 * Copy into a temp file name.
3496 */
3497 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3498
3499 unlink(tmppath);
3500
3501 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3503 if (fd < 0)
3504 ereport(ERROR,
3506 errmsg("could not create file \"%s\": %m", tmppath)));
3507
3508 /*
3509 * Do the data copying.
3510 */
3511 for (nbytes = 0; nbytes < wal_segment_size; nbytes += sizeof(buffer))
3512 {
3513 int nread;
3514
3515 nread = upto - nbytes;
3516
3517 /*
3518 * The part that is not read from the source file is filled with
3519 * zeros.
3520 */
3521 if (nread < sizeof(buffer))
3522 memset(buffer.data, 0, sizeof(buffer));
3523
3524 if (nread > 0)
3525 {
3526 int r;
3527
3528 if (nread > sizeof(buffer))
3529 nread = sizeof(buffer);
3531 r = read(srcfd, buffer.data, nread);
3532 if (r != nread)
3533 {
3534 if (r < 0)
3535 ereport(ERROR,
3537 errmsg("could not read file \"%s\": %m",
3538 path)));
3539 else
3540 ereport(ERROR,
3542 errmsg("could not read file \"%s\": read %d of %zu",
3543 path, r, (Size) nread)));
3544 }
3546 }
3547 errno = 0;
3549 if ((int) write(fd, buffer.data, sizeof(buffer)) != (int) sizeof(buffer))
3550 {
3551 int save_errno = errno;
3552
3553 /*
3554 * If we fail to make the file, delete it to release disk space
3555 */
3556 unlink(tmppath);
3557 /* if write didn't set errno, assume problem is no disk space */
3559
3560 ereport(ERROR,
3562 errmsg("could not write to file \"%s\": %m", tmppath)));
3563 }
3565 }
3566
3568 if (pg_fsync(fd) != 0)
3571 errmsg("could not fsync file \"%s\": %m", tmppath)));
3573
3574 if (CloseTransientFile(fd) != 0)
3575 ereport(ERROR,
3577 errmsg("could not close file \"%s\": %m", tmppath)));
3578
3579 if (CloseTransientFile(srcfd) != 0)
3580 ereport(ERROR,
3582 errmsg("could not close file \"%s\": %m", path)));
3583
3584 /*
3585 * Now move the segment into place with its final name.
3586 */
3588 elog(ERROR, "InstallXLogFileSegment should not have failed");
3589}
3590
3591/*
3592 * Install a new XLOG segment file as a current or future log segment.
3593 *
3594 * This is used both to install a newly-created segment (which has a temp
3595 * filename while it's being created) and to recycle an old segment.
3596 *
3597 * *segno: identify segment to install as (or first possible target).
3598 * When find_free is true, this is modified on return to indicate the
3599 * actual installation location or last segment searched.
3600 *
3601 * tmppath: initial name of file to install. It will be renamed into place.
3602 *
3603 * find_free: if true, install the new segment at the first empty segno
3604 * number at or after the passed numbers. If false, install the new segment
3605 * exactly where specified, deleting any existing segment file there.
3606 *
3607 * max_segno: maximum segment number to install the new file as. Fail if no
3608 * free slot is found between *segno and max_segno. (Ignored when find_free
3609 * is false.)
3610 *
3611 * tli: The timeline on which the new segment should be installed.
3612 *
3613 * Returns true if the file was installed successfully. false indicates that
3614 * max_segno limit was exceeded, the startup process has disabled this
3615 * function for now, or an error occurred while renaming the file into place.
3616 */
3617 static bool
/*
 * NOTE(review): the parameter-list line was dropped by the extraction.
 * Per the header comment above, the expected parameters are
 * (XLogSegNo *segno, const char *tmppath, bool find_free,
 *  XLogSegNo max_segno, TimeLineID tli) -- confirm against upstream xlog.c.
 */
3620 {
3621 char path[MAXPGPATH];
3622 struct stat stat_buf;
3623
3624 Assert(tli != 0);
3625
3626 XLogFilePath(path, tli, *segno, wal_segment_size);
3627
/*
 * NOTE(review): the guard condition was dropped by the extraction here
 * (original lines 3628-3629) -- presumably the locked check of whether the
 * startup process has disabled installation (InstallXLogFileSegmentActive,
 * per the header comment's "startup process has disabled this function").
 * Confirm against upstream.
 */
3630 {
3632 return false;
3633 }
3634
3635 if (!find_free)
3636 {
3637 /* Force installation: get rid of any pre-existing segment file */
3638 durable_unlink(path, DEBUG1);
3639 }
3640 else
3641 {
3642 /* Find a free slot to put it in */
/* Linear probe: advance *segno until stat() fails (no file there). */
3643 while (stat(path, &stat_buf) == 0)
3644 {
3645 if ((*segno) >= max_segno)
3646 {
3647 /* Failed to find a free slot within specified range */
3649 return false;
3650 }
3651 (*segno)++;
3652 XLogFilePath(path, tli, *segno, wal_segment_size);
3653 }
3654 }
3655
/* The chosen target name must not exist; move the temp file into place. */
3656 Assert(access(path, F_OK) != 0 && errno == ENOENT);
3657 if (durable_rename(tmppath, path, LOG) != 0)
3658 {
3660 /* durable_rename already emitted log message */
3661 return false;
3662 }
3663
3665
3666 return true;
3667}
3668
3669/*
3670 * Open a pre-existing logfile segment for writing.
3671 */
3672 int
/*
 * NOTE(review): the name/parameter line was dropped by the extraction; body
 * usage shows parameters (segno, tli) -- i.e. XLogFileOpen. Confirm upstream.
 */
3674 {
3675 char path[MAXPGPATH];
3676 int fd;
3677
3678 XLogFilePath(path, tli, segno, wal_segment_size);
3679
/*
 * NOTE(review): the call that opens the file and sets fd was dropped by the
 * extraction (original lines 3680-3681); presumably BasicOpenFile() with
 * O_RDWR | PG_BINARY | get_sync_bit(wal_sync_method) -- confirm upstream.
 */
3682 if (fd < 0)
3683 ereport(PANIC,
3685 errmsg("could not open file \"%s\": %m", path)));
3686
3687 return fd;
3688}
3689
3690/*
3691 * Close the current logfile segment for writing.
3692 */
3693 static void
3695 {
3696 Assert(openLogFile >= 0);
3697
3698 /*
3699 * WAL segment files will not be re-read in normal operation, so we advise
3700 * the OS to release any cached pages. But do not do so if WAL archiving
3701 * or streaming is active, because archiver and walsender process could
3702 * use the cache to read the WAL segment.
3703 */
3704#if defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
3705 if (!XLogIsNeeded() && (io_direct_flags & IO_DIRECT_WAL) == 0)
/* NOTE(review): the posix_fadvise(..., POSIX_FADV_DONTNEED) call itself was
 * dropped by the extraction (original line 3706). */
3707#endif
3708
3709 if (close(openLogFile) != 0)
3710 {
3711 char xlogfname[MAXFNAMELEN];
3712 int save_errno = errno;
3713
/*
 * NOTE(review): the XLogFileName() call that fills xlogfname was dropped by
 * the extraction (original line 3714). errno is restored afterwards so that
 * %m in the message reports close()'s failure, not any later call's.
 */
3715 errno = save_errno;
3716 ereport(PANIC,
3718 errmsg("could not close file \"%s\": %m", xlogfname)));
3719 }
3720
/* Mark the cached file descriptor as no longer open. */
3721 openLogFile = -1;
3723}
3724
3725/*
3726 * Preallocate log files beyond the specified log endpoint.
3727 *
3728 * XXX this is currently extremely conservative, since it forces only one
3729 * future log segment to exist, and even that only if we are 75% done with
3730 * the current one. This is only appropriate for very low-WAL-volume systems.
3731 * High-volume systems will be OK once they've built up a sufficient set of
3732 * recycled log segments, but the startup transient is likely to include
3733 * a lot of segment creations by foreground processes, which is not so good.
3734 *
3735 * XLogFileInitInternal() can ereport(ERROR). All known causes indicate big
3736 * trouble; for example, a full filesystem is one cause. The checkpoint WAL
3737 * and/or ControlFile updates already completed. If a RequestCheckpoint()
3738 * initiated the present checkpoint and an ERROR ends this function, the
3739 * command that called RequestCheckpoint() fails. That's not ideal, but it's
3740 * not worth contorting more functions to use caller-specified elevel values.
3741 * (With or without RequestCheckpoint(), an ERROR forestalls some inessential
3742 * reporting and resource reclamation.)
3743 */
3744 static void
/*
 * NOTE(review): several lines were dropped by the extraction: the parameter
 * list (per the header comment, endptr and tli), the XLogSegNo _logSegNo
 * declaration and its initialization from endptr, the unlocked guard before
 * the early return below, and the statistics/bookkeeping after a successful
 * add. Confirm against upstream xlog.c.
 */
3746 {
3748 int lf;
3749 bool added;
3750 char path[MAXPGPATH];
3751 uint64 offset;
3752
3754 return; /* unlocked check says no */
3755
/* Only pre-create the next segment once the current one is >= 75% used. */
3757 offset = XLogSegmentOffset(endptr - 1, wal_segment_size);
3758 if (offset >= (uint32) (0.75 * wal_segment_size))
3759 {
3760 _logSegNo++;
3761 lf = XLogFileInitInternal(_logSegNo, tli, &added, path);
3762 if (lf >= 0)
3763 close(lf);
3764 if (added)
/* NOTE(review): the statement taken when 'added' is true was dropped by the
 * extraction (original line 3765). */
3766 }
3767}
3768
3769/*
3770 * Throws an error if the given log segment has already been removed or
3771 * recycled. The caller should only pass a segment that it knows to have
3772 * existed while the server has been running, as this function always
3773 * succeeds if no WAL segments have been removed since startup.
3774 * 'tli' is only used in the error message.
3775 *
3776 * Note: this function guarantees to keep errno unchanged on return.
3777 * This supports callers that use this to possibly deliver a better
3778 * error message about a missing file, while still being able to throw
3779 * a normal file-access error afterwards, if this does return.
3780 */
3781 void
/*
 * NOTE(review): the name/parameter line was dropped by the extraction; body
 * usage shows parameters (segno, tli) -- i.e. CheckXLogRemoved per the
 * header comment. Confirm upstream.
 */
3783 {
/* Save errno up front; the header comment guarantees it is unchanged on
 * return, so it is restored on every exit path below. */
3784 int save_errno = errno;
3785 XLogSegNo lastRemovedSegNo;
3786
/* NOTE(review): the lock/unlock calls bracketing this shared-memory read
 * were dropped by the extraction (original lines 3787/3789). */
3788 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3790
3791 if (segno <= lastRemovedSegNo)
3792 {
3793 char filename[MAXFNAMELEN];
3794
/* NOTE(review): the XLogFileName() call that fills 'filename' was dropped
 * by the extraction (original line 3795). */
3796 errno = save_errno;
3797 ereport(ERROR,
3799 errmsg("requested WAL segment %s has already been removed",
3800 filename)));
3801 }
3802 errno = save_errno;
3803}
3804
3805/*
3806 * Return the last WAL segment removed, or 0 if no segment has been removed
3807 * since startup.
3808 *
3809 * NB: the result can be out of date arbitrarily fast, the caller has to deal
3810 * with that.
3811 */
3814{
3815 XLogSegNo lastRemovedSegNo;
3816
3818 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3820
3821 return lastRemovedSegNo;
3822}
3823
3824/*
3825 * Return the oldest WAL segment on the given TLI that still exists in
3826 * XLOGDIR, or 0 if none.
3827 */
3830{
3831 DIR *xldir;
3832 struct dirent *xlde;
3834
3836 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3837 {
3840
3841 /* Ignore files that are not XLOG segments. */
3842 if (!IsXLogFileName(xlde->d_name))
3843 continue;
3844
3845 /* Parse filename to get TLI and segno. */
3848
3849 /* Ignore anything that's not from the TLI of interest. */
3850 if (tli != file_tli)
3851 continue;
3852
3853 /* If it's the oldest so far, update oldest_segno. */
3854 if (oldest_segno == 0 || file_segno < oldest_segno)
3856 }
3857
3858 FreeDir(xldir);
3859 return oldest_segno;
3860}
3861
3862/*
3863 * Update the last removed segno pointer in shared memory, to reflect that the
3864 * given XLOG file has been removed.
3865 */
3866 static void
/*
 * NOTE(review): the name/parameter line was dropped by the extraction; per
 * the header comment above and its caller in RemoveOldXlogFiles this is
 * UpdateLastRemovedPtr(char *filename) -- confirm upstream.
 */
3868 {
3869 uint32 tli;
3870 XLogSegNo segno;
3871
/* NOTE(review): the XLogFromFileName() parse and the lock acquisition
 * before the shared-memory update were dropped by the extraction
 * (original lines 3872/3874); the matching release (3877) is also absent. */
3873
/* Monotonic update: only advance the shared high-water mark. */
3875 if (segno > XLogCtl->lastRemovedSegNo)
3876 XLogCtl->lastRemovedSegNo = segno;
3878}
3879
3880/*
3881 * Remove all temporary log files in pg_wal
3882 *
3883 * This is called at the beginning of recovery after a previous crash,
3884 * at a point where no other processes write fresh WAL data.
3885 */
3886 static void
/* NOTE(review): the name line was dropped by the extraction; per the header
 * comment this is RemoveTempXlogFiles(void) -- confirm upstream. */
3888 {
3889 DIR *xldir;
3890 struct dirent *xlde;
3891
3892 elog(DEBUG2, "removing all temporary WAL segments");
3893
/* NOTE(review): the AllocateDir(XLOGDIR) call was dropped by the extraction
 * (original line 3894). */
3895 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3896 {
3897 char path[MAXPGPATH];
3898
/* Only files created by the "xlogtemp.<pid>" naming scheme are touched. */
3899 if (strncmp(xlde->d_name, "xlogtemp.", 9) != 0)
3900 continue;
3901
3902 snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name);
/* unlink() result is not checked here; removal is best-effort. */
3903 unlink(path);
3904 elog(DEBUG2, "removed temporary WAL segment \"%s\"", path);
3905 }
3906 FreeDir(xldir);
3907}
3908
3909/*
3910 * Recycle or remove all log files older or equal to passed segno.
3911 *
3912 * endptr is current (or recent) end of xlog, and lastredoptr is the
3913 * redo pointer of the last checkpoint. These are used to determine
3914 * whether we want to recycle rather than delete no-longer-wanted log files.
3915 *
3916 * insertTLI is the current timeline for XLOG insertion. Any recycled
3917 * segments should be reused for this timeline.
3918 */
3919 static void
/*
 * NOTE(review): the parameter-list lines were dropped by the extraction; per
 * the header comment the parameters include the cutoff segno, endptr,
 * lastredoptr and insertTLI -- confirm upstream.
 */
3922 {
3923 DIR *xldir;
3924 struct dirent *xlde;
3925 char lastoff[MAXFNAMELEN];
/* NOTE(review): declarations of endlogSegNo/recycleSegNo were dropped by
 * the extraction (original lines 3926-3927). */
3928
3929 /* Initialize info about where to try to recycle to */
/* NOTE(review): the XLByteToSeg/XLOGfileslop computation was dropped by the
 * extraction (original lines 3930-3931). */
3932
3933 /*
3934 * Construct a filename of the last segment to be kept. The timeline ID
3935 * doesn't matter, we ignore that in the comparison. (During recovery,
3936 * InsertTimeLineID isn't set, so we can't use that.)
3937 */
/* NOTE(review): the XLogFileName() call that fills 'lastoff' was dropped by
 * the extraction (original line 3938). */
3939
3940 elog(DEBUG2, "attempting to remove WAL segments older than log file %s",
3941 lastoff);
3942
/* NOTE(review): the AllocateDir(XLOGDIR) call was dropped by the extraction
 * (original line 3943). */
3944
3945 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3946 {
3947 /* Ignore files that are not XLOG segments */
3948 if (!IsXLogFileName(xlde->d_name) &&
3949 !IsPartialXLogFileName(xlde->d_name))
3950 continue;
3951
3952 /*
3953 * We ignore the timeline part of the XLOG segment identifiers in
3954 * deciding whether a segment is still needed. This ensures that we
3955 * won't prematurely remove a segment from a parent timeline. We could
3956 * probably be a little more proactive about removing segments of
3957 * non-parent timelines, but that would be a whole lot more
3958 * complicated.
3959 *
3960 * We use the alphanumeric sorting property of the filenames to decide
3961 * which ones are earlier than the lastoff segment.
3962 */
/* "+ 8" skips the timeline-ID prefix of the file name, per the comment
 * above, so only the segment-number portion is compared. */
3963 if (strcmp(xlde->d_name + 8, lastoff + 8) <= 0)
3964 {
3965 if (XLogArchiveCheckDone(xlde->d_name))
3966 {
3967 /* Update the last removed location in shared memory first */
3968 UpdateLastRemovedPtr(xlde->d_name);
3969
/* NOTE(review): the RemoveXlogFile() call was dropped by the extraction
 * (original line 3970). */
3971 }
3972 }
3973 }
3974
3975 FreeDir(xldir);
3976}
3977
3978/*
3979 * Recycle or remove WAL files that are not part of the given timeline's
3980 * history.
3981 *
3982 * This is called during recovery, whenever we switch to follow a new
3983 * timeline, and at the end of recovery when we create a new timeline. We
3984 * wouldn't otherwise care about extra WAL files lying in pg_wal, but they
3985 * might be leftover pre-allocated or recycled WAL segments on the old timeline
3986 * that we haven't used yet, and contain garbage. If we just leave them in
3987 * pg_wal, they will eventually be archived, and we can't let that happen.
3988 * Files that belong to our timeline history are valid, because we have
3989 * successfully replayed them, but from others we can't be sure.
3990 *
3991 * 'switchpoint' is the current point in WAL where we switch to new timeline,
3992 * and 'newTLI' is the new timeline we switch to.
3993 */
3994 void
/* NOTE(review): the name/parameter line was dropped by the extraction; per
 * the header comment this is
 * RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI) --
 * confirm upstream. */
3996 {
3997 DIR *xldir;
3998 struct dirent *xlde;
3999 char switchseg[MAXFNAMELEN];
/* NOTE(review): declarations of endLogSegNo/switchLogSegNo/recycleSegNo
 * were dropped by the extraction (original lines 4000-4002). */
4003
4004 /*
4005 * Initialize info about where to begin the work. This will recycle,
4006 * somewhat arbitrarily, 10 future segments.
4007 */
/* NOTE(review): the segno computations implementing the above were dropped
 * by the extraction (original lines 4008-4010). */
4011
4012 /*
4013 * Construct a filename of the last segment to be kept.
4014 */
/* NOTE(review): the XLogFileName() call that fills 'switchseg' was dropped
 * by the extraction (original line 4015). */
4016
4017 elog(DEBUG2, "attempting to remove WAL segments newer than log file %s",
4018 switchseg);
4019
/* NOTE(review): the AllocateDir(XLOGDIR) call was dropped by the extraction
 * (original line 4020). */
4021
4022 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
4023 {
4024 /* Ignore files that are not XLOG segments */
4025 if (!IsXLogFileName(xlde->d_name))
4026 continue;
4027
4028 /*
4029 * Remove files that are on a timeline older than the new one we're
4030 * switching to, but with a segment number >= the first segment on the
4031 * new timeline.
4032 */
/* First 8 chars = timeline ID (must be older); chars after 8 = segment
 * number (must be newer than the switch segment). */
4033 if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
4034 strcmp(xlde->d_name + 8, switchseg + 8) > 0)
4035 {
4036 /*
4037 * If the file has already been marked as .ready, however, don't
4038 * remove it yet. It should be OK to remove it - files that are
4039 * not part of our timeline history are not required for recovery
4040 * - but seems safer to let them be archived and removed later.
4041 */
4042 if (!XLogArchiveIsReady(xlde->d_name))
/* NOTE(review): the RemoveXlogFile() call was dropped by the extraction
 * (original line 4043). */
4044 }
4045 }
4046
4047 FreeDir(xldir);
4048}
4049
4050/*
4051 * Recycle or remove a log file that's no longer needed.
4052 *
4053 * segment_de is the dirent structure of the segment to recycle or remove.
4054 * recycleSegNo is the segment number to recycle up to. endlogSegNo is
4055 * the segment number of the current (or recent) end of WAL.
4056 *
4057 * endlogSegNo gets incremented if the segment is recycled so as it is not
4058 * checked again with future callers of this function.
4059 *
4060 * insertTLI is the current timeline for XLOG insertion. Any recycled segments
4061 * should be used for this timeline.
4062 */
4063 static void
/*
 * NOTE(review): the parameter-list lines were dropped by the extraction;
 * per the header comment the parameters are
 * (const struct dirent *segment_de, XLogSegNo recycleSegNo,
 *  XLogSegNo *endlogSegNo, TimeLineID insertTLI) -- confirm upstream.
 */
4067 {
4068 char path[MAXPGPATH];
4069#ifdef WIN32
4070 char newpath[MAXPGPATH];
4071#endif
4072 const char *segname = segment_de->d_name;
4073
4074 snprintf(path, MAXPGPATH, XLOGDIR "/%s", segname);
4075
4076 /*
4077 * Before deleting the file, see if it can be recycled as a future log
4078 * segment. Only recycle normal files, because we don't want to recycle
4079 * symbolic links pointing to a separate archive directory.
4080 */
4081 if (wal_recycle &&
/* NOTE(review): one condition line (original 4082, likely the
 * *endlogSegNo <= recycleSegNo bound) and the start of the
 * InstallXLogFileSegment(...) call (original 4085) were dropped by the
 * extraction; line 4086 below is that call's trailing arguments. */
4083 XLogCtl->InstallXLogFileSegmentActive && /* callee rechecks this */
4084 get_dirent_type(path, segment_de, false, DEBUG2) == PGFILETYPE_REG &&
4086 true, recycleSegNo, insertTLI))
4087 {
/* NOTE(review): the ereport(DEBUG2, ...) wrapper line was dropped by the
 * extraction (original line 4088). */
4089 (errmsg_internal("recycled write-ahead log file \"%s\"",
4090 segname)));
4092 /* Needn't recheck that slot on future iterations */
4093 (*endlogSegNo)++;
4094 }
4095 else
4096 {
4097 /* No need for any more future segments, or recycling failed ... */
4098 int rc;
4099
/* NOTE(review): the ereport(DEBUG2, ...) wrapper line was dropped by the
 * extraction (original line 4100). */
4101 (errmsg_internal("removing write-ahead log file \"%s\"",
4102 segname)));
4103
4104#ifdef WIN32
4105
4106 /*
4107 * On Windows, if another process (e.g another backend) holds the file
4108 * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
4109 * will still show up in directory listing until the last handle is
4110 * closed. To avoid confusing the lingering deleted file for a live
4111 * WAL file that needs to be archived, rename it before deleting it.
4112 *
4113 * If another process holds the file open without FILE_SHARE_DELETE
4114 * flag, rename will fail. We'll try again at the next checkpoint.
4115 */
4116 snprintf(newpath, MAXPGPATH, "%s.deleted", path);
4117 if (rename(path, newpath) != 0)
4118 {
4119 ereport(LOG,
4121 errmsg("could not rename file \"%s\": %m",
4122 path)));
4123 return;
4124 }
4125 rc = durable_unlink(newpath, LOG);
4126#else
4127 rc = durable_unlink(path, LOG);
4128#endif
4129 if (rc != 0)
4130 {
4131 /* Message already logged by durable_unlink() */
4132 return;
4133 }
/* NOTE(review): a statement following successful removal was dropped by the
 * extraction (original line 4134). */
4135 }
4136
/* NOTE(review): the archive-status cleanup call (likely
 * XLogArchiveCleanup(segname)) was dropped by the extraction
 * (original line 4137) -- confirm upstream. */
4138}
4139
4140/*
4141 * Verify whether pg_wal, pg_wal/archive_status, and pg_wal/summaries exist.
4142 * If the latter do not exist, recreate them.
4143 *
4144 * It is not the goal of this function to verify the contents of these
4145 * directories, but to help in cases where someone has performed a cluster
4146 * copy for PITR purposes but omitted pg_wal from the copy.
4147 *
4148 * We could also recreate pg_wal if it doesn't exist, but a deliberate
4149 * policy decision was made not to. It is fairly common for pg_wal to be
4150 * a symlink, and if that was the DBA's intent then automatically making a
4151 * plain directory would result in degraded performance with no notice.
4152 */
4153 static void
/* NOTE(review): the name line was dropped by the extraction; per the header
 * comment this is ValidateXLOGDirectoryStructure(void) -- confirm upstream. */
4155 {
4156 char path[MAXPGPATH];
4157 struct stat stat_buf;
4158
4159 /* Check for pg_wal; if it doesn't exist, error out */
4160 if (stat(XLOGDIR, &stat_buf) != 0 ||
4161 !S_ISDIR(stat_buf.st_mode))
4162 ereport(FATAL,
4164 errmsg("required WAL directory \"%s\" does not exist",
4165 XLOGDIR)));
4166
4167 /* Check for archive_status */
4168 snprintf(path, MAXPGPATH, XLOGDIR "/archive_status");
4169 if (stat(path, &stat_buf) == 0)
4170 {
4171 /* Check for weird cases where it exists but isn't a directory */
/* The "does not exist" wording is reused for the exists-but-not-a-directory
 * case as well. */
4172 if (!S_ISDIR(stat_buf.st_mode))
4173 ereport(FATAL,
4175 errmsg("required WAL directory \"%s\" does not exist",
4176 path)));
4177 }
4178 else
4179 {
4180 ereport(LOG,
4181 (errmsg("creating missing WAL directory \"%s\"", path)));
4182 if (MakePGDirectory(path) < 0)
4183 ereport(FATAL,
4185 errmsg("could not create missing directory \"%s\": %m",
4186 path)));
4187 }
4188
4189 /* Check for summaries */
/* Deliberately parallel to the archive_status branch above. */
4190 snprintf(path, MAXPGPATH, XLOGDIR "/summaries");
4191 if (stat(path, &stat_buf) == 0)
4192 {
4193 /* Check for weird cases where it exists but isn't a directory */
4194 if (!S_ISDIR(stat_buf.st_mode))
4195 ereport(FATAL,
4196 (errmsg("required WAL directory \"%s\" does not exist",
4197 path)));
4198 }
4199 else
4200 {
4201 ereport(LOG,
4202 (errmsg("creating missing WAL directory \"%s\"", path)));
4203 if (MakePGDirectory(path) < 0)
4204 ereport(FATAL,
4205 (errmsg("could not create missing directory \"%s\": %m",
4206 path)));
4207 }
4208}
4209
4210/*
4211 * Remove previous backup history files. This also retries creation of
4212 * .ready files for any backup history files for which XLogArchiveNotify
4213 * failed earlier.
4214 */
4215 static void
/* NOTE(review): the name line was dropped by the extraction; per the header
 * comment this is CleanupBackupHistory(void) -- confirm upstream. */
4217 {
4218 DIR *xldir;
4219 struct dirent *xlde;
4220 char path[MAXPGPATH + sizeof(XLOGDIR)];
4221
/* NOTE(review): the AllocateDir(XLOGDIR) call was dropped by the extraction
 * (original line 4222). */
4223
4224 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
4225 {
4226 if (IsBackupHistoryFileName(xlde->d_name))
4227 {
/* XLogArchiveCheckDone() also retries .ready creation for files whose
 * earlier XLogArchiveNotify failed, per the header comment. */
4228 if (XLogArchiveCheckDone(xlde->d_name))
4229 {
4230 elog(DEBUG2, "removing WAL backup history file \"%s\"",
4231 xlde->d_name);
4232 snprintf(path, sizeof(path), XLOGDIR "/%s", xlde->d_name);
/* unlink() result is not checked; removal is best-effort. */
4233 unlink(path);
4234 XLogArchiveCleanup(xlde->d_name);
4235 }
4236 }
4237 }
4238
4239 FreeDir(xldir);
4240}
4241
4242/*
4243 * I/O routines for pg_control
4244 *
4245 * *ControlFile is a buffer in shared memory that holds an image of the
4246 * contents of pg_control. WriteControlFile() initializes pg_control
4247 * given a preloaded buffer, ReadControlFile() loads the buffer from
4248 * the pg_control file (during postmaster or standalone-backend startup),
4249 * and UpdateControlFile() rewrites pg_control after we modify xlog state.
4250 * InitControlFile() fills the buffer with initial values.
4251 *
4252 * For simplicity, WriteControlFile() initializes the fields of pg_control
4253 * that are related to checking backend/database compatibility, and
4254 * ReadControlFile() verifies they are correct. We could split out the
4255 * I/O and compatibility-check functions, but there seems no need currently.
4256 */
4257
4258 static void
4259 InitControlFile(uint64 sysidentifier, uint32 data_checksum_version)
4260 {
/* NOTE(review): a local buffer declaration (for the random nonce) was
 * dropped by the extraction (original line 4261). */
4262
4263 /*
4264 * Generate a random nonce. This is used for authentication requests that
4265 * will fail because the user does not exist. The nonce is used to create
4266 * a genuine-looking password challenge for the non-existent user, in lieu
4267 * of an actual stored password.
4268 */
/* NOTE(review): the pg_strong_random() call whose failure this ereport
 * handles was dropped by the extraction (original line 4269) -- confirm. */
4270 ereport(PANIC,
4272 errmsg("could not generate secret authorization token")));
4273
4274 memset(ControlFile, 0, sizeof(ControlFileData));
4275 /* Initialize pg_control status fields */
4276 ControlFile->system_identifier = sysidentifier;
/* NOTE(review): assignments of the nonce, state, and related status fields
 * were dropped by the extraction (original lines 4277-4279). */
4280
4281 /* Set important parameter values for use when replaying WAL */
/* NOTE(review): the GUC-snapshot assignments (MaxConnections, wal_level,
 * etc., original lines 4282-4289) were dropped by the extraction --
 * confirm upstream. */
4290 ControlFile->data_checksum_version = data_checksum_version;
4291
4292 /*
4293 * Set the data_checksum_version value into XLogCtl, which is where all
4294 * processes get the current value from.
4295 */
4296 XLogCtl->data_checksum_version = data_checksum_version;
4297}
4298
4299 static void
/* NOTE(review): the name line was dropped by the extraction; per the I/O
 * routines comment above this is WriteControlFile(void) -- confirm. */
4301 {
4302 int fd;
4303 char buffer[PG_CONTROL_FILE_SIZE]; /* need not be aligned */
4304
4305 /*
4306 * Initialize version and compatibility-check fields
4307 */
/* NOTE(review): the compatibility-field assignments (pg_control_version,
 * catalog_version_no, maxAlign, blcksz, relseg_size, xlog_blcksz, etc.)
 * were dropped by the extraction (original lines 4308-4325). */
4310
4313
4319
4322
4325
4326 ControlFile->float8ByVal = true; /* vestigial */
4327
4328 /*
4329 * Initialize the default 'char' signedness.
4330 *
4331 * The signedness of the char type is implementation-defined. For instance
4332 * on x86 architecture CPUs, the char data type is typically treated as
4333 * signed by default, whereas on aarch architecture CPUs, it is typically
4334 * treated as unsigned by default. In v17 or earlier, we accidentally let
4335 * C implementation signedness affect persistent data. This led to
4336 * inconsistent results when comparing char data across different
4337 * platforms.
4338 *
4339 * This flag can be used as a hint to ensure consistent behavior for
4340 * pre-v18 data files that store data sorted by the 'char' type on disk,
4341 * especially in cross-platform replication scenarios.
4342 *
4343 * Newly created database clusters unconditionally set the default char
4344 * signedness to true. pg_upgrade changes this flag for clusters that were
4345 * initialized on signedness=false platforms. As a result,
4346 * signedness=false setting will become rare over time. If we had known
4347 * about this problem during the last development cycle that forced initdb
4348 * (v8.3), we would have made all clusters signed or all clusters
4349 * unsigned. Making pg_upgrade the only source of signedness=false will
4350 * cause the population of database clusters to converge toward that
4351 * retrospective ideal.
4352 */
/* NOTE(review): the default_char_signedness assignment was dropped by the
 * extraction (original line 4353). */
4354
4355 /* Contents are protected with a CRC */
/* NOTE(review): the INIT/COMP/FIN_CRC32C sequence computing
 * ControlFile->crc was dropped by the extraction (original lines
 * 4356-4360). */
4361
4362 /*
4363 * We write out PG_CONTROL_FILE_SIZE bytes into pg_control, zero-padding
4364 * the excess over sizeof(ControlFileData). This reduces the odds of
4365 * premature-EOF errors when reading pg_control. We'll still fail when we
4366 * check the contents of the file, but hopefully with a more specific
4367 * error than "couldn't read pg_control".
4368 */
4369 memset(buffer, 0, PG_CONTROL_FILE_SIZE);
4370 memcpy(buffer, ControlFile, sizeof(ControlFileData));
4371
/* NOTE(review): the open call creating XLOG_CONTROL_FILE and setting fd was
 * dropped by the extraction (original lines 4372-4373). */
4374 if (fd < 0)
4375 ereport(PANIC,
4377 errmsg("could not create file \"%s\": %m",
4379
4380 errno = 0;
/* NOTE(review): the write(fd, buffer, PG_CONTROL_FILE_SIZE) call whose
 * failure this block handles was dropped by the extraction
 * (original lines 4381-4382). */
4383 {
4384 /* if write didn't set errno, assume problem is no disk space */
4385 if (errno == 0)
4386 errno = ENOSPC;
4387 ereport(PANIC,
4389 errmsg("could not write to file \"%s\": %m",
4391 }
4393
4395 if (pg_fsync(fd) != 0)
4396 ereport(PANIC,
4398 errmsg("could not fsync file \"%s\": %m",
4401
4402 if (close(fd) != 0)
4403 ereport(PANIC,
4405 errmsg("could not close file \"%s\": %m",
4407}
4408
4409 static void
/* NOTE(review): the name line was dropped by the extraction; per the I/O
 * routines comment above this is ReadControlFile(void) -- confirm. */
4411 {
4412 pg_crc32c crc;
4413 int fd;
4414 char wal_segsz_str[20];
4415 int r;
4416
4417 /*
4418 * Read data...
4419 */
/* NOTE(review): the start of the open call was dropped by the extraction
 * (original line 4420); line 4421 below is its trailing flag arguments. */
4421 O_RDWR | PG_BINARY);
4422 if (fd < 0)
4423 ereport(PANIC,
4425 errmsg("could not open file \"%s\": %m",
4427
4429 r = read(fd, ControlFile, sizeof(ControlFileData));
4430 if (r != sizeof(ControlFileData))
4431 {
/* Distinguish an I/O error (r < 0, %m meaningful) from a short read. */
4432 if (r < 0)
4433 ereport(PANIC,
4435 errmsg("could not read file \"%s\": %m",
4437 else
4438 ereport(PANIC,
4440 errmsg("could not read file \"%s\": read %d of %zu",
4441 XLOG_CONTROL_FILE, r, sizeof(ControlFileData))));
4442 }
4444
4445 close(fd);
4446
4447 /*
4448 * Check for expected pg_control format version. If this is wrong, the
4449 * CRC check will likely fail because we'll be checking the wrong number
4450 * of bytes. Complaining about wrong version will probably be more
4451 * enlightening than complaining about wrong CRC.
4452 */
4453
/* NOTE(review): the byte-swapped-version test guarding this ereport was
 * dropped by the extraction (original line 4454). */
4455 ereport(FATAL,
4457 errmsg("database files are incompatible with server"),
4458 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x),"
4459 " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).",
4462 errhint("This could be a problem of mismatched byte ordering. It looks like you need to initdb.")));
4463
/* NOTE(review): the plain version-mismatch test guarding this ereport was
 * dropped by the extraction (original line 4464). */
4465 ereport(FATAL,
4467 errmsg("database files are incompatible with server"),
4468 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
4469 " but the server was compiled with PG_CONTROL_VERSION %d.",
4471 errhint("It looks like you need to initdb.")));
4472
4473 /* Now check the CRC. */
/* NOTE(review): the INIT_CRC32C/COMP_CRC32C calls preceding FIN_CRC32C were
 * dropped by the extraction (original lines 4474-4477). */
4478 FIN_CRC32C(crc);
4479
4480 if (!EQ_CRC32C(crc, ControlFile->crc))
4481 ereport(FATAL,
4483 errmsg("incorrect checksum in control file")));
4484
4485 /*
4486 * Do compatibility checking immediately. If the database isn't
4487 * compatible with the backend executable, we want to abort before we can
4488 * possibly do any damage.
4489 */
/* NOTE(review): in each of the compatibility checks below, the guarding
 * if-condition line and/or the errcode line were dropped by the extraction;
 * the fields tested are evident from the errdetail arguments. */
4491 ereport(FATAL,
4493 errmsg("database files are incompatible with server"),
4494 /* translator: %s is a variable name and %d is its value */
4495 errdetail("The database cluster was initialized with %s %d,"
4496 " but the server was compiled with %s %d.",
4497 "CATALOG_VERSION_NO", ControlFile->catalog_version_no,
4498 "CATALOG_VERSION_NO", CATALOG_VERSION_NO),
4499 errhint("It looks like you need to initdb.")));
4501 ereport(FATAL,
4503 errmsg("database files are incompatible with server"),
4504 /* translator: %s is a variable name and %d is its value */
4505 errdetail("The database cluster was initialized with %s %d,"
4506 " but the server was compiled with %s %d.",
4507 "MAXALIGN", ControlFile->maxAlign,
4508 "MAXALIGN", MAXIMUM_ALIGNOF),
4509 errhint("It looks like you need to initdb.")));
4511 ereport(FATAL,
4513 errmsg("database files are incompatible with server"),
4514 errdetail("The database cluster appears to use a different floating-point number format than the server executable."),
4515 errhint("It looks like you need to initdb.")));
4516 if (ControlFile->blcksz != BLCKSZ)
4517 ereport(FATAL,
4519 errmsg("database files are incompatible with server"),
4520 /* translator: %s is a variable name and %d is its value */
4521 errdetail("The database cluster was initialized with %s %d,"
4522 " but the server was compiled with %s %d.",
4523 "BLCKSZ", ControlFile->blcksz,
4524 "BLCKSZ", BLCKSZ),
4525 errhint("It looks like you need to recompile or initdb.")));
4527 ereport(FATAL,
4529 errmsg("database files are incompatible with server"),
4530 /* translator: %s is a variable name and %d is its value */
4531 errdetail("The database cluster was initialized with %s %d,"
4532 " but the server was compiled with %s %d.",
4533 "RELSEG_SIZE", ControlFile->relseg_size,
4534 "RELSEG_SIZE", RELSEG_SIZE),
4535 errhint("It looks like you need to recompile or initdb.")));
4537 ereport(FATAL,
4539 errmsg("database files are incompatible with server"),
4540 /* translator: %s is a variable name and %d is its value */
4541 errdetail("The database cluster was initialized with %s %d,"
4542 " but the server was compiled with %s %d.",
4543 "SLRU_PAGES_PER_SEGMENT", ControlFile->slru_pages_per_segment,
4544 "SLRU_PAGES_PER_SEGMENT", SLRU_PAGES_PER_SEGMENT),
4545 errhint("It looks like you need to recompile or initdb.")));
4547 ereport(FATAL,
4549 errmsg("database files are incompatible with server"),
4550 /* translator: %s is a variable name and %d is its value */
4551 errdetail("The database cluster was initialized with %s %d,"
4552 " but the server was compiled with %s %d.",
4553 "XLOG_BLCKSZ", ControlFile->xlog_blcksz,
4554 "XLOG_BLCKSZ", XLOG_BLCKSZ),
4555 errhint("It looks like you need to recompile or initdb.")));
4557 ereport(FATAL,
4559 errmsg("database files are incompatible with server"),
4560 /* translator: %s is a variable name and %d is its value */
4561 errdetail("The database cluster was initialized with %s %d,"
4562 " but the server was compiled with %s %d.",
4563 "NAMEDATALEN", ControlFile->nameDataLen,
4564 "NAMEDATALEN", NAMEDATALEN),
4565 errhint("It looks like you need to recompile or initdb.")));
4567 ereport(FATAL,
4569 errmsg("database files are incompatible with server"),
4570 /* translator: %s is a variable name and %d is its value */
4571 errdetail("The database cluster was initialized with %s %d,"
4572 " but the server was compiled with %s %d.",
4573 "INDEX_MAX_KEYS", ControlFile->indexMaxKeys,
4574 "INDEX_MAX_KEYS", INDEX_MAX_KEYS),
4575 errhint("It looks like you need to recompile or initdb.")));
4577 ereport(FATAL,
4579 errmsg("database files are incompatible with server"),
4580 /* translator: %s is a variable name and %d is its value */
4581 errdetail("The database cluster was initialized with %s %d,"
4582 " but the server was compiled with %s %d.",
4583 "TOAST_MAX_CHUNK_SIZE", ControlFile->toast_max_chunk_size,
4584 "TOAST_MAX_CHUNK_SIZE", (int) TOAST_MAX_CHUNK_SIZE),
4585 errhint("It looks like you need to recompile or initdb.")));
4587 ereport(FATAL,
4589 errmsg("database files are incompatible with server"),
4590 /* translator: %s is a variable name and %d is its value */
4591 errdetail("The database cluster was initialized with %s %d,"
4592 " but the server was compiled with %s %d.",
4593 "LOBLKSIZE", ControlFile->loblksize,
4594 "LOBLKSIZE", (int) LOBLKSIZE),
4595 errhint("It looks like you need to recompile or initdb.")));
4596
4597 Assert(ControlFile->float8ByVal); /* vestigial, not worth an error msg */
4598
/* NOTE(review): the IsValidWalSegSize() check guarding the ereport below
 * was dropped by the extraction (original lines 4599-4602). */
4600
4603 errmsg_plural("invalid WAL segment size in control file (%d byte)",
4604 "invalid WAL segment size in control file (%d bytes)",
4607 errdetail("The WAL segment size must be a power of two between 1 MB and 1 GB.")));
4608
/* Propagate the on-disk segment size into the wal_segment_size GUC; the
 * snprintf filling wal_segsz_str was dropped by the extraction (4609). */
4610 SetConfigOption("wal_segment_size", wal_segsz_str, PGC_INTERNAL,
4612
4613 /* check and update variables dependent on wal_segment_size */
4616 /* translator: both %s are GUC names */
4617 errmsg("\"%s\" must be at least twice \"%s\"",
4618 "min_wal_size", "wal_segment_size")));
4619
4622 /* translator: both %s are GUC names */
4623 errmsg("\"%s\" must be at least twice \"%s\"",
4624 "max_wal_size", "wal_segment_size")));
4625
/* NOTE(review): trailing assignments (e.g. UsableBytesInSegment) were
 * dropped by the extraction (original lines 4626-4630). */
4629
4631}
4632
4633/*
4634 * Utility wrapper to update the control file. Note that the control
4635 * file gets flushed.
4636 */
4637static void
4642
4643/*
4644 * Returns the unique system identifier from control file.
4645 */
4646uint64
4652
4653/*
4654 * Returns the random nonce from control file.
4655 */
4656char *
4662
4663/*
4664 * DataChecksumsNeedWrite
4665 * Returns whether data checksums must be written or not
4666 *
4667 * Returns true if data checksums are enabled, or are in the process of being
4668 * enabled. During "inprogress-on" and "inprogress-off" states checksums must
4669 * be written even though they are not verified (see datachecksum_state.c for
4670 * a longer discussion).
4671 *
4672 * This function is intended for callsites which are about to write a data page
4673 * to storage, and need to know whether to re-calculate the checksum for the
4674 * page header. Calling this function must be performed as close to the write
4675 * operation as possible to keep the critical section short.
4676 */
4677bool
4684
4685
4686bool
4688{
4689 bool ret;
4690
4694
4695 return ret;
4696}
4697
4698bool
4700{
4701 bool ret;
4702
4706
4707 return ret;
4708}
4709
4710bool
4712{
4713 bool ret;
4714
4718
4719 return ret;
4720}
4721
4722/*
4723 * DataChecksumsNeedVerify
4724 * Returns whether data checksums must be verified or not
4725 *
4726 * Data checksums are only verified if they are fully enabled in the cluster.
4727 * During the "inprogress-on" and "inprogress-off" states they are only
4728 * updated, not verified (see datachecksum_state.c for a longer discussion).
4729 *
4730 * This function is intended for callsites which have read data and are about
4731 * to perform checksum validation based on the result of this. Calling this
4732 * function must be performed as close to the validation call as possible to
4733 * keep the critical section short. This is in order to protect against time of
4734 * check/time of use situations around data checksum validation.
4735 */
4736bool
4741
4742/*
4743 * SetDataChecksumsOnInProgress
4744 * Sets the data checksum state to "inprogress-on" to enable checksums
4745 *
4746 * To start the process of enabling data checksums in a running cluster the
4747 * data_checksum_version state must be changed to "inprogress-on". See
4748 * SetDataChecksumsOn below for a description on how this state change works.
4749 * This function blocks until all backends in the cluster have acknowledged the
4750 * state transition.
4751 */
4752void
4782
4783/*
4784 * SetDataChecksumsOn
4785 * Set data checksums state to 'on' cluster-wide
4786 *
4787 * Enabling data checksums is performed using two barriers, the first one to
4788 * set the state to "inprogress-on" (done by SetDataChecksumsOnInProgress())
4789 * and the second one to set the state to "on" (done here). Below is a short
4790 * description of the processing, a more detailed write-up can be found in
4791 * datachecksum_state.c.
4792 *
4793 * To start the process of enabling data checksums in a running cluster the
4794 * data_checksum_version state must be changed to "inprogress-on". This state
4795 * requires data checksums to be written but not verified. This ensures that
4796 * all data pages can be checksummed without the risk of false negatives in
4797 * validation during the process. When all existing pages are guaranteed to
4798 * have checksums, and all new pages will be initiated with checksums, the
4799 * state can be changed to "on". Once the state is "on" checksums will be both
4800 * written and verified.
4801 *
4802 * This function blocks until all backends in the cluster have acknowledged the
4803 * state transition.
4804 */
4805void
4807{
4809
4811
4812 /*
4813 * The only allowed state transition to "on" is from "inprogress-on" since
4814 * that state ensures that all pages will have data checksums written. No
 4815 * other state transition to "on" is allowed; if one does happen it's likely
4816 * programmer error.
4817 */
4819 {
4821 elog(WARNING,
4822 "cannot set data checksums to \"on\", current state is not \"inprogress-on\", disabling");
4824 return;
4825 }
4826
4828
4829 INJECTION_POINT("datachecksums-enable-checksums-delay", NULL);
4832
4834
4838
4839 /*
4840 * Update the controlfile before waiting since if we have an immediate
4841 * shutdown while waiting we want to come back up with checksums enabled.
4842 */
4847
4849
4852
4855}
4856
4857/*
4858 * SetDataChecksumsOff
4859 * Disables data checksums cluster-wide
4860 *
4861 * Disabling data checksums must be performed with two sets of barriers, each
4862 * carrying a different state. The state is first set to "inprogress-off"
4863 * during which checksums are still written but not verified. This ensures that
4864 * backends which have yet to observe the state change from "on" won't get
4865 * validation errors on concurrently modified pages. Once all backends have
4866 * changed to "inprogress-off", the barrier for moving to "off" can be emitted.
4867 * This function blocks until all backends in the cluster have acknowledged the
4868 * state transition.
4869 */
4870void
4872{
4874
4876
4877 /* If data checksums are already disabled there is nothing to do */
4879 {
4881 return;
4882 }
4883
4884 /*
4885 * If data checksums are currently enabled, or in the process of being
4886 * enabled, we first transition to the "inprogress-off" state during which
4887 * backends continue to write checksums without verifying them. When all
4888 * backends are in "inprogress-off" the next transition to "off" can be
4889 * performed, after which all data checksum processing is disabled.
4890 */
4893 {
4895
4898
4900
4904
4909
4911
4914
4917
4918 /*
4919 * At this point we know that no backends are verifying data checksums
4920 * during reading. Next, we can safely move to state "off" to also
4921 * stop writing checksums.
4922 */
4923 }
4924 else
4925 {
4926 /*
4927 * Ending up here implies that the checksums state is "inprogress-off"
4928 * and we can transition directly to "off" from there.
4929 */
4931 }
4932
4934 /* Ensure that we don't incur a checkpoint during disabling checksums */
4936
4938
4942
4947
4949
4952
4955}
4956
4957/*
4958 * InitLocalDataChecksumState
4959 *
4960 * Set up backend local caches of controldata variables which may change at
4961 * any point during runtime and thus require special cased locking. So far
4962 * this only applies to data_checksum_version, but it's intended to be general
4963 * purpose enough to handle future cases.
4964 */
4965void
4973
4974void
4975SetLocalDataChecksumState(uint32 data_checksum_version)
4976{
4977 LocalDataChecksumState = data_checksum_version;
4978
4979 data_checksums = data_checksum_version;
4980}
4981
4982/* guc hook */
4983const char *
4988
4989/*
4990 * Return true if the cluster was initialized on a platform where the
4991 * default signedness of char is "signed". This function exists for code
4992 * that deals with pre-v18 data files that store data sorted by the 'char'
4993 * type on disk (e.g., GIN and GiST indexes). See the comments in
4994 * WriteControlFile() for details.
4995 */
4996bool
5001
5002/*
5003 * Returns a fake LSN for unlogged relations.
5004 *
5005 * Each call generates an LSN that is greater than any previous value
5006 * returned. The current counter value is saved and restored across clean
5007 * shutdowns, but like unlogged relations, does not survive a crash. This can
5008 * be used in lieu of real LSN values returned by XLogInsert, if you need an
5009 * LSN-like increasing sequence of numbers without writing any WAL.
5010 */
5016
5017/*
5018 * Auto-tune the number of XLOG buffers.
5019 *
5020 * The preferred setting for wal_buffers is about 3% of shared_buffers, with
5021 * a maximum of one XLOG segment (there is little reason to think that more
5022 * is helpful, at least so long as we force an fsync when switching log files)
5023 * and a minimum of 8 blocks (which was the default value prior to PostgreSQL
5024 * 9.1, when auto-tuning was added).
5025 *
5026 * This should not be called until NBuffers has received its final value.
5027 */
5028static int
5030{
5031 int xbuffers;
5032
5033 xbuffers = NBuffers / 32;
5036 if (xbuffers < 8)
5037 xbuffers = 8;
5038 return xbuffers;
5039}
5040
5041/*
5042 * GUC check_hook for wal_buffers
5043 */
5044bool
5046{
5047 /*
5048 * -1 indicates a request for auto-tune.
5049 */
5050 if (*newval == -1)
5051 {
5052 /*
5053 * If we haven't yet changed the boot_val default of -1, just let it
5054 * be. We'll fix it when XLOGShmemRequest is called.
5055 */
5056 if (XLOGbuffers == -1)
5057 return true;
5058
5059 /* Otherwise, substitute the auto-tune value */
5061 }
5062
5063 /*
5064 * We clamp manually-set values to at least 4 blocks. Prior to PostgreSQL
5065 * 9.1, a minimum of 4 was enforced by guc.c, but since that is no longer
5066 * the case, we just silently treat such values as a request for the
5067 * minimum. (We could throw an error instead, but that doesn't seem very
5068 * helpful.)
5069 */
5070 if (*newval < 4)
5071 *newval = 4;
5072
5073 return true;
5074}
5075
5076/*
5077 * GUC check_hook for wal_consistency_checking
5078 */
5079bool
5081{
5082 char *rawstring;
5083 List *elemlist;
5084 ListCell *l;
5085 bool newwalconsistency[RM_MAX_ID + 1];
5086
5087 /* Initialize the array */
5088 MemSet(newwalconsistency, 0, (RM_MAX_ID + 1) * sizeof(bool));
5089
5090 /* Need a modifiable copy of string */
5092
5093 /* Parse string into list of identifiers */
5095 {
5096 /* syntax error in list */
5097 GUC_check_errdetail("List syntax is invalid.");
5100 return false;
5101 }
5102
5103 foreach(l, elemlist)
5104 {
5105 char *tok = (char *) lfirst(l);
5106 int rmid;
5107
5108 /* Check for 'all'. */
5109 if (pg_strcasecmp(tok, "all") == 0)
5110 {
5111 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
5112 if (RmgrIdExists(rmid) && GetRmgr(rmid).rm_mask != NULL)
5113 newwalconsistency[rmid] = true;
5114 }
5115 else
5116 {
5117 /* Check if the token matches any known resource manager. */
5118 bool found = false;
5119
5120 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
5121 {
5122 if (RmgrIdExists(rmid) && GetRmgr(rmid).rm_mask != NULL &&
5123 pg_strcasecmp(tok, GetRmgr(rmid).rm_name) == 0)
5124 {
5125 newwalconsistency[rmid] = true;
5126 found = true;
5127 break;
5128 }
5129 }
5130 if (!found)
5131 {
5132 /*
5133 * During startup, it might be a not-yet-loaded custom
5134 * resource manager. Defer checking until
5135 * InitializeWalConsistencyChecking().
5136 */
5138 {
5140 }
5141 else
5142 {
5143 GUC_check_errdetail("Unrecognized key word: \"%s\".", tok);
5146 return false;
5147 }
5148 }
5149 }
5150 }
5151
5154
5155 /* assign new value */
5156 *extra = guc_malloc(LOG, (RM_MAX_ID + 1) * sizeof(bool));
5157 if (!*extra)
5158 return false;
5159 memcpy(*extra, newwalconsistency, (RM_MAX_ID + 1) * sizeof(bool));
5160 return true;
5161}
5162
5163/*
5164 * GUC assign_hook for wal_consistency_checking
5165 */
5166void
5168{
5169 /*
5170 * If some checks were deferred, it's possible that the checks will fail
5171 * later during InitializeWalConsistencyChecking(). But in that case, the
5172 * postmaster will exit anyway, so it's safe to proceed with the
5173 * assignment.
5174 *
5175 * Any built-in resource managers specified are assigned immediately,
5176 * which affects WAL created before shared_preload_libraries are
5177 * processed. Any custom resource managers specified won't be assigned
5178 * until after shared_preload_libraries are processed, but that's OK
5179 * because WAL for a custom resource manager can't be written before the
5180 * module is loaded anyway.
5181 */
5183}
5184
5185/*
5186 * InitializeWalConsistencyChecking: run after loading custom resource managers
5187 *
5188 * If any unknown resource managers were specified in the
5189 * wal_consistency_checking GUC, processing was deferred. Now that
5190 * shared_preload_libraries have been loaded, process wal_consistency_checking
5191 * again.
5192 */
5193void
5195{
5197
5199 {
5200 struct config_generic *guc;
5201
5202 guc = find_option("wal_consistency_checking", false, false, ERROR);
5203
5205
5206 set_config_option_ext("wal_consistency_checking",
5208 guc->scontext, guc->source, guc->srole,
5209 GUC_ACTION_SET, true, ERROR, false);
5210
5211 /* checking should not be deferred again */
5213 }
5214}
5215
5216/*
5217 * GUC show_hook for archive_command
5218 */
5219const char *
5221{
5222 if (XLogArchivingActive())
5223 return XLogArchiveCommand;
5224 else
5225 return "(disabled)";
5226}
5227
5228/*
5229 * GUC show_hook for in_hot_standby
5230 */
const char *
show_in_hot_standby(void)
{
	/*
	 * Derive the answer from shared memory so the GUC reports up-to-date
	 * state even when examined intra-query.  The underlying variable
	 * (in_hot_standby_guc) changes only when a new value is transmitted
	 * to the client.
	 */
	if (RecoveryInProgress())
		return "on";
	return "off";
}
5242
5243/*
5244 * GUC show_hook for effective_wal_level
5245 */
5246const char *
5248{
5250 return "minimal";
5251
5252 /*
5253 * During recovery, effective_wal_level reflects the primary's
5254 * configuration rather than the local wal_level value.
5255 */
5256 if (RecoveryInProgress())
5257 return IsXLogLogicalInfoEnabled() ? "logical" : "replica";
5258
5259 return XLogLogicalInfoActive() ? "logical" : "replica";
5260}
5261
5262/*
5263 * Read the control file, set respective GUCs.
5264 *
5265 * This is to be called during startup, including a crash recovery cycle,
5266 * unless in bootstrap mode, where no control file yet exists. As there's no
5267 * usable shared memory yet (its sizing can depend on the contents of the
5268 * control file!), first store the contents in local memory. XLOGShmemInit()
5269 * will then copy it to shared memory later.
5270 *
5271 * reset just controls whether previous contents are to be expected (in the
5272 * reset case, there's a dangling pointer into old shared memory), or not.
5273 */
5274void
5283
5284/*
5285 * Get the wal_level from the control file. For a standby, this value should be
5286 * considered as its active wal_level, because it may be different from what
5287 * was originally configured on standby.
5288 */
5291{
5292 return ControlFile->wal_level;
5293}
5294
5295/*
5296 * Register shared memory for XLOG.
5297 */
5298static void
5300{
5301 Size size;
5302
5303 /*
5304 * If the value of wal_buffers is -1, use the preferred auto-tune value.
5305 * This isn't an amazingly clean place to do this, but we must wait till
5306 * NBuffers has received its final value, and must do it before using the
5307 * value of XLOGbuffers to do anything important.
5308 *
5309 * We prefer to report this value's source as PGC_S_DYNAMIC_DEFAULT.
5310 * However, if the DBA explicitly set wal_buffers = -1 in the config file,
5311 * then PGC_S_DYNAMIC_DEFAULT will fail to override that and we must force
5312 * the matter with PGC_S_OVERRIDE.
5313 */
5314 if (XLOGbuffers == -1)
5315 {
5316 char buf[32];
5317
5318 snprintf(buf, sizeof(buf), "%d", XLOGChooseNumBuffers());
5319 SetConfigOption("wal_buffers", buf, PGC_POSTMASTER,
5321 if (XLOGbuffers == -1) /* failed to apply it? */
5322 SetConfigOption("wal_buffers", buf, PGC_POSTMASTER,
5324 }
5325 Assert(XLOGbuffers > 0);
5326
5327 /* XLogCtl */
5328 size = sizeof(XLogCtlData);
5329
5330 /* WAL insertion locks, plus alignment */
5331 size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
5332 /* xlblocks array */
5333 size = add_size(size, mul_size(sizeof(pg_atomic_uint64), XLOGbuffers));
5334 /* extra alignment padding for XLOG I/O buffers */
5335 size = add_size(size, Max(XLOG_BLCKSZ, PG_IO_ALIGN_SIZE));
5336 /* and the buffers themselves */
5337 size = add_size(size, mul_size(XLOG_BLCKSZ, XLOGbuffers));
5338
5339 ShmemRequestStruct(.name = "XLOG Ctl",
5340 .size = size,
5341 .ptr = (void **) &XLogCtl,
5342 );
5343 ShmemRequestStruct(.name = "Control File",
5344 .size = sizeof(ControlFileData),
5345 .ptr = (void **) &ControlFile,
5346 );
5347}
5348
5349/*
5350 * XLOGShmemInit - initialize the XLogCtl shared memory area.
5351 */
5352static void
5354{
5355 char *allocptr;
5356 int i;
5357
5358#ifdef WAL_DEBUG
5359
5360 /*
5361 * Create a memory context for WAL debugging that's exempt from the normal
5362 * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
5363 * an allocation fails, but wal_debug is not for production use anyway.
5364 */
5365 if (walDebugCxt == NULL)
5366 {
5368 "WAL Debug",
5371 }
5372#endif
5373
5374 memset(XLogCtl, 0, sizeof(XLogCtlData));
5375
5376 /*
5377 * Already have read control file locally, unless in bootstrap mode. Move
5378 * contents into shared memory.
5379 */
5380 if (LocalControlFile)
5381 {
5385 }
5386
5387 /*
5388 * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a
5389 * multiple of the alignment for same, so no extra alignment padding is
5390 * needed here.
5391 */
5392 allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData);
5395
5396 for (i = 0; i < XLOGbuffers; i++)
5397 {
5399 }
5400
5401 /* WAL insertion locks. Ensure they're aligned to the full padded size */
5402 allocptr += sizeof(WALInsertLockPadded) -
5407
5408 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
5409 {
5413 }
5414
5415 /*
5416 * Align the start of the page buffers to a full xlog block size boundary.
5417 * This simplifies some calculations in XLOG insertion. It is also
5418 * required for O_DIRECT.
5419 */
5423
5424 /*
5425 * Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
5426 * in additional info.)
5427 */
5431 XLogCtl->WalWriterSleeping = false;
5432
5433 /* Use the checksum info from control file */
5436
5443}
5444
5445/*
5446 * XLOGShmemAttach - re-establish WALInsertLocks pointer after attaching.
5447 */
5448static void
5453
5454/*
5455 * This func must be called ONCE on system install. It creates pg_control
5456 * and the initial XLOG segment.
5457 */
5458void
5459BootStrapXLOG(uint32 data_checksum_version)
5460{
5461 CheckPoint checkPoint;
5462 PGAlignedXLogBlock buffer;
5463 XLogPageHeader page;
5465 XLogRecord *record;
5466 char *recptr;
5467 uint64 sysidentifier;
5468 struct timeval tv;
5469 pg_crc32c crc;
5470
5471 /* allow ordinary WAL segment creation, like StartupXLOG() would */
5473
5474 /*
5475 * Select a hopefully-unique system identifier code for this installation.
5476 * We use the result of gettimeofday(), including the fractional seconds
5477 * field, as being about as unique as we can easily get. (Think not to
5478 * use random(), since it hasn't been seeded and there's no portable way
5479 * to seed it other than the system clock value...) The upper half of the
5480 * uint64 value is just the tv_sec part, while the lower half contains the
5481 * tv_usec part (which must fit in 20 bits), plus 12 bits from our current
5482 * PID for a little extra uniqueness. A person knowing this encoding can
5483 * determine the initialization time of the installation, which could
5484 * perhaps be useful sometimes.
5485 */
5486 gettimeofday(&tv, NULL);
5487 sysidentifier = ((uint64) tv.tv_sec) << 32;
5488 sysidentifier |= ((uint64) tv.tv_usec) << 12;
5489 sysidentifier |= getpid() & 0xFFF;
5490
5491 memset(&buffer, 0, sizeof buffer);
5492 page = (XLogPageHeader) &buffer;
5493
5494 /*
5495 * Set up information for the initial checkpoint record
5496 *
5497 * The initial checkpoint record is written to the beginning of the WAL
5498 * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
5499 * used, so that we can use 0/0 to mean "before any valid WAL segment".
5500 */
5504 checkPoint.fullPageWrites = fullPageWrites;
5506 checkPoint.wal_level = wal_level;
5507 checkPoint.nextXid =
5509 checkPoint.nextOid = FirstGenbkiObjectId;
5510 checkPoint.nextMulti = FirstMultiXactId;
5511 checkPoint.nextMultiOffset = 1;
5513 checkPoint.oldestXidDB = Template1DbOid;
5514 checkPoint.oldestMulti = FirstMultiXactId;
5515 checkPoint.oldestMultiDB = Template1DbOid;
5518 checkPoint.time = (pg_time_t) time(NULL);
5520 checkPoint.dataChecksumState = data_checksum_version;
5521
5522 TransamVariables->nextXid = checkPoint.nextXid;
5523 TransamVariables->nextOid = checkPoint.nextOid;
5525 MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
5526 AdvanceOldestClogXid(checkPoint.oldestXid);
5527 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
5528 SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB);
5530
5531 /* Set up the XLOG page header */
5532 page->xlp_magic = XLOG_PAGE_MAGIC;
5533 page->xlp_info = XLP_LONG_HEADER;
5537 longpage->xlp_sysid = sysidentifier;
5538 longpage->xlp_seg_size = wal_segment_size;
5539 longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;
5540
5541 /* Insert the initial checkpoint record */
5542 recptr = ((char *) page + SizeOfXLogLongPHD);
5543 record = (XLogRecord *) recptr;
5544 record->xl_prev = InvalidXLogRecPtr;
5545 record->xl_xid = InvalidTransactionId;
5546 record->xl_tot_len = SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(checkPoint);
5548 record->xl_rmid = RM_XLOG_ID;
5550 /* fill the XLogRecordDataHeaderShort struct */
5551 *(recptr++) = (char) XLR_BLOCK_ID_DATA_SHORT;
5552 *(recptr++) = sizeof(checkPoint);
5553 memcpy(recptr, &checkPoint, sizeof(checkPoint));
5554 recptr += sizeof(checkPoint);
5555 Assert(recptr - (char *) record == record->xl_tot_len);
5556
5558 COMP_CRC32C(crc, ((char *) record) + SizeOfXLogRecord, record->xl_tot_len - SizeOfXLogRecord);
5559 COMP_CRC32C(crc, (char *) record, offsetof(XLogRecord, xl_crc));
5560 FIN_CRC32C(crc);
5561 record->xl_crc = crc;
5562
5563 /* Create first XLOG segment file */
5566
5567 /*
5568 * We needn't bother with Reserve/ReleaseExternalFD here, since we'll
5569 * close the file again in a moment.
5570 */
5571
5572 /* Write the first page with the initial record */
5573 errno = 0;
5575 if (write(openLogFile, &buffer, XLOG_BLCKSZ) != XLOG_BLCKSZ)
5576 {
5577 /* if write didn't set errno, assume problem is no disk space */
5578 if (errno == 0)
5579 errno = ENOSPC;
5580 ereport(PANIC,
5582 errmsg("could not write bootstrap write-ahead log file: %m")));
5583 }
5585
5587 if (pg_fsync(openLogFile) != 0)
5588 ereport(PANIC,
5590 errmsg("could not fsync bootstrap write-ahead log file: %m")));
5592
5593 if (close(openLogFile) != 0)
5594 ereport(PANIC,
5596 errmsg("could not close bootstrap write-ahead log file: %m")));
5597
5598 openLogFile = -1;
5599
5600 /* Now create pg_control */
5601 InitControlFile(sysidentifier, data_checksum_version);
5602 ControlFile->time = checkPoint.time;
5603 ControlFile->checkPoint = checkPoint.redo;
5604 ControlFile->checkPointCopy = checkPoint;
5605
5606 /* some additional ControlFile fields are set in WriteControlFile() */
5608
5609 /* Bootstrap the commit log, too */
5610 BootStrapCLOG();
5614
5615 /*
5616 * Force control file to be read - in contrast to normal processing we'd
5617 * otherwise never run the checks and GUC related initializations therein.
5618 */
5620}
5621
5622static char *
5624{
5626 "%Y-%m-%d %H:%M:%S %Z",
5628
5629 return buf;
5630}
5631
5632/*
5633 * Initialize the first WAL segment on new timeline.
5634 */
5635static void
5637{
5638 char xlogfname[MAXFNAMELEN];
5641
5642 /* we always switch to a new timeline after archive recovery */
5643 Assert(endTLI != newTLI);
5644
5645 /*
5646 * Update min recovery point one last time.
5647 */
5649
5650 /*
5651 * Calculate the last segment on the old timeline, and the first segment
5652 * on the new timeline. If the switch happens in the middle of a segment,
5653 * they are the same, but if the switch happens exactly at a segment
5654 * boundary, startLogSegNo will be endLogSegNo + 1.
5655 */
5658
5659 /*
5660 * Initialize the starting WAL segment for the new timeline. If the switch
5661 * happens in the middle of a segment, copy data from the last WAL segment
5662 * of the old timeline up to the switch point, to the starting WAL segment
5663 * on the new timeline.
5664 */
5666 {
5667 /*
5668 * Make a copy of the file on the new timeline.
5669 *
5670 * Writing WAL isn't allowed yet, so there are no locking
5671 * considerations. But we should be just as tense as XLogFileInit to
5672 * avoid emplacing a bogus file.
5673 */
5676 }
5677 else
5678 {
5679 /*
5680 * The switch happened at a segment boundary, so just create the next
5681 * segment on the new timeline.
5682 */
5683 int fd;
5684
5686
5687 if (close(fd) != 0)
5688 {
5689 int save_errno = errno;
5690
5692 errno = save_errno;
5693 ereport(ERROR,
5695 errmsg("could not close file \"%s\": %m", xlogfname)));
5696 }
5697 }
5698
5699 /*
5700 * Let's just make real sure there are not .ready or .done flags posted
5701 * for the new segment.
5702 */
5705}
5706
5707/*
5708 * Perform cleanup actions at the conclusion of archive recovery.
5709 */
5710static void
5713{
5714 /*
5715 * Execute the recovery_end_command, if any.
5716 */
5719 "recovery_end_command",
5720 true,
5722
5723 /*
5724 * We switched to a new timeline. Clean up segments on the old timeline.
5725 *
5726 * If there are any higher-numbered segments on the old timeline, remove
5727 * them. They might contain valid WAL, but they might also be
5728 * pre-allocated files containing garbage. In any case, they are not part
5729 * of the new timeline's history so we don't need them.
5730 */
5732
5733 /*
5734 * If the switch happened in the middle of a segment, what to do with the
5735 * last, partial segment on the old timeline? If we don't archive it, and
5736 * the server that created the WAL never archives it either (e.g. because
5737 * it was hit by a meteor), it will never make it to the archive. That's
5738 * OK from our point of view, because the new segment that we created with
5739 * the new TLI contains all the WAL from the old timeline up to the switch
5740 * point. But if you later try to do PITR to the "missing" WAL on the old
5741 * timeline, recovery won't find it in the archive. It's physically
5742 * present in the new file with new TLI, but recovery won't look there
5743 * when it's recovering to the older timeline. On the other hand, if we
5744 * archive the partial segment, and the original server on that timeline
5745 * is still running and archives the completed version of the same segment
5746 * later, it will fail. (We used to do that in 9.4 and below, and it
5747 * caused such problems).
5748 *
5749 * As a compromise, we rename the last segment with the .partial suffix,
5750 * and archive it. Archive recovery will never try to read .partial
5751 * segments, so they will normally go unused. But in the odd PITR case,
5752 * the administrator can copy them manually to the pg_wal directory
5753 * (removing the suffix). They can be useful in debugging, too.
5754 *
5755 * If a .done or .ready file already exists for the old timeline, however,
5756 * we had already determined that the segment is complete, so we can let
5757 * it be archived normally. (In particular, if it was restored from the
5758 * archive to begin with, it's expected to have a .done file).
5759 */
5762 {
5763 char origfname[MAXFNAMELEN];
5765
5768
5770 {
5771 char origpath[MAXPGPATH];
5773 char partialpath[MAXPGPATH];
5774
5775 /*
5776 * If we're summarizing WAL, we can't rename the partial file
5777 * until the summarizer finishes with it, else it will fail.
5778 */
5779 if (summarize_wal)
5781
5783 snprintf(partialfname, MAXFNAMELEN, "%s.partial", origfname);
5784 snprintf(partialpath, MAXPGPATH, "%s.partial", origpath);
5785
5786 /*
5787 * Make sure there's no .done or .ready file for the .partial
5788 * file.
5789 */
5791
5794 }
5795 }
5796}
5797
5798/*
5799 * Check to see if required parameters are set high enough on this server
5800 * for various aspects of recovery operation.
5801 *
5802 * Note that all the parameters which this function tests need to be
5803 * listed in Administrator's Overview section in high-availability.sgml.
5804 * If you change them, don't forget to update the list.
5805 */
5806static void
5808{
5809 /*
5810 * For archive recovery, the WAL must be generated with at least 'replica'
5811 * wal_level.
5812 */
5814 {
5815 ereport(FATAL,
5817 errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
5818 errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
5819 errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
5820 }
5821
5822 /*
5823 * For Hot Standby, the WAL must be generated with 'replica' mode, and we
5824 * must have at least as many backend slots as the primary.
5825 */
5827 {
5828 /* We ignore autovacuum_worker_slots when we make this test. */
5829 RecoveryRequiresIntParameter("max_connections",
5832 RecoveryRequiresIntParameter("max_worker_processes",
5835 RecoveryRequiresIntParameter("max_wal_senders",
5838 RecoveryRequiresIntParameter("max_prepared_transactions",
5841 RecoveryRequiresIntParameter("max_locks_per_transaction",
5844 }
5845}
5846
5847/*
5848 * This must be called ONCE during postmaster or standalone-backend startup
5849 */
5850void
5852{
5854 CheckPoint checkPoint;
5855 bool wasShutdown;
5856 bool didCrash;
5857 bool haveTblspcMap;
5858 bool haveBackupLabel;
5867 bool promoted = false;
5868 char timebuf[128];
5869
5870 /*
5871 * We should have an aux process resource owner to use, and we should not
5872 * be in a transaction that's installed some other resowner.
5873 */
5878
5879 /*
5880 * Check that contents look valid.
5881 */
5883 ereport(FATAL,
5885 errmsg("control file contains invalid checkpoint location")));
5886
5887 switch (ControlFile->state)
5888 {
5889 case DB_SHUTDOWNED:
5890
5891 /*
5892 * This is the expected case, so don't be chatty in standalone
5893 * mode
5894 */
5896 (errmsg("database system was shut down at %s",
5898 timebuf, sizeof(timebuf)))));
5899 break;
5900
5902 ereport(LOG,
5903 (errmsg("database system was shut down in recovery at %s",
5905 timebuf, sizeof(timebuf)))));
5906 break;
5907
5908 case DB_SHUTDOWNING:
5909 ereport(LOG,
5910 (errmsg("database system shutdown was interrupted; last known up at %s",
5912 timebuf, sizeof(timebuf)))));
5913 break;
5914
5916 ereport(LOG,
5917 (errmsg("database system was interrupted while in recovery at %s",
5919 timebuf, sizeof(timebuf))),
5920 errhint("This probably means that some data is corrupted and"
5921 " you will have to use the last backup for recovery.")));
5922 break;
5923
5925 ereport(LOG,
5926 (errmsg("database system was interrupted while in recovery at log time %s",
5928 timebuf, sizeof(timebuf))),
5929 errhint("If this has occurred more than once some data might be corrupted"
5930 " and you might need to choose an earlier recovery target.")));
5931 break;
5932
5933 case DB_IN_PRODUCTION:
5934 ereport(LOG,
5935 (errmsg("database system was interrupted; last known up at %s",
5937 timebuf, sizeof(timebuf)))));
5938 break;
5939
5940 default:
5941 ereport(FATAL,
5943 errmsg("control file contains invalid database cluster state")));
5944 }
5945
5946 /* This is just to allow attaching to startup process with a debugger */
5947#ifdef XLOG_REPLAY_DELAY
5949 pg_usleep(60000000L);
5950#endif
5951
5952 /*
5953 * Verify that pg_wal, pg_wal/archive_status, and pg_wal/summaries exist.
5954 * In cases where someone has performed a copy for PITR, these directories
5955 * may have been excluded and need to be re-created.
5956 */
5958
5959 /* Set up timeout handler needed to report startup progress. */
5963
5964 /*----------
5965 * If we previously crashed, perform a couple of actions:
5966 *
5967 * - The pg_wal directory may still include some temporary WAL segments
5968 * used when creating a new segment, so perform some clean up to not
5969 * bloat this path. This is done first as there is no point to sync
5970 * this temporary data.
5971 *
5972 * - There might be data which we had written, intending to fsync it, but
5973 * which we had not actually fsync'd yet. Therefore, a power failure in
5974 * the near future might cause earlier unflushed writes to be lost, even
5975 * though more recent data written to disk from here on would be
5976 * persisted. To avoid that, fsync the entire data directory.
5977 */
5980 {
5983 didCrash = true;
5984 }
5985 else
5986 didCrash = false;
5987
5988 /*
5989 * Prepare for WAL recovery if needed.
5990 *
5991 * InitWalRecovery analyzes the control file and the backup label file, if
5992 * any. It updates the in-memory ControlFile buffer according to the
5993 * starting checkpoint, and sets InRecovery and ArchiveRecoveryRequested.
5994 * It also applies the tablespace map file, if any.
5995 */
5998 checkPoint = ControlFile->checkPointCopy;
5999
6000 /* initialize shared memory variables from the checkpoint record */
6001 TransamVariables->nextXid = checkPoint.nextXid;
6002 TransamVariables->nextOid = checkPoint.nextOid;
6004 MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
6005 AdvanceOldestClogXid(checkPoint.oldestXid);
6006 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
6007 SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB);
6009 checkPoint.newestCommitTsXid);
6010
6011 /*
6012 * Clear out any old relcache cache files. This is *necessary* if we do
6013 * any WAL replay, since that would probably result in the cache files
6014 * being out of sync with database reality. In theory we could leave them
6015 * in place if the database had been cleanly shut down, but it seems
6016 * safest to just remove them always and let them be rebuilt during the
 6017 * first backend startup. These files need to be removed from all
6018 * directories including pg_tblspc, however the symlinks are created only
6019 * after reading tablespace_map file in case of archive recovery from
6020 * backup, so needs to clear old relcache files here after creating
6021 * symlinks.
6022 */
6024
6025 /*
6026 * Initialize replication slots, before there's a chance to remove
6027 * required resources.
6028 */
6030
6031 /*
6032 * Startup the logical decoding status with the last status stored in the
6033 * checkpoint record.
6034 */
6036
6037 /*
6038 * Startup logical state, needs to be setup now so we have proper data
6039 * during crash recovery.
6040 */
6042
6043 /*
6044 * Startup CLOG. This must be done after TransamVariables->nextXid has
6045 * been initialized and before we accept connections or begin WAL replay.
6046 */
6047 StartupCLOG();
6048
6049 /*
6050 * Startup MultiXact. We need to do this early to be able to replay
6051 * truncations.
6052 */
6054
6055 /*
6056 * Ditto for commit timestamps. Activate the facility if the setting is
6057 * enabled in the control file, as there should be no tracking of commit
6058 * timestamps done when the setting was disabled. This facility can be
6059 * started or stopped when replaying a XLOG_PARAMETER_CHANGE record.
6060 */
6063
6064 /*
6065 * Recover knowledge about replay progress of known replication partners.
6066 */
6068
6069 /*
6070 * Initialize unlogged LSN. On a clean shutdown, it's restored from the
6071 * control file. On recovery, all unlogged relations are blown away, so
6072 * the unlogged LSN counter can be reset too.
6073 */
6077 else
6080
6081 /*
6082 * Copy any missing timeline history files between 'now' and the recovery
6083 * target timeline from archive to pg_wal. While we don't need those files
6084 * ourselves - the history file of the recovery target timeline covers all
6085 * the previous timelines in the history too - a cascading standby server
6086 * might be interested in them. Or, if you archive the WAL from this
6087 * server to a different archive than the primary, it'd be good for all
6088 * the history files to get archived there after failover, so that you can
6089 * use one of the old timelines as a PITR target. Timeline history files
6090 * are small, so it's better to copy them unnecessarily than not copy them
6091 * and regret later.
6092 */
6094
6095 /*
6096 * Before running in recovery, scan pg_twophase and fill in its status to
6097 * be able to work on entries generated by redo. Doing a scan before
6098 * taking any recovery action has the merit to discard any 2PC files that
6099 * are newer than the first record to replay, saving from any conflicts at
6100 * replay. This avoids as well any subsequent scans when doing recovery
6101 * of the on-disk two-phase data.
6102 */
6104
6105 /*
6106 * When starting with crash recovery, reset pgstat data - it might not be
6107 * valid. Otherwise restore pgstat data. It's safe to do this here,
6108 * because postmaster will not yet have started any other processes.
6109 *
6110 * NB: Restoring replication slot stats relies on slot state to have
6111 * already been restored from disk.
6112 *
6113 * TODO: With a bit of extra work we could just start with a pgstat file
6114 * associated with the checkpoint redo location we're starting from.
6115 */
6116 if (didCrash)
6118 else
6120
6122
6125
6126 /* REDO */
6127 if (InRecovery)
6128 {
6129 /* Initialize state for RecoveryInProgress() */
6133 else
6136
6137 /*
6138 * Update pg_control to show that we are recovering and to show the
6139 * selected checkpoint as the place we are starting from. We also mark
6140 * pg_control with any minimum recovery stop point obtained from a
6141 * backup history file.
6142 *
6143 * No need to hold ControlFileLock yet, we aren't up far enough.
6144 */
6146
6147 /*
6148 * If there was a backup label file, it's done its job and the info
6149 * has now been propagated into pg_control. We must get rid of the
6150 * label file so that if we crash during recovery, we'll pick up at
6151 * the latest recovery restartpoint instead of going all the way back
6152 * to the backup start point. It seems prudent though to just rename
6153 * the file out of the way rather than delete it completely.
6154 */
6155 if (haveBackupLabel)
6156 {
6159 }
6160
6161 /*
6162 * If there was a tablespace_map file, it's done its job and the
6163 * symlinks have been created. We must get rid of the map file so
6164 * that if we crash during recovery, we don't create symlinks again.
6165 * It seems prudent though to just rename the file out of the way
6166 * rather than delete it completely.
6167 */
6168 if (haveTblspcMap)
6169 {
6172 }
6173
6174 /*
6175 * Initialize our local copy of minRecoveryPoint. When doing crash
6176 * recovery we want to replay up to the end of WAL. Particularly, in
6177 * the case of a promoted standby minRecoveryPoint value in the
6178 * control file is only updated after the first checkpoint. However,
6179 * if the instance crashes before the first post-recovery checkpoint
6180 * is completed then recovery will use a stale location causing the
6181 * startup process to think that there are still invalid page
6182 * references when checking for data consistency.
6183 */
6185 {
6188 }
6189 else
6190 {
6193 }
6194
6195 /* Check that the GUCs used to generate the WAL allow recovery */
6197
6198 /*
6199 * We're in recovery, so unlogged relations may be trashed and must be
6200 * reset. This should be done BEFORE allowing Hot Standby
6201 * connections, so that read-only backends don't try to read whatever
6202 * garbage is left over from before.
6203 */
6205
6206 /*
6207 * Likewise, delete any saved transaction snapshot files that got left
6208 * behind by crashed backends.
6209 */
6211
6212 /*
6213 * Initialize for Hot Standby, if enabled. We won't let backends in
6214 * yet, not until we've reached the min recovery point specified in
6215 * control file and we've established a recovery snapshot from a
6216 * running-xacts WAL record.
6217 */
6219 {
6220 TransactionId *xids;
6221 int nxids;
6222
6224 (errmsg_internal("initializing for hot standby")));
6225
6227
6228 if (wasShutdown)
6230 else
6231 oldestActiveXID = checkPoint.oldestActiveXid;
6233
6234 /* Tell procarray about the range of xids it has to deal with */
6236
6237 /*
6238 * Startup subtrans only. CLOG, MultiXact and commit timestamp
6239 * have already been started up and other SLRUs are not maintained
6240 * during recovery and need not be started yet.
6241 */
6243
6244 /*
6245 * If we're beginning at a shutdown checkpoint, we know that
6246 * nothing was running on the primary at this point. So fake-up an
6247 * empty running-xacts record and use that here and now. Recover
6248 * additional standby state for prepared transactions.
6249 */
6250 if (wasShutdown)
6251 {
6253 TransactionId latestCompletedXid;
6254
6255 /* Update pg_subtrans entries for any prepared transactions */
6257
6258 /*
6259 * Construct a RunningTransactions snapshot representing a
6260 * shut down server, with only prepared transactions still
6261 * alive. We're never overflowed at this point because all
6262 * subxids are listed with their parent prepared transactions.
6263 */
6264 running.xcnt = nxids;
6265 running.subxcnt = 0;
6267 running.nextXid = XidFromFullTransactionId(checkPoint.nextXid);
6269 latestCompletedXid = XidFromFullTransactionId(checkPoint.nextXid);
6270 TransactionIdRetreat(latestCompletedXid);
6271 Assert(TransactionIdIsNormal(latestCompletedXid));
6272 running.latestCompletedXid = latestCompletedXid;
6273 running.xids = xids;
6274
6276 }
6277 }
6278
6279 /*
6280 * We're all set for replaying the WAL now. Do it.
6281 */
6283 performedWalRecovery = true;
6284 }
6285 else
6286 performedWalRecovery = false;
6287
6288 /*
6289 * Finish WAL recovery.
6290 */
6292 EndOfLog = endOfRecoveryInfo->endOfLog;
6293 EndOfLogTLI = endOfRecoveryInfo->endOfLogTLI;
6294 abortedRecPtr = endOfRecoveryInfo->abortedRecPtr;
6295 missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
6296
6297 /*
6298 * Reset ps status display, so as no information related to recovery shows
6299 * up.
6300 */
6301 set_ps_display("");
6302
6303 /*
6304 * When recovering from a backup (we are in recovery, and archive recovery
6305 * was requested), complain if we did not roll forward far enough to reach
6306 * the point where the database is consistent. For regular online
6307 * backup-from-primary, that means reaching the end-of-backup WAL record
6308 * (at which point we reset backupStartPoint to be Invalid), for
6309 * backup-from-replica (which can't inject records into the WAL stream),
6310 * that point is when we reach the minRecoveryPoint in pg_control (which
6311 * we purposefully copy last when backing up from a replica). For
6312 * pg_rewind (which creates a backup_label with a method of "pg_rewind")
6313 * or snapshot-style backups (which don't), backupEndRequired will be set
6314 * to false.
6315 *
6316 * Note: it is indeed okay to look at the local variable
6317 * LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
6318 * might be further ahead --- ControlFile->minRecoveryPoint cannot have
6319 * been advanced beyond the WAL we processed.
6320 */
6321 if (InRecovery &&
6324 {
6325 /*
6326 * Ran off end of WAL before reaching end-of-backup WAL record, or
6327 * minRecoveryPoint. That's a bad sign, indicating that you tried to
6328 * recover from an online backup but never called pg_backup_stop(), or
6329 * you didn't archive all the WAL needed.
6330 */
6332 {
6334 ereport(FATAL,
6336 errmsg("WAL ends before end of online backup"),
6337 errhint("All WAL generated while online backup was taken must be available at recovery.")));
6338 else
6339 ereport(FATAL,
6341 errmsg("WAL ends before consistent recovery point")));
6342 }
6343 }
6344
6345 /*
6346 * Reset unlogged relations to the contents of their INIT fork. This is
6347 * done AFTER recovery is complete so as to include any unlogged relations
6348 * created during recovery, but BEFORE recovery is marked as having
6349 * completed successfully. Otherwise we'd not retry if any of the post
6350 * end-of-recovery steps fail.
6351 */
6352 if (InRecovery)
6354
6355 /*
6356 * Pre-scan prepared transactions to find out the range of XIDs present.
6357 * This information is not quite needed yet, but it is positioned here so
6358 * as potential problems are detected before any on-disk change is done.
6359 */
6361
6362 /*
6363 * Allow ordinary WAL segment creation before possibly switching to a new
6364 * timeline, which creates a new segment, and after the last ReadRecord().
6365 */
6367
6368 /*
6369 * Consider whether we need to assign a new timeline ID.
6370 *
6371 * If we did archive recovery, we always assign a new ID. This handles a
6372 * couple of issues. If we stopped short of the end of WAL during
6373 * recovery, then we are clearly generating a new timeline and must assign
6374 * it a unique new ID. Even if we ran to the end, modifying the current
6375 * last segment is problematic because it may result in trying to
6376 * overwrite an already-archived copy of that segment, and we encourage
6377 * DBAs to make their archive_commands reject that. We can dodge the
6378 * problem by making the new active segment have a new timeline ID.
6379 *
6380 * In a normal crash recovery, we can just extend the timeline we were in.
6381 */
6382 newTLI = endOfRecoveryInfo->lastRecTLI;
6384 {
6386 ereport(LOG,
6387 (errmsg("selected new timeline ID: %u", newTLI)));
6388
6389 /*
6390 * Make a writable copy of the last WAL segment. (Note that we also
6391 * have a copy of the last block of the old WAL in
6392 * endOfRecovery->lastPage; we will use that below.)
6393 */
6395
6396 /*
6397 * Remove the signal files out of the way, so that we don't
6398 * accidentally re-enter archive recovery mode in a subsequent crash.
6399 */
6400 if (endOfRecoveryInfo->standby_signal_file_found)
6402
6403 if (endOfRecoveryInfo->recovery_signal_file_found)
6405
6406 /*
6407 * Write the timeline history file, and have it archived. After this
6408 * point (or rather, as soon as the file is archived), the timeline
6409 * will appear as "taken" in the WAL archive and to any standby
6410 * servers. If we crash before actually switching to the new
6411 * timeline, standby servers will nevertheless think that we switched
6412 * to the new timeline, and will try to connect to the new timeline.
6413 * To minimize the window for that, try to do as little as possible
6414 * between here and writing the end-of-recovery record.
6415 */
6417 EndOfLog, endOfRecoveryInfo->recoveryStopReason);
6418
6419 ereport(LOG,
6420 (errmsg("archive recovery complete")));
6421 }
6422
6423 /* Save the selected TimeLineID in shared memory, too */
6428
6429 /*
6430 * Actually, if WAL ended in an incomplete record, skip the parts that
6431 * made it through and start writing after the portion that persisted.
6432 * (It's critical to first write an OVERWRITE_CONTRECORD message, which
6433 * we'll do as soon as we're open for writing new WAL.)
6434 */
6436 {
6437 /*
6438 * We should only have a missingContrecPtr if we're not switching to a
6439 * new timeline. When a timeline switch occurs, WAL is copied from the
6440 * old timeline to the new only up to the end of the last complete
6441 * record, so there can't be an incomplete WAL record that we need to
6442 * disregard.
6443 */
6444 Assert(newTLI == endOfRecoveryInfo->lastRecTLI);
6447 }
6448
6449 /*
6450 * Prepare to write WAL starting at EndOfLog location, and init xlog
6451 * buffer cache using the block containing the last record from the
6452 * previous incarnation.
6453 */
6454 Insert = &XLogCtl->Insert;
6456 Insert->CurrBytePos = XLogRecPtrToBytePos(EndOfLog);
6457
6458 /*
6459 * Tricky point here: lastPage contains the *last* block that the LastRec
6460 * record spans, not the one it starts in. The last block is indeed the
6461 * one we want to use.
6462 */
6463 if (EndOfLog % XLOG_BLCKSZ != 0)
6464 {
6465 char *page;
6466 int len;
6467 int firstIdx;
6468
6470 len = EndOfLog - endOfRecoveryInfo->lastPageBeginPtr;
6472
6473 /* Copy the valid part of the last block, and zero the rest */
6474 page = &XLogCtl->pages[firstIdx * XLOG_BLCKSZ];
6475 memcpy(page, endOfRecoveryInfo->lastPage, len);
6476 memset(page + len, 0, XLOG_BLCKSZ - len);
6477
6480 }
6481 else
6482 {
6483 /*
6484 * There is no partial block to copy. Just set InitializedUpTo, and
6485 * let the first attempt to insert a log record to initialize the next
6486 * buffer.
6487 */
6489 }
6490
6491 /*
6492 * Update local and shared status. This is OK to do without any locks
6493 * because no other process can be reading or writing WAL yet.
6494 */
6501
6502 /*
6503 * Preallocate additional log files, if wanted.
6504 */
6506
6507 /*
6508 * Okay, we're officially UP.
6509 */
6510 InRecovery = false;
6511
6512 /* start the archive_timeout timer and LSN running */
6515
6516 /* also initialize latestCompletedXid, to nextXid - 1 */
6521
6522 /*
6523 * Start up subtrans, if not already done for hot standby. (commit
6524 * timestamps are started below, if necessary.)
6525 */
6528
6529 /*
6530 * Perform end of recovery actions for any SLRUs that need it.
6531 */
6532 TrimCLOG();
6533 TrimMultiXact();
6534
6535 /*
6536 * Reload shared-memory state for prepared transactions. This needs to
6537 * happen before renaming the last partial segment of the old timeline as
6538 * it may be possible that we have to recover some transactions from it.
6539 */
6541
6542 /* Shut down xlogreader */
6544
6545 /* Enable WAL writes for this backend only. */
6547
6548 /* If necessary, write overwrite-contrecord before doing anything else */
6550 {
6553 }
6554
6555 /*
6556 * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
6557 * record before resource manager writes cleanup WAL records or checkpoint
6558 * record is written.
6559 */
6560 Insert->fullPageWrites = lastFullPageWrites;
6562
6563 /*
6564 * Emit checkpoint or end-of-recovery record in XLOG, if required.
6565 */
6568
6569 /*
6570 * If any of the critical GUCs have changed, log them before we allow
6571 * backends to write WAL.
6572 */
6574
6575 /* If this is archive recovery, perform post-recovery cleanup actions. */
6578
6579 /*
6580 * Local WAL inserts enabled, so it's time to finish initialization of
6581 * commit timestamp.
6582 */
6584
6585 /*
6586 * Update logical decoding status in shared memory and write an
6587 * XLOG_LOGICAL_DECODING_STATUS_CHANGE, if necessary.
6588 */
6590
6591 /* Clean up EndOfWalRecoveryInfo data to appease Valgrind leak checking */
6592 if (endOfRecoveryInfo->lastPage)
6593 pfree(endOfRecoveryInfo->lastPage);
6594 pfree(endOfRecoveryInfo->recoveryStopReason);
6596
6597 /*
6598 * If we reach this point with checksums in the state inprogress-on, it
6599 * means that data checksums were in the process of being enabled when the
6600 * cluster shut down. Since processing didn't finish, the operation will
6601 * have to be restarted from scratch since there is no capability to
6602 * continue where it was when the cluster shut down. Thus, revert the
6603 * state back to off, and inform the user with a warning message. Being
6604 * able to restart processing is a TODO, but it wouldn't be possible to
6605 * restart here since we cannot launch a dynamic background worker
6606 * directly from here (it has to be from a regular backend).
6607 */
6609 {
6611
6616
6618 errmsg("enabling data checksums was interrupted"),
6619 errhint("Data checksum processing must be manually restarted for checksums to be enabled"));
6620 }
6621
6622 /*
6623 * If data checksums were being disabled when the cluster was shut down,
6624 * we know that we have a state where all backends have stopped validating
6625 * checksums and we can move to off instead of prompting the user to
6626 * perform any action.
6627 */
6629 {
6631
6636 }
6637
6638 /*
6639 * All done with end-of-recovery actions.
6640 *
6641 * Now allow backends to write WAL and update the control file status in
6642 * consequence. SharedRecoveryState, that controls if backends can write
6643 * WAL, is updated while holding ControlFileLock to prevent other backends
6644 * to look at an inconsistent state of the control file in shared memory.
6645 * There is still a small window during which backends can write WAL and
6646 * the control file is still referring to a system not in DB_IN_PRODUCTION
6647 * state while looking at the on-disk control file.
6648 *
6649 * Also, we use info_lck to update SharedRecoveryState to ensure that
6650 * there are no race conditions concerning visibility of other recent
6651 * updates to shared memory.
6652 */
6655
6660
6663
6664 /*
6665 * Wake up the checkpointer process as there might be a request to disable
6666 * logical decoding by concurrent slot drop.
6667 */
6669
6670 /*
6671 * Wake up all waiters. They need to report an error that recovery was
6672 * ended before reaching the target LSN.
6673 */
6677
6678 /*
6679 * Shutdown the recovery environment. This must occur after
6680 * RecoverPreparedTransactions() (see notes in lock_twophase_recover())
6681 * and after switching SharedRecoveryState to RECOVERY_STATE_DONE so as
6682 * any session building a snapshot will not rely on KnownAssignedXids as
6683 * RecoveryInProgress() would return false at this stage. This is
6684 * particularly critical for prepared 2PC transactions, that would still
6685 * need to be included in snapshots once recovery has ended.
6686 */
6689
6690 /*
6691 * If there were cascading standby servers connected to us, nudge any wal
6692 * sender processes to notice that we've been promoted.
6693 */
6694 WalSndWakeup(true, true);
6695
6696 /*
6697 * If this was a promotion, request an (online) checkpoint now. This isn't
6698 * required for consistency, but the last restartpoint might be far back,
6699 * and in case of a crash, recovering from it might take a longer than is
6700 * appropriate now that we're not in standby mode anymore.
6701 */
6702 if (promoted)
6704}
6705
/*
 * NOTE(review): doxygen text rendering — gaps in the embedded line numbers
 * (6710 -> 6712, 6713 -> 6716, 6721 -> 6724, 6736 -> 6740) show that the
 * function signature and several statements (presumably the ControlFileLock
 * acquire/release and the shared-state update calls — TODO confirm against
 * upstream xlog.c) were dropped during extraction.  Do not edit from this
 * text alone.
 */
6706/*
6707 * Callback from PerformWalRecovery(), called when we switch from crash
6708 * recovery to archive recovery mode. Updates the control file accordingly.
6709 */
6710void
6712{
6713 /* initialize minRecoveryPoint to this record */
6716 if (ControlFile->minRecoveryPoint < EndRecPtr)
6717 {
6718 ControlFile->minRecoveryPoint = EndRecPtr;
6719 ControlFile->minRecoveryPointTLI = replayTLI;
6720 }
6721 /* update local copy */
6724
6725 /*
6726 * The startup process can update its local copy of minRecoveryPoint from
6727 * this point.
6728 */
6730
6732
6733 /*
6734 * We update SharedRecoveryState while holding the lock on ControlFileLock
6735 * so both states are consistent in shared memory.
6736 */
6740
6742}
6743
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6748 -> 6750,
 * 6758 -> 6760, 6763 -> 6765, 6766 -> 6771) indicate the signature line and
 * several statements (presumably the backupStartPoint/backupEndPoint resets
 * and ControlFile update/locking — TODO confirm upstream) are missing here.
 */
6744/*
6745 * Callback from PerformWalRecovery(), called when we reach the end of backup.
6746 * Updates the control file accordingly.
6747 */
6748void
6750{
6751 /*
6752 * We have reached the end of base backup, as indicated by pg_control. The
6753 * data on disk is now consistent (unless minRecoveryPoint is further
6754 * ahead, which can happen if we crashed during previous recovery). Reset
6755 * backupStartPoint and backupEndPoint, and update minRecoveryPoint to
6756 * make sure we don't allow starting up at an earlier point even if
6757 * recovery is stopped and restarted soon after this.
6758 */
6760
6761 if (ControlFile->minRecoveryPoint < EndRecPtr)
6762 {
6763 ControlFile->minRecoveryPoint = EndRecPtr;
6765 }
6766
6771
6773}
6774
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6785 -> 6787,
 * 6802 -> 6805, 6816 -> 6818, 6820 -> 6824) indicate missing lines: the
 * signature, the promotion condition guarding the first branch, and the
 * record/checkpoint calls inside both branches (presumably the
 * end-of-recovery-record insertion and the shutdown-checkpoint request —
 * TODO confirm against upstream xlog.c).
 */
6775/*
6776 * Perform whatever XLOG actions are necessary at end of REDO.
6777 *
6778 * The goal here is to make sure that we'll be able to recover properly if
6779 * we crash again. If we choose to write a checkpoint, we'll write a shutdown
6780 * checkpoint rather than an on-line one. This is not particularly critical,
6781 * but since we may be assigning a new TLI, using a shutdown checkpoint allows
6782 * us to have the rule that TLI only changes in shutdown checkpoints, which
6783 * allows some extra error checking in xlog_redo.
6784 */
6785static bool
6787{
6788 bool promoted = false;
6789
6790 /*
6791 * Perform a checkpoint to update all our recovery activity to disk.
6792 *
6793 * Note that we write a shutdown checkpoint rather than an on-line one.
6794 * This is not particularly critical, but since we may be assigning a new
6795 * TLI, using a shutdown checkpoint allows us to have the rule that TLI
6796 * only changes in shutdown checkpoints, which allows some extra error
6797 * checking in xlog_redo.
6798 *
6799 * In promotion, only create a lightweight end-of-recovery record instead
6800 * of a full checkpoint. A checkpoint is requested later, after we're
6801 * fully out of recovery mode and already accepting queries.
6802 */
6805 {
6806 promoted = true;
6807
6808 /*
6809 * Insert a special WAL record to mark the end of recovery, since we
6810 * aren't doing a checkpoint. That means that the checkpointer process
6811 * may likely be in the middle of a time-smoothed restartpoint and
6812 * could continue to be for minutes after this. That sounds strange,
6813 * but the effect is roughly the same and it would be stranger to try
6814 * to come out of the restartpoint and then checkpoint. We request a
6815 * checkpoint later anyway, just for safety.
6816 */
6818 }
6819 else
6820 {
6824 }
6825
6826 return promoted;
6827}
6828
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6835 -> 6837,
 * 6842 -> 6844, 6860 -> 6862) indicate missing lines: the signature and the
 * fast-path conditional that must precede "return false" (presumably a test
 * of the backend-local LocalRecoveryInProgress flag — TODO confirm against
 * upstream xlog.c).
 */
6829/*
6830 * Is the system still in recovery?
6831 *
6832 * Unlike testing InRecovery, this works in any process that's connected to
6833 * shared memory.
6834 */
6835bool
6837{
6838 /*
6839 * We check shared state each time only until we leave recovery mode. We
6840 * can't re-enter recovery, so there's no need to keep checking after the
6841 * shared variable has once been seen false.
6842 */
6844 return false;
6845 else
6846 {
6847 /*
6848 * use volatile pointer to make sure we make a fresh read of the
6849 * shared variable.
6850 */
6851 volatile XLogCtlData *xlogctl = XLogCtl;
6852
6853 LocalRecoveryInProgress = (xlogctl->SharedRecoveryState != RECOVERY_STATE_DONE);
6854
6855 /*
6856 * Note: We don't need a memory barrier when we're still in recovery.
6857 * We might exit recovery immediately after return, so the caller
6858 * can't rely on 'true' meaning that we're still in recovery anyway.
6859 */
6860
6862 }
6863}
6864
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6870 -> 6873,
 * 6875 -> 6877, 6877 -> 6879) indicate the signature and the lines
 * bracketing the shared-state read (presumably an info_lck spinlock
 * acquire/release pair — TODO confirm upstream) are missing from this text.
 */
6865/*
6866 * Returns current recovery state from shared memory.
6867 *
6868 * This returned state is kept consistent with the contents of the control
6869 * file. See details about the possible values of RecoveryState in xlog.h.
6870 */
6873{
6874 RecoveryState retval;
6875
6877 retval = XLogCtl->SharedRecoveryState;
6879
6880 return retval;
6881}
6882
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6890 -> 6892,
 * 6910 -> 6912) indicate the signature and one statement before the final
 * "return true" (presumably the call that latches the local flag to
 * "unconditionally true" — TODO confirm upstream) are missing here.
 */
6883/*
6884 * Is this process allowed to insert new WAL records?
6885 *
6886 * Ordinarily this is essentially equivalent to !RecoveryInProgress().
6887 * But we also have provisions for forcing the result "true" or "false"
6888 * within specific processes regardless of the global state.
6889 */
6890bool
6892{
6893 /*
6894 * If value is "unconditionally true" or "unconditionally false", just
6895 * return it. This provides the normal fast path once recovery is known
6896 * done.
6897 */
6898 if (LocalXLogInsertAllowed >= 0)
6899 return (bool) LocalXLogInsertAllowed;
6900
6901 /*
6902 * Else, must check to see if we're still in recovery.
6903 */
6904 if (RecoveryInProgress())
6905 return false;
6906
6907 /*
6908 * On exit from recovery, reset to "unconditionally true", since there is
6909 * no need to keep checking.
6910 */
6912 return true;
6913}
6914
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6923 -> 6925,
 * 6925 -> 6927 -> 6929) indicate the signature and the two statements that
 * save the old value of LocalXLogInsertAllowed and set the new one are
 * missing from this text — only "return oldXLogAllowed" survived.  Verify
 * against upstream xlog.c.
 */
6915/*
6916 * Make XLogInsertAllowed() return true in the current process only.
6917 *
6918 * Note: it is allowed to switch LocalXLogInsertAllowed back to -1 later,
6919 * and even call LocalSetXLogInsertAllowed() again after that.
6920 *
6921 * Returns the previous value of LocalXLogInsertAllowed.
6922 */
6923static int
6925{
6927
6929
6930 return oldXLogAllowed;
6931}
6932
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6937 -> 6940,
 * 6948 -> 6950, 6950 -> 6952) indicate the signature and the lines
 * bracketing the XLogCtl->RedoRecPtr read (presumably an info_lck spinlock
 * acquire/release pair — TODO confirm upstream) are missing here.
 */
6933/*
6934 * Return the current Redo pointer from shared memory.
6935 *
6936 * As a side-effect, the local RedoRecPtr copy is updated.
6937 */
6940{
6941 XLogRecPtr ptr;
6942
6943 /*
6944 * The possibly not up-to-date copy in XLogCtl is enough. Even if we
6945 * grabbed a WAL insertion lock to read the authoritative value in
6946 * Insert->RedoRecPtr, someone might update it just after we've released
6947 * the lock.
6948 */
6950 ptr = XLogCtl->RedoRecPtr;
6952
6953 if (RedoRecPtr < ptr)
6954 RedoRecPtr = ptr;
6955
6956 return RedoRecPtr;
6957}
6958
/*
 * NOTE(review): doxygen text rendering — the entire body of this function
 * (embedded lines 6969-6973) was dropped by the extraction; only the return
 * type survived.  Consult upstream xlog.c for the implementation.
 */
6959/*
6960 * Return information needed to decide whether a modified block needs a
6961 * full-page image to be included in the WAL record.
6962 *
6963 * The returned values are cached copies from backend-private memory, and
6964 * possibly out-of-date or, indeed, uninitialized, in which case they will
6965 * be InvalidXLogRecPtr and false, respectively. XLogInsertRecord will
6966 * re-check them against up-to-date values, while holding the WAL insert lock.
6967 */
6968void
6974
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6982 -> 6985,
 * 6985 -> 6987 -> 6991) indicate the signature, the declaration of the
 * local "recptr", and the locked read that populates it are missing from
 * this text.  Verify against upstream xlog.c.
 */
6975/*
6976 * GetInsertRecPtr -- Returns the current insert position.
6977 *
6978 * NOTE: The value *actually* returned is the position of the last full
6979 * xlog page. It lags behind the real insert position by at most 1 page.
6980 * For that, we don't need to scan through WAL insertion locks, and an
6981 * approximation is enough for the current usage of this function.
6982 */
6985{
6987
6991
6992 return recptr;
6993}
6994
/*
 * NOTE(review): doxygen text rendering — numbering gaps (6999 -> 7002,
 * 7002 -> 7004 -> 7006, 7011 -> 7013) indicate the signature, the locked
 * refresh of LogwrtResult, and the statement under "if (insertTLI)"
 * (presumably storing the insertion TLI through the out parameter — TODO
 * confirm upstream) are missing from this text.
 */
6995/*
6996 * GetFlushRecPtr -- Returns the current flush position, ie, the last WAL
6997 * position known to be fsync'd to disk. This should only be used on a
6998 * system that is known not to be in recovery.
6999 */
7002{
7004
7006
7007 /*
7008 * If we're writing and flushing WAL, the time line can't be changing, so
7009 * no lock is required.
7010 */
7011 if (insertTLI)
7013
7014 return LogwrtResult.Flush;
7015}
7016
/*
 * NOTE(review): doxygen text rendering — numbering gaps (7020 -> 7023,
 * 7023 -> 7025) indicate the signature and one statement (presumably an
 * assertion that recovery is over — TODO confirm upstream) are missing.
 */
7017/*
7018 * GetWALInsertionTimeLine -- Returns the current timeline of a system that
7019 * is not in recovery.
7020 */
7023{
7025
7026 /* Since the value can't be changing, no lock is required. */
7027 return XLogCtl->InsertTimeLineID;
7028}
7029
/*
 * NOTE(review): doxygen text rendering — the entire definition of this
 * function (embedded lines 7037-7047) was dropped by the extraction; only
 * the header comment survived.  Consult upstream xlog.c for the code.
 */
7030/*
7031 * GetWALInsertionTimeLineIfSet -- If the system is not in recovery, returns
7032 * the WAL insertion timeline; else, returns 0. Wherever possible, use
7033 * GetWALInsertionTimeLine() instead, since it's cheaper. Note that this
7034 * function decides recovery has ended as soon as the insert TLI is set, which
7035 * happens before we set XLogCtl->SharedRecoveryState to RECOVERY_STATE_DONE.
7036 */
7048
/*
 * NOTE(review): doxygen text rendering — numbering gaps (7056 -> 7059,
 * 7059 -> 7061, 7064 -> 7066, 7071 -> 7074) indicate the signature, the
 * declarations of "res"/"last_important", and the lock-acquire plus the
 * read of lastImportantAt that must precede LWLockRelease() are missing
 * from this text.  Verify against upstream xlog.c.
 */
7049/*
7050 * GetLastImportantRecPtr -- Returns the LSN of the last important record
7051 * inserted. All records not explicitly marked as unimportant are considered
7052 * important.
7053 *
7054 * The LSN is determined by computing the maximum of
7055 * WALInsertLocks[i].lastImportantAt.
7056 */
7059{
7061 int i;
7062
7063 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
7064 {
7066
7067 /*
7068 * Need to take a lock to prevent torn reads of the LSN, which are
7069 * possible on some of the supported platforms. WAL insert locks only
7070 * support exclusive mode, so we have to use that.
7071 */
7074 LWLockRelease(&WALInsertLocks[i].l.lock);
7075
7076 if (res < last_important)
7077 res = last_important;
7078 }
7079
7080 return res;
7081}
7082
/*
 * NOTE(review): doxygen text rendering — numbering gaps (7085 -> 7088,
 * 7088 -> 7090, 7091 -> 7096) indicate the signature, the declaration of
 * "result", and the WALWriteLock-protected reads that populate it are
 * missing from this text.  Verify against upstream xlog.c.
 */
7083/*
7084 * Get the time and LSN of the last xlog segment switch
7085 */
7088{
7090
7091 /* Need WALWriteLock, but shared lock is sufficient */
7096
7097 return result;
7098}
7099
/*
 * NOTE(review): doxygen text rendering — numbering gaps (7103 -> 7105,
 * 7109 -> 7114, 7115 -> 7117, 7121 -> 7123, 7127 -> 7129, 7130 -> 7132,
 * 7142 -> 7144) indicate the signature, the resource-owner assertions, the
 * ereport() opening line, the walsender signal/wait calls, the
 * recovery-branch statement, and the shutdown-checkpoint call are all
 * missing from this text.  Verify against upstream xlog.c.
 */
7100/*
7101 * This must be called ONCE during postmaster or standalone-backend shutdown
7102 */
7103void
7105{
7106 /*
7107 * We should have an aux process resource owner to use, and we should not
7108 * be in a transaction that's installed some other resowner.
7109 */
7114
7115 /* Don't be chatty in standalone mode */
7117 (errmsg("shutting down")));
7118
7119 /*
7120 * Signal walsenders to move to stopping state.
7121 */
7123
7124 /*
7125 * Wait for WAL senders to be in stopping state. This prevents commands
7126 * from writing new WAL.
7127 */
7129
7130 if (RecoveryInProgress())
7132 else
7133 {
7134 /*
7135 * If archiving is enabled, rotate the last XLOG file so that all the
7136 * remaining records are archived (postmaster wakes up the archiver
7137 * process one more time at the end of shutdown). The checkpoint
7138 * record will go to the next XLOG file and won't be archived (yet).
7139 */
7140 if (XLogArchivingActive())
7141 RequestXLogSwitch(false);
7142
7144 }
7145}
7146
7147/*
7148 * Format checkpoint request flags as a space-separated string for
7149 * log messages.
7150 */
7151static const char *
7153{
7154 static char buf[128];
7155
7156 snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s",
7157 (flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
7158 (flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
7159 (flags & CHECKPOINT_FAST) ? " fast" : "",
7160 (flags & CHECKPOINT_FORCE) ? " force" : "",
7161 (flags & CHECKPOINT_WAIT) ? " wait" : "",
7162 (flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "",
7163 (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
7164 (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : "");
7165
7166 return buf;
7167}
7168
7169/*
7170 * Log start of a checkpoint.
7171 */
7172static void
7174{
7175 if (restartpoint)
7176 ereport(LOG,
7177 /* translator: the placeholder shows checkpoint options */
7178 (errmsg("restartpoint starting:%s",
7179 CheckpointFlagsString(flags))));
7180 else
7181 ereport(LOG,
7182 /* translator: the placeholder shows checkpoint options */
7183 (errmsg("checkpoint starting:%s",
7184 CheckpointFlagsString(flags))));
7185}
7186
/*
 * NOTE(review): doxygen text rendering — numbering gaps (7190 -> 7192,
 * 7194 -> 7199 -> 7201 -> 7204 -> 7207, 7208 -> 7211, 7218 -> 7221,
 * 7225 -> 7227 -> 7232, and gaps inside both ereport argument lists such as
 * 7247 -> 7254 and 7272 -> 7279) indicate the signature, the remaining
 * local declarations, the timestamp-difference computations, the
 * pgstat/timing accumulation calls, and many of the printf arguments
 * (buffer counts, sync-file counts, LSN values) are missing from this
 * text.  The visible code is not compilable as-is; verify against
 * upstream xlog.c.
 */
7187/*
7188 * Log end of a checkpoint.
7189 */
7190static void
7192{
7193 long write_msecs,
7194 sync_msecs,
7199
7201
7204
7207
7208 /* Accumulate checkpoint timing summary data, in milliseconds. */
7211
7212 /*
7213 * All of the published timing statistics are accounted for. Only
7214 * continue if a log message is to be written.
7215 */
7216 if (!log_checkpoints)
7217 return;
7218
7221
7222 /*
7223 * Timing values returned from CheckpointStats are in microseconds.
7224 * Convert to milliseconds for consistent printing.
7225 */
7227
7232 average_msecs = (long) ((average_sync_time + 999) / 1000);
7233
7234 /*
7235 * ControlFileLock is not required to see ControlFile->checkPoint and
7236 * ->checkPointCopy here as we are the only updator of those variables at
7237 * this moment.
7238 */
7239 if (restartpoint)
7240 ereport(LOG,
7241 (errmsg("restartpoint complete:%s: wrote %d buffers (%.1f%%), "
7242 "wrote %d SLRU buffers; %d WAL file(s) added, "
7243 "%d removed, %d recycled; write=%ld.%03d s, "
7244 "sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
7245 "longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
7246 "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
7247 CheckpointFlagsString(flags),
7254 write_msecs / 1000, (int) (write_msecs % 1000),
7255 sync_msecs / 1000, (int) (sync_msecs % 1000),
7256 total_msecs / 1000, (int) (total_msecs % 1000),
7258 longest_msecs / 1000, (int) (longest_msecs % 1000),
7259 average_msecs / 1000, (int) (average_msecs % 1000),
7260 (int) (PrevCheckPointDistance / 1024.0),
7261 (int) (CheckPointDistanceEstimate / 1024.0),
7264 else
7265 ereport(LOG,
7266 (errmsg("checkpoint complete:%s: wrote %d buffers (%.1f%%), "
7267 "wrote %d SLRU buffers; %d WAL file(s) added, "
7268 "%d removed, %d recycled; write=%ld.%03d s, "
7269 "sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
7270 "longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
7271 "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
7272 CheckpointFlagsString(flags),
7279 write_msecs / 1000, (int) (write_msecs % 1000),
7280 sync_msecs / 1000, (int) (sync_msecs % 1000),
7281 total_msecs / 1000, (int) (total_msecs % 1000),
7283 longest_msecs / 1000, (int) (longest_msecs % 1000),
7284 average_msecs / 1000, (int) (average_msecs % 1000),
7285 (int) (PrevCheckPointDistance / 1024.0),
7286 (int) (CheckPointDistanceEstimate / 1024.0),
7289}
7290
7291/*
7292 * Update the estimate of distance between checkpoints.
7293 *
7294 * The estimate is used to calculate the number of WAL segments to keep
7295 * preallocated, see XLOGfileslop().
7296 */
7297static void
7299{
7300 /*
7301 * To estimate the number of segments consumed between checkpoints, keep a
7302 * moving average of the amount of WAL generated in previous checkpoint
7303 * cycles. However, if the load is bursty, with quiet periods and busy
7304 * periods, we want to cater for the peak load. So instead of a plain
7305 * moving average, let the average decline slowly if the previous cycle
7306 * used less WAL than estimated, but bump it up immediately if it used
7307 * more.
7308 *
7309 * When checkpoints are triggered by max_wal_size, this should converge to
7310 * CheckpointSegments * wal_segment_size,
7311 *
7312 * Note: This doesn't pay any attention to what caused the checkpoint.
7313 * Checkpoints triggered manually with CHECKPOINT command, or by e.g.
7314 * starting a base backup, are counted the same as those created
7315 * automatically. The slow-decline will largely mask them out, if they are
7316 * not frequent. If they are frequent, it seems reasonable to count them
7317 * in as any others; if you issue a manual checkpoint every 5 minutes and
7318 * never let a timed checkpoint happen, it makes sense to base the
7319 * preallocation on that 5 minute interval rather than whatever
7320 * checkpoint_timeout is set to.
7321 */
7322 PrevCheckPointDistance = nbytes;
7323 if (CheckPointDistanceEstimate < nbytes)
7325 else
7327 (0.90 * CheckPointDistanceEstimate + 0.10 * (double) nbytes);
7328}
7329
7330/*
7331 * Update the ps display for a process running a checkpoint. Note that
7332 * this routine should not do any allocations so as it can be called
7333 * from a critical section.
7334 */
7335static void
7337{
7338 /*
7339 * The status is reported only for end-of-recovery and shutdown
7340 * checkpoints or shutdown restartpoints. Updating the ps display is
7341 * useful in those situations as it may not be possible to rely on
7342 * pg_stat_activity to see the status of the checkpointer or the startup
7343 * process.
7344 */
7346 return;
7347
7348 if (reset)
7349 set_ps_display("");
7350 else
7351 {
7352 char activitymsg[128];
7353
7354 snprintf(activitymsg, sizeof(activitymsg), "performing %s%s%s",
7355 (flags & CHECKPOINT_END_OF_RECOVERY) ? "end-of-recovery " : "",
7356 (flags & CHECKPOINT_IS_SHUTDOWN) ? "shutdown " : "",
7357 restartpoint ? "restartpoint" : "checkpoint");
7359 }
7360}
7361
7362
7363/*
7364 * Perform a checkpoint --- either during shutdown, or on-the-fly
7365 *
7366 * flags is a bitwise OR of the following:
7367 * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
7368 * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
7369 * CHECKPOINT_FAST: finish the checkpoint ASAP, ignoring
7370 * checkpoint_completion_target parameter.
7371 * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
7372 * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
7373 * CHECKPOINT_END_OF_RECOVERY).
7374 * CHECKPOINT_FLUSH_UNLOGGED: also flush buffers of unlogged tables.
7375 *
7376 * Note: flags contains other bits, of interest here only for logging purposes.
7377 * In particular note that this routine is synchronous and does not pay
7378 * attention to CHECKPOINT_WAIT.
7379 *
7380 * If !shutdown then we are writing an online checkpoint. An XLOG_CHECKPOINT_REDO
7381 * record is inserted into WAL at the logical location of the checkpoint, before
7382 * flushing anything to disk, and when the checkpoint is eventually completed,
7383 * and it is from this point that WAL replay will begin in the case of a recovery
7384 * from this checkpoint. Once everything is written to disk, an
7385 * XLOG_CHECKPOINT_ONLINE record is written to complete the checkpoint, and
7386 * points back to the earlier XLOG_CHECKPOINT_REDO record. This mechanism allows
7387 * other write-ahead log records to be written while the checkpoint is in
7388 * progress, but we must be very careful about order of operations. This function
7389 * may take many minutes to execute on a busy system.
7390 *
7391 * On the other hand, when shutdown is true, concurrent insertion into the
7392 * write-ahead log is impossible, so there is no need for two separate records.
7393 * In this case, we only insert an XLOG_CHECKPOINT_SHUTDOWN record, and it's
7394 * both the record marking the completion of the checkpoint and the location
7395 * from which WAL replay would begin if needed.
7396 *
7397 * Returns true if a new checkpoint was performed, or false if it was skipped
7398 * because the system was idle.
7399 */
7400bool
7402{
 /*
  * NOTE(review): this listing came through an extraction that dropped many
  * source lines (the function signature, several declarations, and a number
  * of calls are missing).  Comments below annotate only the surviving code;
  * verify against the upstream file before relying on control flow here.
  */
 7403 bool shutdown;
 7404 CheckPoint checkPoint;
 7408 uint32 freespace;
 7412 int nvxids;
 7413 int oldXLogAllowed = 0;
 7414
 7415 /*
 7416 * An end-of-recovery checkpoint is really a shutdown checkpoint, just
 7417 * issued at a different time.
 7418 */
 7420 shutdown = true;
 7421 else
 7422 shutdown = false;
 7423
 7424 /* sanity check */
 7425 if (RecoveryInProgress() && (flags & CHECKPOINT_END_OF_RECOVERY) == 0)
 7426 elog(ERROR, "can't create a checkpoint during recovery");
 7427
 7428 /*
 7429 * Prepare to accumulate statistics.
 7430 *
 7431 * Note: because it is possible for log_checkpoints to change while a
 7432 * checkpoint proceeds, we always accumulate stats, even if
 7433 * log_checkpoints is currently off.
 7434 */
 7437
 7438 /*
 7439 * Let smgr prepare for checkpoint; this has to happen outside the
 7440 * critical section and before we determine the REDO pointer. Note that
 7441 * smgr must not do anything that'd have to be undone if we decide no
 7442 * checkpoint is needed.
 7443 */
 7445
 7446 /* Run these points outside the critical section. */
 7447 INJECTION_POINT("create-checkpoint-initial", NULL);
 7448 INJECTION_POINT_LOAD("create-checkpoint-run");
 7449
 7450 /*
 7451 * Use a critical section to force system panic if we have trouble.
 7452 */
 7454
 7455 if (shutdown)
 7456 {
 7461 }
 7462
 7463 /* Begin filling in the checkpoint WAL record */
 7464 MemSet(&checkPoint, 0, sizeof(checkPoint));
 7465 checkPoint.time = (pg_time_t) time(NULL);
 7466
 7467 /*
 7468 * For Hot Standby, derive the oldestActiveXid before we fix the redo
 7469 * pointer. This allows us to begin accumulating changes to assemble our
 7470 * starting snapshot of locks and transactions.
 7471 */
 7473 checkPoint.oldestActiveXid = GetOldestActiveTransactionId(false, true);
 7474 else
 7476
 7477 /*
 7478 * Get location of last important record before acquiring insert locks (as
 7479 * GetLastImportantRecPtr() also locks WAL locks).
 7480 */
 7482
 7483 /*
 7484 * If this isn't a shutdown or forced checkpoint, and if there has been no
 7485 * WAL activity requiring a checkpoint, skip it. The idea here is to
 7486 * avoid inserting duplicate checkpoints when the system is idle.
 7487 */
 7489 CHECKPOINT_FORCE)) == 0)
 7490 {
 7492 {
 7495 (errmsg_internal("checkpoint skipped because system is idle")));
 7496 return false;
 7497 }
 7498 }
 7499
 7500 /*
 7501 * An end-of-recovery checkpoint is created before anyone is allowed to
 7502 * write WAL. To allow us to write the checkpoint record, temporarily
 7503 * enable XLogInsertAllowed.
 7504 */
 7505 if (flags & CHECKPOINT_END_OF_RECOVERY)
 7507
 7509 if (flags & CHECKPOINT_END_OF_RECOVERY)
 7511 else
 7512 checkPoint.PrevTimeLineID = checkPoint.ThisTimeLineID;
 7513
 7514 /*
 7515 * We must block concurrent insertions while examining insert state.
 7516 */
 7518
 7519 checkPoint.fullPageWrites = Insert->fullPageWrites;
 7520 checkPoint.wal_level = wal_level;
 7521
 7522 /*
 7523 * Get the current data_checksum_version value from xlogctl, valid at the
 7524 * time of the checkpoint.
 7525 */
 7529
 7530 if (shutdown)
 7531 {
 7533
 7534 /*
 7535 * Compute new REDO record ptr = location of next XLOG record.
 7536 *
 7537 * Since this is a shutdown checkpoint, there can't be any concurrent
 7538 * WAL insertion.
 7539 */
 7540 freespace = INSERT_FREESPACE(curInsert);
 7541 if (freespace == 0)
 7542 {
 7545 else
 7547 }
 7548 checkPoint.redo = curInsert;
 7549
 7550 /*
 7551 * Here we update the shared RedoRecPtr for future XLogInsert calls;
 7552 * this must be done while holding all the insertion locks.
 7553 *
 7554 * Note: if we fail to complete the checkpoint, RedoRecPtr will be
 7555 * left pointing past where it really needs to point. This is okay;
 7556 * the only consequence is that XLogInsert might back up whole buffers
 7557 * that it didn't really need to. We can't postpone advancing
 7558 * RedoRecPtr because XLogInserts that happen while we are dumping
 7559 * buffers must assume that their buffer changes are not included in
 7560 * the checkpoint.
 7561 */
 7562 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
 7563 }
 7564
 7565 /*
 7566 * Now we can release the WAL insertion locks, allowing other xacts to
 7567 * proceed while we are flushing disk buffers.
 7568 */
 7570
 7571 /*
 7572 * If this is an online checkpoint, we have not yet determined the redo
 7573 * point. We do so now by inserting the special XLOG_CHECKPOINT_REDO
 7574 * record; the LSN at which it starts becomes the new redo pointer. We
 7575 * don't do this for a shutdown checkpoint, because in that case no WAL
 7576 * can be written between the redo point and the insertion of the
 7577 * checkpoint record itself, so the checkpoint record itself serves to
 7578 * mark the redo point.
 7579 */
 7580 if (!shutdown)
 7581 {
 7583
 7585 redo_rec.wal_level = wal_level;
 7587 redo_rec.data_checksum_version = XLogCtl->data_checksum_version;
 7590
 7591 /* Include WAL level in record for WAL summarizer's benefit. */
 7595
 7596 /*
 7597 * XLogInsertRecord will have updated XLogCtl->Insert.RedoRecPtr in
 7598 * shared memory and RedoRecPtr in backend-local memory, but we need
 7599 * to copy that into the record that will be inserted when the
 7600 * checkpoint is complete.
 7601 */
 7602 checkPoint.redo = RedoRecPtr;
 7603 }
 7604
 7605 /* Update the info_lck-protected copy of RedoRecPtr as well */
 7607 XLogCtl->RedoRecPtr = checkPoint.redo;
 7609
 7610 /*
 7611 * If enabled, log checkpoint start. We postpone this until now so as not
 7612 * to log anything if we decided to skip the checkpoint.
 7613 */
 7614 if (log_checkpoints)
 7615 LogCheckpointStart(flags, false);
 7616
 7617 INJECTION_POINT_CACHED("create-checkpoint-run", NULL);
 7618
 7619 /* Update the process title */
 7620 update_checkpoint_display(flags, false, false);
 7621
 7623
 7624 /*
 7625 * Get the other info we need for the checkpoint record.
 7626 *
 7627 * We don't need to save oldestClogXid in the checkpoint, it only matters
 7628 * for the short period in which clog is being truncated, and if we crash
 7629 * during that we'll redo the clog truncation and fix up oldestClogXid
 7630 * there.
 7631 */
 7633 checkPoint.nextXid = TransamVariables->nextXid;
 7634 checkPoint.oldestXid = TransamVariables->oldestXid;
 7637
 7642
 7644 checkPoint.nextOid = TransamVariables->nextOid;
 7645 if (!shutdown)
 7646 checkPoint.nextOid += TransamVariables->oidCount;
 7648
 7650
 7652 &checkPoint.nextMulti,
 7653 &checkPoint.nextMultiOffset,
 7654 &checkPoint.oldestMulti,
 7655 &checkPoint.oldestMultiDB);
 7656
 7657 /*
 7658 * Having constructed the checkpoint record, ensure all shmem disk buffers
 7659 * and commit-log buffers are flushed to disk.
 7660 *
 7661 * This I/O could fail for various reasons. If so, we will fail to
 7662 * complete the checkpoint, but there is no reason to force a system
 7663 * panic. Accordingly, exit critical section while doing it.
 7664 */
 7666
 7667 /*
 7668 * In some cases there are groups of actions that must all occur on one
 7669 * side or the other of a checkpoint record. Before flushing the
 7670 * checkpoint record we must explicitly wait for any backend currently
 7671 * performing those groups of actions.
 7672 *
 7673 * One example is end of transaction, so we must wait for any transactions
 7674 * that are currently in commit critical sections. If an xact inserted
 7675 * its commit record into XLOG just before the REDO point, then a crash
 7676 * restart from the REDO point would not replay that record, which means
 7677 * that our flushing had better include the xact's update of pg_xact. So
 7678 * we wait till he's out of his commit critical section before proceeding.
 7679 * See notes in RecordTransactionCommit().
 7680 *
 7681 * Because we've already released the insertion locks, this test is a bit
 7682 * fuzzy: it is possible that we will wait for xacts we didn't really need
 7683 * to wait for. But the delay should be short and it seems better to make
 7684 * checkpoint take a bit longer than to hold off insertions longer than
 7685 * necessary. (In fact, the whole reason we have this issue is that xact.c
 7686 * does commit record XLOG insertion and clog update as two separate steps
 7687 * protected by different locks, but again that seems best on grounds of
 7688 * minimizing lock contention.)
 7689 *
 7690 * A transaction that has not yet set delayChkptFlags when we look cannot
 7691 * be at risk, since it has not inserted its commit record yet; and one
 7692 * that's already cleared it is not at risk either, since it's done fixing
 7693 * clog and we will correctly flush the update below. So we cannot miss
 7694 * any xacts we need to wait for.
 7695 */
 7697 if (nvxids > 0)
 7698 {
 7699 do
 7700 {
 7701 /*
 7702 * Keep absorbing fsync requests while we wait. There could even
 7703 * be a deadlock if we don't, if the process that prevents the
 7704 * checkpoint is trying to add a request to the queue.
 7705 */
 7707
 7709 pg_usleep(10000L); /* wait for 10 msec */
 7713 }
 7714 pfree(vxids);
 7715
 7716 CheckPointGuts(checkPoint.redo, flags);
 7717
 /*
  * NOTE(review): a second wait-for-virtual-transactions loop follows
  * CheckPointGuts(); presumably it waits for backends delaying the
  * checkpoint-complete phase -- the call that refills vxids/nvxids here
  * was dropped by the extraction.  TODO confirm against upstream.
  */
 7719 if (nvxids > 0)
 7720 {
 7721 do
 7722 {
 7724
 7726 pg_usleep(10000L); /* wait for 10 msec */
 7730 }
 7731 pfree(vxids);
 7732
 7733 /*
 7734 * Take a snapshot of running transactions and write this to WAL. This
 7735 * allows us to reconstruct the state of running transactions during
 7736 * archive recovery, if required. Skip, if this info disabled.
 7737 *
 7738 * If we are shutting down, or Startup process is completing crash
 7739 * recovery we don't need to write running xact data.
 7740 */
 7743
 7745
 7746 /*
 7747 * Now insert the checkpoint record into XLOG.
 7748 */
 7750 XLogRegisterData(&checkPoint, sizeof(checkPoint));
 7754
 7756
 7757 /*
 7758 * We mustn't write any new WAL after a shutdown checkpoint, or it will be
 7759 * overwritten at next startup. No-one should even try, this just allows
 7760 * sanity-checking. In the case of an end-of-recovery checkpoint, we want
 7761 * to just temporarily disable writing until the system has exited
 7762 * recovery.
 7763 */
 7764 if (shutdown)
 7765 {
 7766 if (flags & CHECKPOINT_END_OF_RECOVERY)
 7768 else
 7769 LocalXLogInsertAllowed = 0; /* never again write WAL */
 7770 }
 7771
 7772 /*
 7773 * We now have ProcLastRecPtr = start of actual checkpoint record, recptr
 7774 * = end of actual checkpoint record.
 7775 */
 7776 if (shutdown && checkPoint.redo != ProcLastRecPtr)
 7777 ereport(PANIC,
 7778 (errmsg("concurrent write-ahead log activity while database system is shutting down")));
 7779
 7780 /*
 7781 * Remember the prior checkpoint's redo ptr for
 7782 * UpdateCheckPointDistanceEstimate()
 7783 */
 7785
 7786 /*
 7787 * Update the control file.
 7788 */
 7790 if (shutdown)
 7793 ControlFile->checkPointCopy = checkPoint;
 7794 /* crash recovery should always recover to the end of WAL */
 7797
 7798 /*
 7799 * Persist unloggedLSN value. It's reset on crash recovery, so this goes
 7800 * unused on non-shutdown checkpoints, but seems useful to store it always
 7801 * for debugging purposes.
 7802 */
 7804
 7807
 7808 /*
 7809 * We are now done with critical updates; no need for system panic if we
 7810 * have trouble while fooling with old log segments.
 7811 */
 7813
 7814 /*
 7815 * WAL summaries end when the next XLOG_CHECKPOINT_REDO or
 7816 * XLOG_CHECKPOINT_SHUTDOWN record is reached. This is the first point
 7817 * where (a) we're not inside of a critical section and (b) we can be
 7818 * certain that the relevant record has been flushed to disk, which must
 7819 * happen before it can be summarized.
 7820 *
 7821 * If this is a shutdown checkpoint, then this happens reasonably
 7822 * promptly: we've only just inserted and flushed the
 7823 * XLOG_CHECKPOINT_SHUTDOWN record. If this is not a shutdown checkpoint,
 7824 * then this might not be very prompt at all: the XLOG_CHECKPOINT_REDO
 7825 * record was written before we began flushing data to disk, and that
 7826 * could be many minutes ago at this point. However, we don't XLogFlush()
 7827 * after inserting that record, so we're not guaranteed that it's on disk
 7828 * until after the above call that flushes the XLOG_CHECKPOINT_ONLINE
 7829 * record.
 7830 */
 7832
 7833 /*
 7834 * Let smgr do post-checkpoint cleanup (eg, deleting old files).
 7835 */
 7837
 7838 /*
 7839 * Update the average distance between checkpoints if the prior checkpoint
 7840 * exists.
 7841 */
 7844
 7845 INJECTION_POINT("checkpoint-before-old-wal-removal", NULL);
 7846
 7847 /*
 7848 * Delete old log files, those no longer needed for last checkpoint to
 7849 * prevent the disk holding the xlog from growing full.
 7850 */
 7856 {
 7857 /*
 7858 * Some slots have been invalidated; recalculate the old-segment
 7859 * horizon, starting again from RedoRecPtr.
 7860 */
 7863 }
 7864 _logSegNo--;
 7866 checkPoint.ThisTimeLineID);
 7867
 7868 /*
 7869 * Make more log segments if needed. (Do this after recycling old log
 7870 * segments, since that may supply some of the needed files.)
 7871 */
 7872 if (!shutdown)
 7874
 7875 /*
 7876 * Truncate pg_subtrans if possible. We can throw away all data before
 7877 * the oldest XMIN of any running transaction. No future transaction will
 7878 * attempt to reference any pg_subtrans entry older than that (see Asserts
 7879 * in subtrans.c). During recovery, though, we mustn't do this because
 7880 * StartupSUBTRANS hasn't been called yet.
 7881 */
 7882 if (!RecoveryInProgress())
 7884
 7885 /* Real work is done; log and update stats. */
 7886 LogCheckpointEnd(false, flags);
 7887
 7888 /* Reset the process title */
 7889 update_checkpoint_display(flags, false, true);
 7890
 7892 NBuffers,
 7896
 7897 return true;
 7898}
7899
7900/*
7901 * Mark the end of recovery in WAL though without running a full checkpoint.
7902 * We can expect that a restartpoint is likely to be in progress as we
7903 * do this, though we are unwilling to wait for it to complete.
7904 *
7905 * CreateRestartPoint() allows for the case where recovery may end before
7906 * the restartpoint completes so there is no concern of concurrent behaviour.
7907 */
7908static void
7910{
7913
7914 /* sanity check */
7915 if (!RecoveryInProgress())
7916 elog(ERROR, "can only be used to end recovery");
7917
7918 xlrec.end_time = GetCurrentTimestamp();
7919 xlrec.wal_level = wal_level;
7920
7922 xlrec.ThisTimeLineID = XLogCtl->InsertTimeLineID;
7923 xlrec.PrevTimeLineID = XLogCtl->PrevTimeLineID;
7925
7927
7931
7933
7934 /*
7935 * Update the control file so that crash recovery can follow the timeline
7936 * changes to this point.
7937 */
7940 ControlFile->minRecoveryPointTLI = xlrec.ThisTimeLineID;
7941
7942 /* start with the latest checksum version (as of the end of recovery) */
7946
7949
7951}
7952
7953/*
7954 * Write an OVERWRITE_CONTRECORD message.
7955 *
7956 * When on WAL replay we expect a continuation record at the start of a page
7957 * that is not there, recovery ends and WAL writing resumes at that point.
7958 * But it's wrong to resume writing new WAL back at the start of the record
7959 * that was broken, because downstream consumers of that WAL (physical
7960 * replicas) are not prepared to "rewind". So the first action after
7961 * finishing replay of all valid WAL must be to write a record of this type
7962 * at the point where the contrecord was missing; to support xlogreader
7963 * detecting the special case, XLP_FIRST_IS_OVERWRITE_CONTRECORD is also added
7964 * to the page header where the record occurs. xlogreader has an ad-hoc
7965 * mechanism to report metadata about the broken record, which is what we
7966 * use here.
7967 *
7968 * At replay time, XLP_FIRST_IS_OVERWRITE_CONTRECORD instructs xlogreader to
7969 * skip the record it was reading, and pass back the LSN of the skipped
7970 * record, so that its caller can verify (on "replay" of that record) that the
7971 * XLOG_OVERWRITE_CONTRECORD matches what was effectively overwritten.
7972 *
7973 * 'aborted_lsn' is the beginning position of the record that was incomplete.
7974 * It is included in the WAL record. 'pagePtr' and 'newTLI' point to the
7975 * beginning of the XLOG page where the record is to be inserted. They must
7976 * match the current WAL insert position, they're passed here just so that we
7977 * can verify that.
7978 */
7979static XLogRecPtr
7982{
7987
7988 /* sanity checks */
7989 if (!RecoveryInProgress())
7990 elog(ERROR, "can only be used at end of recovery");
7991 if (pagePtr % XLOG_BLCKSZ != 0)
7992 elog(ERROR, "invalid position for missing continuation record %X/%08X",
7994
7995 /* The current WAL insert position should be right after the page header */
7996 startPos = pagePtr;
7999 else
8002 if (recptr != startPos)
8003 elog(ERROR, "invalid WAL insert position %X/%08X for OVERWRITE_CONTRECORD",
8005
8007
8008 /*
8009 * Initialize the XLOG page header (by GetXLogBuffer), and set the
8010 * XLP_FIRST_IS_OVERWRITE_CONTRECORD flag.
8011 *
8012 * No other backend is allowed to write WAL yet, so acquiring the WAL
8013 * insertion lock is just pro forma.
8014 */
8019
8020 /*
8021 * Insert the XLOG_OVERWRITE_CONTRECORD record as the first record on the
8022 * page. We know it becomes the first record, because no other backend is
8023 * allowed to write WAL yet.
8024 */
8026 xlrec.overwritten_lsn = aborted_lsn;
8027 xlrec.overwrite_time = GetCurrentTimestamp();
8030
8031 /* check that the record was inserted to the right place */
8032 if (ProcLastRecPtr != startPos)
8033 elog(ERROR, "OVERWRITE_CONTRECORD was inserted to unexpected position %X/%08X",
8035
8037
8039
8040 return recptr;
8041}
8042
8043/*
8044 * Flush all data in shared memory to disk, and fsync
8045 *
8046 * This is the common code shared between regular checkpoints and
8047 * recovery restartpoints.
8048 */
8049static void
8051{
8057
8058 /* Write out all dirty data in SLRUs and the main buffer pool */
8066 CheckPointBuffers(flags);
8067
8068 /* Perform all queued up fsyncs */
8074
8075 /* We deliberately delay 2PC checkpointing as long as possible */
8077}
8078
8079/*
8080 * Save a checkpoint for recovery restart if appropriate
8081 *
8082 * This function is called each time a checkpoint record is read from XLOG.
8083 * It must determine whether the checkpoint represents a safe restartpoint or
8084 * not. If so, the checkpoint record is stashed in shared memory so that
8085 * CreateRestartPoint can consult it. (Note that the latter function is
8086 * executed by the checkpointer, while this one will be executed by the
8087 * startup process.)
8088 */
8089static void
8091{
8092 /*
8093 * Also refrain from creating a restartpoint if we have seen any
8094 * references to non-existent pages. Restarting recovery from the
8095 * restartpoint would not see the references, so we would lose the
8096 * cross-check that the pages belonged to a relation that was dropped
8097 * later.
8098 */
8100 {
8101 elog(DEBUG2,
8102 "could not record restart point at %X/%08X because there are unresolved references to invalid pages",
8103 LSN_FORMAT_ARGS(checkPoint->redo));
8104 return;
8105 }
8106
8107 /*
8108 * Copy the checkpoint record to shared memory, so that checkpointer can
8109 * work out the next time it wants to perform a restartpoint.
8110 */
8114 XLogCtl->lastCheckPoint = *checkPoint;
8116}
8117
8118/*
8119 * Establish a restartpoint if possible.
8120 *
8121 * This is similar to CreateCheckPoint, but is used during WAL recovery
8122 * to establish a point from which recovery can roll forward without
8123 * replaying the entire recovery log.
8124 *
8125 * Returns true if a new restartpoint was established. We can only establish
8126 * a restartpoint if we have replayed a safe checkpoint record since last
8127 * restartpoint.
8128 */
8129bool
8131{
 /*
  * NOTE(review): this listing came through an extraction that dropped many
  * source lines (the function signature, several declarations, and various
  * calls are missing).  Comments below annotate only the surviving code;
  * verify against the upstream file before relying on control flow here.
  */
 8132 XLogRecPtr lastCheckPointRecPtr;
 8133 XLogRecPtr lastCheckPointEndPtr;
 8134 CheckPoint lastCheckPoint;
 8138 TimeLineID replayTLI;
 8139 XLogRecPtr endptr;
 8142
 8143 /* Concurrent checkpoint/restartpoint cannot happen */
 8145
 8146 /* Get a local copy of the last safe checkpoint record. */
 8148 lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
 8149 lastCheckPointEndPtr = XLogCtl->lastCheckPointEndPtr;
 8150 lastCheckPoint = XLogCtl->lastCheckPoint;
 8152
 8153 /*
 8154 * Check that we're still in recovery mode. It's ok if we exit recovery
 8155 * mode after this check, the restart point is valid anyway.
 8156 */
 8157 if (!RecoveryInProgress())
 8158 {
 8160 (errmsg_internal("skipping restartpoint, recovery has already ended")));
 8161 return false;
 8162 }
 8163
 8164 /*
 8165 * If the last checkpoint record we've replayed is already our last
 8166 * restartpoint, we can't perform a new restart point. We still update
 8167 * minRecoveryPoint in that case, so that if this is a shutdown restart
 8168 * point, we won't start up earlier than before. That's not strictly
 8169 * necessary, but when hot standby is enabled, it would be rather weird if
 8170 * the database opened up for read-only connections at a point-in-time
 8171 * before the last shutdown. Such time travel is still possible in case of
 8172 * immediate shutdown, though.
 8173 *
 8174 * We don't explicitly advance minRecoveryPoint when we do create a
 8175 * restartpoint. It's assumed that flushing the buffers will do that as a
 8176 * side-effect.
 8177 */
 8178 if (!XLogRecPtrIsValid(lastCheckPointRecPtr) ||
 8179 lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
 8180 {
 8182 errmsg_internal("skipping restartpoint, already performed at %X/%08X",
 8183 LSN_FORMAT_ARGS(lastCheckPoint.redo)));
 8184
 8186 if (flags & CHECKPOINT_IS_SHUTDOWN)
 8187 {
 8192 }
 8193 return false;
 8194 }
 8195
 8196 /*
 8197 * Update the shared RedoRecPtr so that the startup process can calculate
 8198 * the number of segments replayed since last restartpoint, and request a
 8199 * restartpoint if it exceeds CheckPointSegments.
 8200 *
 8201 * Like in CreateCheckPoint(), hold off insertions to update it, although
 8202 * during recovery this is just pro forma, because no WAL insertions are
 8203 * happening.
 8204 */
 8206 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
 8208
 8209 /* Also update the info_lck-protected copy */
 8211 XLogCtl->RedoRecPtr = lastCheckPoint.redo;
 8213
 8214 /*
 8215 * Prepare to accumulate statistics.
 8216 *
 8217 * Note: because it is possible for log_checkpoints to change while a
 8218 * checkpoint proceeds, we always accumulate stats, even if
 8219 * log_checkpoints is currently off.
 8220 */
 8223
 8224 if (log_checkpoints)
 8225 LogCheckpointStart(flags, true);
 8226
 8227 /* Update the process title */
 8228 update_checkpoint_display(flags, true, false);
 8229
 8230 CheckPointGuts(lastCheckPoint.redo, flags);
 8231
 8232 /*
 8233 * This location needs to be after CheckPointGuts() to ensure that some
 8234 * work has already happened during this checkpoint.
 8235 */
 8236 INJECTION_POINT("create-restart-point", NULL);
 8237
 8238 /*
 8239 * Remember the prior checkpoint's redo ptr for
 8240 * UpdateCheckPointDistanceEstimate()
 8241 */
 8243
 8244 /*
 8245 * Update pg_control, using current time. Check that it still shows an
 8246 * older checkpoint, else do nothing; this is a quick hack to make sure
 8247 * nothing really bad happens if somehow we get here after the
 8248 * end-of-recovery checkpoint.
 8249 */
 8251 if (ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
 8252 {
 8253 /*
 8254 * Update the checkpoint information. We do this even if the cluster
 8255 * does not show DB_IN_ARCHIVE_RECOVERY to match with the set of WAL
 8256 * segments recycled below.
 8257 */
 8258 ControlFile->checkPoint = lastCheckPointRecPtr;
 8259 ControlFile->checkPointCopy = lastCheckPoint;
 8260
 8261 /*
 8262 * Ensure minRecoveryPoint is past the checkpoint record and update it
 8263 * if the control file still shows DB_IN_ARCHIVE_RECOVERY. Normally,
 8264 * this will have happened already while writing out dirty buffers,
 8265 * but not necessarily - e.g. because no buffers were dirtied. We do
 8266 * this because a backup performed in recovery uses minRecoveryPoint
 8267 * to determine which WAL files must be included in the backup, and
 8268 * the file (or files) containing the checkpoint record must be
 8269 * included, at a minimum. Note that for an ordinary restart of
 8270 * recovery there's no value in having the minimum recovery point any
 8271 * earlier than this anyway, because redo will begin just after the
 8272 * checkpoint record.
 8273 */
 8275 {
 8276 if (ControlFile->minRecoveryPoint < lastCheckPointEndPtr)
 8277 {
 8278 ControlFile->minRecoveryPoint = lastCheckPointEndPtr;
 8280
 8281 /* update local copy */
 8284 }
 8285 if (flags & CHECKPOINT_IS_SHUTDOWN)
 8287 }
 8288
 8289 /* we shall start with the latest checksum version */
 8291
 8293 }
 8295
 8296 /*
 8297 * Update the average distance between checkpoints/restartpoints if the
 8298 * prior checkpoint exists.
 8299 */
 8302
 8303 /*
 8304 * Delete old log files, those no longer needed for last restartpoint to
 8305 * prevent the disk holding the xlog from growing full.
 8306 */
 8308
 8309 /*
 8310 * Retreat _logSegNo using the current end of xlog replayed or received,
 8311 * whichever is later.
 8312 */
 8314 replayPtr = GetXLogReplayRecPtr(&replayTLI);
 8315 endptr = (receivePtr < replayPtr) ? replayPtr : receivePtr;
 8316 KeepLogSeg(endptr, &_logSegNo);
 8317
 8318 INJECTION_POINT("restartpoint-before-slot-invalidation", NULL);
 8319
 8323 {
 8324 /*
 8325 * Some slots have been invalidated; recalculate the old-segment
 8326 * horizon, starting again from RedoRecPtr.
 8327 */
 8329 KeepLogSeg(endptr, &_logSegNo);
 8330 }
 8331 _logSegNo--;
 8332
 8333 /*
 8334 * Try to recycle segments on a useful timeline. If we've been promoted
 8335 * since the beginning of this restartpoint, use the new timeline chosen
 8336 * at end of recovery. If we're still in recovery, use the timeline we're
 8337 * currently replaying.
 8338 *
 8339 * There is no guarantee that the WAL segments will be useful on the
 8340 * current timeline; if recovery proceeds to a new timeline right after
 8341 * this, the pre-allocated WAL segments on this timeline will not be used,
 8342 * and will go wasted until recycled on the next restartpoint. We'll live
 8343 * with that.
 8344 */
 8345 if (!RecoveryInProgress())
 8346 replayTLI = XLogCtl->InsertTimeLineID;
 8347
 8348 RemoveOldXlogFiles(_logSegNo, RedoRecPtr, endptr, replayTLI);
 8349
 8350 /*
 8351 * Make more log segments if needed. (Do this after recycling old log
 8352 * segments, since that may supply some of the needed files.)
 8353 */
 8354 PreallocXlogFiles(endptr, replayTLI);
 8355
 8356 /*
 8357 * Truncate pg_subtrans if possible. We can throw away all data before
 8358 * the oldest XMIN of any running transaction. No future transaction will
 8359 * attempt to reference any pg_subtrans entry older than that (see Asserts
 8360 * in subtrans.c). When hot standby is disabled, though, we mustn't do
 8361 * this because StartupSUBTRANS hasn't been called yet.
 8362 */
 8363 if (EnableHotStandby)
 8365
 8366 /* Real work is done; log and update stats. */
 8367 LogCheckpointEnd(true, flags);
 8368
 8369 /* Reset the process title */
 8370 update_checkpoint_display(flags, true, true);
 8371
 8374 errmsg("recovery restart point at %X/%08X",
 8375 LSN_FORMAT_ARGS(lastCheckPoint.redo)),
 8376 xtime ? errdetail("Last completed transaction was at log time %s.",
 8378
 8379 /*
 8380 * Finally, execute archive_cleanup_command, if any.
 8381 */
 8384 "archive_cleanup_command",
 8385 false,
 8387
 8388 return true;
 8389}
8390
8391/*
8392 * Report availability of WAL for the given target LSN
8393 * (typically a slot's restart_lsn)
8394 *
8395 * Returns one of the following enum values:
8396 *
8397 * * WALAVAIL_RESERVED means targetLSN is available and it is in the range of
8398 * max_wal_size.
8399 *
8400 * * WALAVAIL_EXTENDED means it is still available by preserving extra
8401 * segments beyond max_wal_size. If max_slot_wal_keep_size is smaller
8402 * than max_wal_size, this state is not returned.
8403 *
8404 * * WALAVAIL_UNRESERVED means it is being lost and the next checkpoint will
8405 * remove reserved segments. The walsender using this slot may return to the
8406 * above.
8407 *
8408 * * WALAVAIL_REMOVED means it has been removed. A replication stream on
8409 * a slot with this LSN cannot continue. (Any associated walsender
8410 * processes should have been terminated already.)
8411 *
8412 * * WALAVAIL_INVALID_LSN means the slot hasn't been set to reserve WAL.
8413 */
/*
 * NOTE(review): this excerpt comes from a rendered code listing; the
 * embedded line numbers are discontinuous (8413 -> 8416 here), so the
 * function signature and several statements are elided.  Do not modify
 * this block without consulting the complete original xlog.c.
 */
8416{
8417 XLogRecPtr currpos; /* current write LSN */
8418 XLogSegNo currSeg; /* segid of currpos */
8419 XLogSegNo targetSeg; /* segid of targetLSN */
8420 XLogSegNo oldestSeg; /* actual oldest segid */
8421 XLogSegNo oldestSegMaxWalSize; /* oldest segid kept by max_wal_size */
8422 XLogSegNo oldestSlotSeg; /* oldest segid kept by slot */
8424
8425 /*
8426 * slot does not reserve WAL. Either deactivated, or has never been active
8427 */
/* NOTE(review): the condition guarding this early return is elided. */
8429 return WALAVAIL_INVALID_LSN;
8430
8431 /*
8432 * Calculate the oldest segment currently reserved by all slots,
8433 * considering wal_keep_size and max_slot_wal_keep_size. Initialize
8434 * oldestSlotSeg to the current segment.
8435 */
8436 currpos = GetXLogWriteRecPtr();
8438 KeepLogSeg(currpos, &oldestSlotSeg);
8439
8440 /*
8441 * Find the oldest extant segment file. We get 1 until checkpoint removes
8442 * the first WAL segment file since startup, which causes the status being
8443 * wrong under certain abnormal conditions but that doesn't actually harm.
8444 */
/* NOTE(review): the statement populating oldestSeg is elided here. */
8446
8447 /* calculate oldest segment by max_wal_size */
8450
/* NOTE(review): both branch bodies computing oldestSegMaxWalSize are elided. */
8451 if (currSeg > keepSegs)
8453 else
8455
8456 /* the segment we care about */
8458
8459 /*
8460 * No point in returning reserved or extended status values if the
8461 * targetSeg is known to be lost.
8462 */
8463 if (targetSeg >= oldestSlotSeg)
8464 {
8465 /* show "reserved" when targetSeg is within max_wal_size */
8467 return WALAVAIL_RESERVED;
8468
8469 /* being retained by slots exceeding max_wal_size */
8470 return WALAVAIL_EXTENDED;
8471 }
8472
8473 /* WAL segments are no longer retained but haven't been removed yet */
8474 if (targetSeg >= oldestSeg)
8475 return WALAVAIL_UNRESERVED;
8476
8477 /* Definitely lost */
8478 return WALAVAIL_REMOVED;
8479}
8480
8481
8482/*
8483 * Retreat *logSegNo to the last segment that we need to retain because of
8484 * either wal_keep_size or replication slots.
8485 *
8486 * This is calculated by subtracting wal_keep_size from the given xlog
8487 * location, recptr and by making sure that that result is below the
8488 * requirement of replication slots. For the latter criterion we do consider
8489 * the effects of max_slot_wal_keep_size: reserve at most that much space back
8490 * from recptr.
8491 *
8492 * Note about replication slots: if this function calculates a value
8493 * that's further ahead than what slots need reserved, then affected
8494 * slots need to be invalidated and this function invoked again.
8495 * XXX it might be a good idea to rewrite this function so that
8496 * invalidation is optionally done here, instead.
8497 */
/*
 * NOTE(review): rendered excerpt; the signature line (8499) and several
 * statements are elided (embedded line numbers jump).  Compare against the
 * original xlog.c before editing.
 */
8498static void
8500{
8502 XLogSegNo segno;
8504
/* NOTE(review): derivation of currSegNo from recptr is elided above. */
8506 segno = currSegNo;
8507
8508 /* Calculate how many segments are kept by slots. */
8511 {
8513
8514 /*
8515 * Account for max_slot_wal_keep_size to avoid keeping more than
8516 * configured. However, don't do that during a binary upgrade: if
8517 * slots were to be invalidated because of this, it would not be
8518 * possible to preserve logical ones during the upgrade.
8519 */
8521 {
8523
8526
8527 if (currSegNo - segno > slot_keep_segs)
8528 segno = currSegNo - slot_keep_segs;
8529 }
8530 }
8531
8532 /*
8533 * If WAL summarization is in use, don't remove WAL that has yet to be
8534 * summarized.
8535 */
8538 {
8540
8542 if (unsummarized_segno < segno)
8543 segno = unsummarized_segno;
8544 }
8545
8546 /* but, keep at least wal_keep_size if that's set */
8547 if (wal_keep_size_mb > 0)
8548 {
8550
8552 if (currSegNo - segno < keep_segs)
8553 {
8554 /* avoid underflow, don't go below 1 */
8555 if (currSegNo <= keep_segs)
8556 segno = 1;
8557 else
8558 segno = currSegNo - keep_segs;
8559 }
8560 }
8561
8562 /* don't delete WAL segments newer than the calculated segment */
8563 if (segno < *logSegNo)
8564 *logSegNo = segno;
8565}
8566
8567/*
8568 * Write a NEXTOID log record
8569 */
/*
 * NOTE(review): rendered excerpt; the signature line (8571) and the
 * XLogBeginInsert/XLogInsert calls around the registration below are
 * elided (embedded line numbers jump).
 */
8570void
8572{
8574 XLogRegisterData(&nextOid, sizeof(Oid));
8576
8577 /*
8578 * We need not flush the NEXTOID record immediately, because any of the
8579 * just-allocated OIDs could only reach disk as part of a tuple insert or
8580 * update that would have its own XLOG record that must follow the NEXTOID
8581 * record. Therefore, the standard buffer LSN interlock applied to those
8582 * records will ensure no such OID reaches disk before the NEXTOID record
8583 * does.
8584 *
8585 * Note, however, that the above statement only covers state "within" the
8586 * database. When we use a generated OID as a file or directory name, we
8587 * are in a sense violating the basic WAL rule, because that filesystem
8588 * change may reach disk before the NEXTOID WAL record does. The impact
8589 * of this is that if a database crash occurs immediately afterward, we
8590 * might after restart re-generate the same OID and find that it conflicts
8591 * with the leftover file or directory. But since for safety's sake we
8592 * always loop until finding a nonconflicting filename, this poses no real
8593 * problem in practice. See pgsql-hackers discussion 27-Sep-2006.
8594 */
8595}
8596
8597/*
8598 * Write an XLOG SWITCH record.
8599 *
8600 * Here we just blindly issue an XLogInsert request for the record.
8601 * All the magic happens inside XLogInsert.
8602 *
8603 * The return value is either the end+1 address of the switch record,
8604 * or the end+1 address of the prior segment if we did not need to
8605 * write a switch record because we are already at segment start.
8606 */
/*
 * NOTE(review): rendered excerpt; the return-type/signature lines and the
 * statements assigning RecPtr are elided (embedded line numbers jump).
 */
8609{
8611
8612 /* XLOG SWITCH has no data */
8614
/* NOTE(review): the body of this branch (record-marking call) is elided. */
8615 if (mark_unimportant)
8618
8619 return RecPtr;
8620}
8621
8622/*
8623 * Write a RESTORE POINT record
8624 */
/*
 * NOTE(review): rendered excerpt; the signature, xl_restore_point setup,
 * XLogBeginInsert/XLogInsert calls, and part of the ereport argument list
 * are elided (embedded line numbers jump).
 */
8627{
8630
8632 strlcpy(xlrec.rp_name, rpName, MAXFNAMELEN);
8633
8636
8638
8639 ereport(LOG,
8640 errmsg("restore point \"%s\" created at %X/%08X",
8642
8643 return RecPtr;
8644}
8645
8646/*
8647 * Write an empty XLOG record to assign a distinct LSN.
8648 *
8649 * This is used by some index AMs when building indexes on permanent relations
8650 * with wal_level=minimal. In that scenario, WAL-logging will start after
8651 * commit, but the index AM needs distinct LSNs to detect concurrent page
8652 * modifications. When the current WAL insert position hasn't advanced since
8653 * the last call, we emit a dummy record to ensure we get a new, distinct LSN.
8654 */
/*
 * NOTE(review): rendered excerpt; the function name/signature (lines
 * 8655-8656) and the XLogBeginInsert/XLogInsert calls are elided, so the
 * identifier of this routine cannot be confirmed from this view.
 */
8657{
8658 int dummy = 0;
8659
8660 /*
8661 * Records other than XLOG_SWITCH must have content. We use an integer 0
8662 * to satisfy this restriction.
8663 */
8666 XLogRegisterData(&dummy, sizeof(dummy));
8668}
8669
8670/*
8671 * Check if any of the GUC parameters that are critical for hot standby
8672 * have changed, and update the value in pg_control file if necessary.
8673 */
/*
 * NOTE(review): rendered excerpt; the signature (8675), the outer
 * change-detection condition (8677-8684), the XLOG_PARAMETER_CHANGE
 * insert call, and the ControlFile update/flush statements (8714-8726)
 * are elided (embedded line numbers jump).
 */
8674static void
8676{
8685 {
8686 /*
8687 * The change in number of backend slots doesn't need to be WAL-logged
8688 * if archiving is not enabled, as you can't start archive recovery
8689 * with wal_level=minimal anyway. We don't really care about the
8690 * values in pg_control either if wal_level=minimal, but seems better
8691 * to keep them up-to-date to avoid confusion.
8692 */
8694 {
8697
8699 xlrec.max_worker_processes = max_worker_processes;
8700 xlrec.max_wal_senders = max_wal_senders;
8701 xlrec.max_prepared_xacts = max_prepared_xacts;
8702 xlrec.max_locks_per_xact = max_locks_per_xact;
8703 xlrec.wal_level = wal_level;
8704 xlrec.wal_log_hints = wal_log_hints;
8705 xlrec.track_commit_timestamp = track_commit_timestamp;
8706
8708 XLogRegisterData(&xlrec, sizeof(xlrec));
8709
8712 }
8713
8715
8725
8727 }
8728}
8729
8730/*
8731 * Log the new state of checksums
8732 */
/*
 * NOTE(review): rendered excerpt; the signature and the entire function
 * body (lines 8734-8746) are elided here — only the closing line number
 * remains.  Nothing can be safely asserted about this routine from this
 * view; consult the original xlog.c.
 */
8733static void
8747
8748/*
8749 * Update full_page_writes in shared memory, and write an
8750 * XLOG_FPW_CHANGE record if necessary.
8751 *
8752 * Note: this function assumes there is no other process running
8753 * concurrently that could update it.
8754 */
/*
 * NOTE(review): rendered excerpt; the signature (8756), the Insert
 * pointer initialization, the RecoveryInProgress() call, the critical
 * section begin/end, and the lock acquire/release statements are elided
 * (embedded line numbers jump).
 */
8755void
8757{
8759 bool recoveryInProgress;
8760
8761 /*
8762 * Do nothing if full_page_writes has not been changed.
8763 *
8764 * It's safe to check the shared full_page_writes without the lock,
8765 * because we assume that there is no concurrently running process which
8766 * can update it.
8767 */
8768 if (fullPageWrites == Insert->fullPageWrites)
8769 return;
8770
8771 /*
8772 * Perform this outside critical section so that the WAL insert
8773 * initialization done by RecoveryInProgress() doesn't trigger an
8774 * assertion failure.
8775 */
8777
8779
8780 /*
8781 * It's always safe to take full page images, even when not strictly
8782 * required, but not the other round. So if we're setting full_page_writes
8783 * to true, first set it true and then write the WAL record. If we're
8784 * setting it to false, first write the WAL record and then set the global
8785 * flag.
8786 */
8787 if (fullPageWrites)
8788 {
8790 Insert->fullPageWrites = true;
8792 }
8793
8794 /*
8795 * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
8796 * full_page_writes during archive recovery, if required.
8797 */
8799 {
8801 XLogRegisterData(&fullPageWrites, sizeof(bool));
8802
8804 }
8805
8806 if (!fullPageWrites)
8807 {
8809 Insert->fullPageWrites = false;
8811 }
8813}
8814
8815/*
8816 * XLOG resource manager's routines
8817 *
8818 * Definitions of info values are in include/catalog/pg_control.h, though
8819 * not all record types are related to control file updates.
8820 *
8821 * NOTE: Some XLOG record types that are directly related to WAL recovery
8822 * are handled in xlogrecovery_redo().
8823 */
/*
 * NOTE(review): rendered excerpt; the signature (8825) and many statements
 * throughout the dispatch arms are elided (embedded line numbers jump:
 * lock acquire/release pairs, some local declarations, and several helper
 * calls are missing).  Treat this block as read-only documentation of
 * structure; consult the original xlog.c before editing.
 */
8824void
8826{
8827 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8828 XLogRecPtr lsn = record->EndRecPtr;
8829
8830 /*
8831 * In XLOG rmgr, backup blocks are only used by XLOG_FPI and
8832 * XLOG_FPI_FOR_HINT records.
8833 */
8834 Assert(info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
8835 !XLogRecHasAnyBlockRefs(record));
8836
8837 if (info == XLOG_NEXTOID)
8838 {
8839 Oid nextOid;
8840
8841 /*
8842 * We used to try to take the maximum of TransamVariables->nextOid and
8843 * the recorded nextOid, but that fails if the OID counter wraps
8844 * around. Since no OID allocation should be happening during replay
8845 * anyway, better to just believe the record exactly. We still take
8846 * OidGenLock while setting the variable, just in case.
8847 */
8848 memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
8850 TransamVariables->nextOid = nextOid;
8853 }
8854 else if (info == XLOG_CHECKPOINT_SHUTDOWN)
8855 {
8856 CheckPoint checkPoint;
8857 TimeLineID replayTLI;
8858
8859 memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
8860 /* In a SHUTDOWN checkpoint, believe the counters exactly */
8862 TransamVariables->nextXid = checkPoint.nextXid;
8865 TransamVariables->nextOid = checkPoint.nextOid;
8869 checkPoint.nextMultiOffset);
8870
8872 checkPoint.oldestMultiDB);
8873
8874 /*
8875 * No need to set oldestClogXid here as well; it'll be set when we
8876 * redo an xl_clog_truncate if it changed since initialization.
8877 */
8878 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
8879
8880 /*
8881 * If we see a shutdown checkpoint while waiting for an end-of-backup
8882 * record, the backup was canceled and the end-of-backup record will
8883 * never arrive.
8884 */
8888 ereport(PANIC,
8889 (errmsg("online backup was canceled, recovery cannot continue")));
8890
8891 /*
8892 * If we see a shutdown checkpoint, we know that nothing was running
8893 * on the primary at this point. So fake-up an empty running-xacts
8894 * record and use that here and now. Recover additional standby state
8895 * for prepared transactions.
8896 */
8898 {
8899 TransactionId *xids;
8900 int nxids;
8902 TransactionId latestCompletedXid;
8904
8906
8907 /* Update pg_subtrans entries for any prepared transactions */
8909
8910 /*
8911 * Construct a RunningTransactions snapshot representing a shut
8912 * down server, with only prepared transactions still alive. We're
8913 * never overflowed at this point because all subxids are listed
8914 * with their parent prepared transactions.
8915 */
8916 running.xcnt = nxids;
8917 running.subxcnt = 0;
8919 running.nextXid = XidFromFullTransactionId(checkPoint.nextXid);
8921 latestCompletedXid = XidFromFullTransactionId(checkPoint.nextXid);
8922 TransactionIdRetreat(latestCompletedXid);
8923 Assert(TransactionIdIsNormal(latestCompletedXid));
8924 running.latestCompletedXid = latestCompletedXid;
8925 running.xids = xids;
8926
8928 }
8929
8930 /* ControlFile->checkPointCopy always tracks the latest ckpt XID */
8934
8937
8938 /*
8939 * We should've already switched to the new TLI before replaying this
8940 * record.
8941 */
8942 (void) GetCurrentReplayRecPtr(&replayTLI);
8943 if (checkPoint.ThisTimeLineID != replayTLI)
8944 ereport(PANIC,
8945 (errmsg("unexpected timeline ID %u (should be %u) in shutdown checkpoint record",
8946 checkPoint.ThisTimeLineID, replayTLI)));
8947
8948 RecoveryRestartPoint(&checkPoint, record);
8949
8950 /*
8951 * After replaying a checkpoint record, free all smgr objects.
8952 * Otherwise we would never do so for dropped relations, as the
8953 * startup does not process shared invalidation messages or call
8954 * AtEOXact_SMgr().
8955 */
8957 }
8958 else if (info == XLOG_CHECKPOINT_ONLINE)
8959 {
8960 CheckPoint checkPoint;
8961 TimeLineID replayTLI;
8962
8963 memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
8964 /* In an ONLINE checkpoint, treat the XID counter as a minimum */
8967 checkPoint.nextXid))
8968 TransamVariables->nextXid = checkPoint.nextXid;
8970
8971 /*
8972 * We ignore the nextOid counter in an ONLINE checkpoint, preferring
8973 * to track OID assignment through XLOG_NEXTOID records. The nextOid
8974 * counter is from the start of the checkpoint and might well be stale
8975 * compared to later XLOG_NEXTOID records. We could try to take the
8976 * maximum of the nextOid counter and our latest value, but since
8977 * there's no particular guarantee about the speed with which the OID
8978 * counter wraps around, that's a risky thing to do. In any case,
8979 * users of the nextOid counter are required to avoid assignment of
8980 * duplicates, so that a somewhat out-of-date value should be safe.
8981 */
8982
8983 /* Handle multixact */
8985 checkPoint.nextMultiOffset);
8986
8987 /*
8988 * NB: This may perform multixact truncation when replaying WAL
8989 * generated by an older primary.
8990 */
8992 checkPoint.oldestMultiDB);
8994 checkPoint.oldestXid))
8996 checkPoint.oldestXidDB);
8997 /* ControlFile->checkPointCopy always tracks the latest ckpt XID */
9001
9002 /* TLI should not change in an on-line checkpoint */
9003 (void) GetCurrentReplayRecPtr(&replayTLI);
9004 if (checkPoint.ThisTimeLineID != replayTLI)
9005 ereport(PANIC,
9006 (errmsg("unexpected timeline ID %u (should be %u) in online checkpoint record",
9007 checkPoint.ThisTimeLineID, replayTLI)));
9008
9009 RecoveryRestartPoint(&checkPoint, record);
9010
9011 /*
9012 * After replaying a checkpoint record, free all smgr objects.
9013 * Otherwise we would never do so for dropped relations, as the
9014 * startup does not process shared invalidation messages or call
9015 * AtEOXact_SMgr().
9016 */
9018 }
9019 else if (info == XLOG_OVERWRITE_CONTRECORD)
9020 {
9021 /* nothing to do here, handled in xlogrecovery_redo() */
9022 }
9023 else if (info == XLOG_END_OF_RECOVERY)
9024 {
9026 TimeLineID replayTLI;
9027
9028 memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
9029
9030 /*
9031 * For Hot Standby, we could treat this like a Shutdown Checkpoint,
9032 * but this case is rarer and harder to test, so the benefit doesn't
9033 * outweigh the potential extra cost of maintenance.
9034 */
9035
9036 /*
9037 * We should've already switched to the new TLI before replaying this
9038 * record.
9039 */
9040 (void) GetCurrentReplayRecPtr(&replayTLI);
9041 if (xlrec.ThisTimeLineID != replayTLI)
9042 ereport(PANIC,
9043 (errmsg("unexpected timeline ID %u (should be %u) in end-of-recovery record",
9044 xlrec.ThisTimeLineID, replayTLI)));
9045 }
9046 else if (info == XLOG_NOOP)
9047 {
9048 /* nothing to do here */
9049 }
9050 else if (info == XLOG_SWITCH)
9051 {
9052 /* nothing to do here */
9053 }
9054 else if (info == XLOG_RESTORE_POINT)
9055 {
9056 /* nothing to do here, handled in xlogrecovery.c */
9057 }
9058 else if (info == XLOG_ASSIGN_LSN)
9059 {
9060 /* nothing to do here, see XLogGetFakeLSN() */
9061 }
9062 else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
9063 {
9064 /*
9065 * XLOG_FPI records contain nothing else but one or more block
9066 * references. Every block reference must include a full-page image
9067 * even if full_page_writes was disabled when the record was generated
9068 * - otherwise there would be no point in this record.
9069 *
9070 * XLOG_FPI_FOR_HINT records are generated when a page needs to be
9071 * WAL-logged because of a hint bit update. They are only generated
9072 * when checksums and/or wal_log_hints are enabled. They may include
9073 * no full-page images if full_page_writes was disabled when they were
9074 * generated. In this case there is nothing to do here.
9075 *
9076 * No recovery conflicts are generated by these generic records - if a
9077 * resource manager needs to generate conflicts, it has to define a
9078 * separate WAL record type and redo routine.
9079 */
9080 for (uint8 block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
9081 {
9082 Buffer buffer;
9083
9084 if (!XLogRecHasBlockImage(record, block_id))
9085 {
9086 if (info == XLOG_FPI)
9087 elog(ERROR, "XLOG_FPI record did not contain a full-page image")
9088 continue;
9089 }
9090
9091 if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
9092 elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
9093 UnlockReleaseBuffer(buffer);
9094 }
9095 }
9096 else if (info == XLOG_BACKUP_END)
9097 {
9098 /* nothing to do here, handled in xlogrecovery_redo() */
9099 }
9100 else if (info == XLOG_PARAMETER_CHANGE)
9101 {
9103
9104 /* Update our copy of the parameters in pg_control */
9105 memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_parameter_change));
9106
9108 ControlFile->MaxConnections = xlrec.MaxConnections;
9109 ControlFile->max_worker_processes = xlrec.max_worker_processes;
9110 ControlFile->max_wal_senders = xlrec.max_wal_senders;
9111 ControlFile->max_prepared_xacts = xlrec.max_prepared_xacts;
9112 ControlFile->max_locks_per_xact = xlrec.max_locks_per_xact;
9113 ControlFile->wal_level = xlrec.wal_level;
9114 ControlFile->wal_log_hints = xlrec.wal_log_hints;
9115
9116 /*
9117 * Update minRecoveryPoint to ensure that if recovery is aborted, we
9118 * recover back up to this point before allowing hot standby again.
9119 * This is important if the max_* settings are decreased, to ensure
9120 * you don't run queries against the WAL preceding the change. The
9121 * local copies cannot be updated as long as crash recovery is
9122 * happening and we expect all the WAL to be replayed.
9123 */
9125 {
9128 }
9130 {
9131 TimeLineID replayTLI;
9132
9133 (void) GetCurrentReplayRecPtr(&replayTLI);
9135 ControlFile->minRecoveryPointTLI = replayTLI;
9136 }
9137
9138 CommitTsParameterChange(xlrec.track_commit_timestamp,
9140 ControlFile->track_commit_timestamp = xlrec.track_commit_timestamp;
9141
9144
9145 /* Check to see if any parameter change gives a problem on recovery */
9147 }
9148 else if (info == XLOG_FPW_CHANGE)
9149 {
9150 bool fpw;
9151
9152 memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
9153
9154 /*
9155 * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
9156 * do_pg_backup_start() and do_pg_backup_stop() can check whether
9157 * full_page_writes has been disabled during online backup.
9158 */
9159 if (!fpw)
9160 {
9165 }
9166
9167 /* Keep track of full_page_writes */
9169 }
9170 else if (info == XLOG_CHECKPOINT_REDO)
9171 {
9173 bool new_state = false;
9174
9176
9178 XLogCtl->data_checksum_version = redo_rec.data_checksum_version;
9179 SetLocalDataChecksumState(redo_rec.data_checksum_version);
9180 if (redo_rec.data_checksum_version != ControlFile->data_checksum_version)
9181 new_state = true;
9183
9184 if (new_state)
9185 EmitAndWaitDataChecksumsBarrier(redo_rec.data_checksum_version);
9186 }
9187 else if (info == XLOG_LOGICAL_DECODING_STATUS_CHANGE)
9188 {
9189 bool status;
9190
9191 memcpy(&status, XLogRecGetData(record), sizeof(bool));
9192
9193 /*
9194 * We need to toggle the logical decoding status and update the
9195 * XLogLogicalInfo cache of processes synchronously because
9196 * XLogLogicalInfoActive() is used even during read-only queries
9197 * (e.g., via RelationIsAccessibleInLogicalDecoding()). In the
9198 * 'disable' case, it is safe to invalidate existing slots after
9199 * disabling logical decoding because logical decoding cannot process
9200 * subsequent WAL records, which may not contain logical information.
9201 */
9202 if (status)
9204 else
9206
9207 elog(DEBUG1, "update logical decoding status to %d during recovery",
9208 status);
9209
9210 if (InRecovery && InHotStandby)
9211 {
9212 if (!status)
9213 {
9214 /*
9215 * Invalidate logical slots if we are in hot standby and the
9216 * primary disabled logical decoding.
9217 */
9219 0, InvalidOid,
9221 }
9222 else if (sync_replication_slots)
9223 {
9224 /*
9225 * Signal the postmaster to launch the slotsync worker.
9226 *
9227 * XXX: For simplicity, we keep the slotsync worker running
9228 * even after logical decoding is disabled. A future
9229 * improvement can consider starting and stopping the worker
9230 * based on logical decoding status change.
9231 */
9233 }
9234 }
9235 }
9236}
9237
/*
 * NOTE(review): rendered excerpt; the function name/signature (9239) and
 * the lock acquire/release and ControlFile-flush statements are elided
 * (embedded line numbers jump).  Handles the XLOG2_CHECKSUMS record by
 * copying the new checksum state into shared memory and pg_control, then
 * waiting on a barrier — see the in-body comment.
 */
9238void
9240{
9241 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9242
9243 if (info == XLOG2_CHECKSUMS)
9244 {
9246
9247 memcpy(&state, XLogRecGetData(record), sizeof(xl_checksum_state));
9248
9250 XLogCtl->data_checksum_version = state.new_checksum_state;
9252
9254 ControlFile->data_checksum_version = state.new_checksum_state;
9257
9258 /*
9259 * Block on a procsignalbarrier to await all processes having seen the
9260 * change to checksum status. Once the barrier has been passed we can
9261 * initiate the corresponding processing.
9262 */
9263 EmitAndWaitDataChecksumsBarrier(state.new_checksum_state);
9264 }
9265}
9266
9267/*
9268 * Return the extra open flags used for opening a file, depending on the
9269 * value of the GUCs wal_sync_method, fsync and debug_io_direct.
9270 */
/*
 * NOTE(review): rendered excerpt; the statement setting o_direct_flag
 * (9282-9283) and the WAL_SYNC_METHOD_* case labels (9297-9299, 9302,
 * 9306) are elided (embedded line numbers jump).
 */
9271static int
9272get_sync_bit(int method)
9273{
9274 int o_direct_flag = 0;
9275
9276 /*
9277 * Use O_DIRECT if requested, except in walreceiver process. The WAL
9278 * written by walreceiver is normally read by the startup process soon
9279 * after it's written. Also, walreceiver performs unaligned writes, which
9280 * don't work with O_DIRECT, so it is required for correctness too.
9281 */
9284
9285 /* If fsync is disabled, never open in sync mode */
9286 if (!enableFsync)
9287 return o_direct_flag;
9288
9289 switch (method)
9290 {
9291 /*
9292 * enum values for all sync options are defined even if they are
9293 * not supported on the current platform. But if not, they are
9294 * not included in the enum option array, and therefore will never
9295 * be seen here.
9296 */
9300 return o_direct_flag;
9301#ifdef O_SYNC
9303 return O_SYNC | o_direct_flag;
9304#endif
9305#ifdef O_DSYNC
9307 return O_DSYNC | o_direct_flag;
9308#endif
9309 default:
9310 /* can't happen (unless we are out of sync with option array) */
9311 elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
9312 return 0; /* silence warning */
9313 }
9314}
9315
9316/*
9317 * GUC support
9318 */
/*
 * NOTE(review): rendered excerpt; the assign-hook signature (9320), the
 * condition comparing old and new sync methods (9322), the filename
 * construction before the PANIC report, and the wait-event bracketing
 * calls are elided (embedded line numbers jump).
 */
9319void
9321{
9323 {
9324 /*
9325 * To ensure that no blocks escape unsynced, force an fsync on the
9326 * currently open log segment (if any). Also, if the open flag is
9327 * changing, close the log file so it will be reopened (with new flag
9328 * bit) at next use.
9329 */
9330 if (openLogFile >= 0)
9331 {
9333 if (pg_fsync(openLogFile) != 0)
9334 {
9335 char xlogfname[MAXFNAMELEN];
9336 int save_errno;
9337
9338 save_errno = errno;
9341 errno = save_errno;
9342 ereport(PANIC,
9344 errmsg("could not fsync file \"%s\": %m", xlogfname)));
9345 }
9346
9349 XLogFileClose();
9350 }
9351 }
9352}
9353
9354
9355/*
9356 * Issue appropriate kind of fsync (if any) for an XLOG output file.
9357 *
9358 * 'fd' is a file descriptor for the XLOG file to be fsync'd.
9359 * 'segno' is for error reporting purposes.
9360 */
/*
 * NOTE(review): rendered excerpt; the signature (9362), the local 'start'
 * timestamp declaration, the WAL_SYNC_METHOD_* case labels, the filename
 * construction before the PANIC reports, and the pg_stat_io accounting
 * call's leading arguments are elided (embedded line numbers jump).
 */
9361void
9363{
9364 char *msg = NULL;
9366
9367 Assert(tli != 0);
9368
9369 /*
9370 * Quick exit if fsync is disabled or write() has already synced the WAL
9371 * file.
9372 */
9373 if (!enableFsync ||
9376 return;
9377
9378 /*
9379 * Measure I/O timing to sync the WAL file for pg_stat_io.
9380 */
9382
9384 switch (wal_sync_method)
9385 {
9387 if (pg_fsync_no_writethrough(fd) != 0)
9388 msg = _("could not fsync file \"%s\": %m");
9389 break;
9390#ifdef HAVE_FSYNC_WRITETHROUGH
9392 if (pg_fsync_writethrough(fd) != 0)
9393 msg = _("could not fsync write-through file \"%s\": %m");
9394 break;
9395#endif
9397 if (pg_fdatasync(fd) != 0)
9398 msg = _("could not fdatasync file \"%s\": %m");
9399 break;
9402 /* not reachable */
9403 Assert(false);
9404 break;
9405 default:
9406 ereport(PANIC,
9408 errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
9409 break;
9410 }
9411
9412 /* PANIC if failed to fsync */
9413 if (msg)
9414 {
9415 char xlogfname[MAXFNAMELEN];
9416 int save_errno = errno;
9417
9419 errno = save_errno;
9420 ereport(PANIC,
9422 errmsg(msg, xlogfname)));
9423 }
9424
9426
9428 start, 1, 0);
9429}
9430
9431/*
9432 * do_pg_backup_start is the workhorse of the user-visible pg_backup_start()
9433 * function. It creates the necessary starting checkpoint and constructs the
9434 * backup state and tablespace map.
9435 *
9436 * Input parameters are "state" (the backup state), "fast" (if true, we do
9437 * the checkpoint in fast mode), and "tablespaces" (if non-NULL, indicates a
9438 * list of tablespaceinfo structs describing the cluster's tablespaces.).
9439 *
9440 * The tablespace map contents are appended to passed-in parameter
9441 * tablespace_map and the caller is responsible for including it in the backup
9442 * archive as 'tablespace_map'. The tablespace_map file is required mainly for
9443 * tar format in windows as native windows utilities are not able to create
9444 * symlinks while extracting files from tar. However for consistency and
9445 * platform-independence, we do it the same way everywhere.
9446 *
9447 * It fills in "state" with the information required for the backup, such
9448 * as the minimum WAL location that must be present to restore from this
9449 * backup (starttli) and the corresponding timeline ID (starttli).
9450 *
9451 * Every successfully started backup must be stopped by calling
9452 * do_pg_backup_stop() or do_pg_abort_backup(). There can be many
9453 * backups active at the same time.
9454 *
9455 * It is the responsibility of the caller of this function to verify the
9456 * permissions of the calling user!
9457 */
9458void
9459do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
9461{
9463
9464 Assert(state != NULL);
9466
9467 /*
9468 * During recovery, we don't need to check WAL level. Because, if WAL
9469 * level is not sufficient, it's impossible to get here during recovery.
9470 */
9472 ereport(ERROR,
9474 errmsg("WAL level not sufficient for making an online backup"),
9475 errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
9476
9478 ereport(ERROR,
9480 errmsg("backup label too long (max %d bytes)",
9481 MAXPGPATH)));
9482
9483 strlcpy(state->name, backupidstr, sizeof(state->name));
9484
9485 /*
9486 * Mark backup active in shared memory. We must do full-page WAL writes
9487 * during an on-line backup even if not doing so at other times, because
9488 * it's quite possible for the backup dump to obtain a "torn" (partially
9489 * written) copy of a database page if it reads the page concurrently with
9490 * our write to the same page. This can be fixed as long as the first
9491 * write to the page in the WAL sequence is a full-page write. Hence, we
9492 * increment runningBackups then force a CHECKPOINT, to ensure there are
9493 * no dirty pages in shared memory that might get dumped while the backup
9494 * is in progress without having a corresponding WAL record. (Once the
9495 * backup is complete, we need not force full-page writes anymore, since
9496 * we expect that any pages not modified during the backup interval must
9497 * have been correctly captured by the backup.)
9498 *
9499 * Note that forcing full-page writes has no effect during an online
9500 * backup from the standby.
9501 *
9502 * We must hold all the insertion locks to change the value of
9503 * runningBackups, to ensure adequate interlocking against
9504 * XLogInsertRecord().
9505 */
9509
9510 /*
9511 * Ensure we decrement runningBackups if we fail below. NB -- for this to
9512 * work correctly, it is critical that sessionBackupState is only updated
9513 * after this block is over.
9514 */
9516 {
9517 bool gotUniqueStartpoint = false;
9518 DIR *tblspcdir;
9519 struct dirent *de;
9521 int datadirpathlen;
9522
9523 /*
9524 * Force an XLOG file switch before the checkpoint, to ensure that the
9525 * WAL segment the checkpoint is written to doesn't contain pages with
9526 * old timeline IDs. That would otherwise happen if you called
9527 * pg_backup_start() right after restoring from a PITR archive: the
9528 * first WAL segment containing the startup checkpoint has pages in
9529 * the beginning with the old timeline ID. That can cause trouble at
9530 * recovery: we won't have a history file covering the old timeline if
9531 * pg_wal directory was not included in the base backup and the WAL
9532 * archive was cleared too before starting the backup.
9533 *
9534 * During recovery, we skip forcing XLOG file switch, which means that
9535 * the backup taken during recovery is not available for the special
9536 * recovery case described above.
9537 */
9539 RequestXLogSwitch(false);
9540
9541 do
9542 {
9543 bool checkpointfpw;
9544
9545 /*
9546 * Force a CHECKPOINT. Aside from being necessary to prevent torn
9547 * page problems, this guarantees that two successive backup runs
9548 * will have different checkpoint positions and hence different
9549 * history file names, even if nothing happened in between.
9550 *
9551 * During recovery, establish a restartpoint if possible. We use
9552 * the last restartpoint as the backup starting checkpoint. This
9553 * means that two successive backup runs can have same checkpoint
9554 * positions.
9555 *
9556 * Since the fact that we are executing do_pg_backup_start()
9557 * during recovery means that checkpointer is running, we can use
9558 * RequestCheckpoint() to establish a restartpoint.
9559 *
9560 * We use CHECKPOINT_FAST only if requested by user (via passing
9561 * fast = true). Otherwise this can take awhile.
9562 */
9564 (fast ? CHECKPOINT_FAST : 0));
9565
9566 /*
9567 * Now we need to fetch the checkpoint record location, and also
9568 * its REDO pointer. The oldest point in WAL that would be needed
9569 * to restore starting from the checkpoint is precisely the REDO
9570 * pointer.
9571 */
9573 state->checkpointloc = ControlFile->checkPoint;
9574 state->startpoint = ControlFile->checkPointCopy.redo;
9578
9580 {
9582
9583 /*
9584 * Check to see if all WAL replayed during online backup
9585 * (i.e., since last restartpoint used as backup starting
9586 * checkpoint) contain full-page writes.
9587 */
9591
9592 if (!checkpointfpw || state->startpoint <= recptr)
9593 ereport(ERROR,
9595 errmsg("WAL generated with \"full_page_writes=off\" was replayed "
9596 "since last restartpoint"),
9597 errhint("This means that the backup being taken on the standby "
9598 "is corrupt and should not be used. "
9599 "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
9600 "and then try an online backup again.")));
9601
9602 /*
9603 * During recovery, since we don't use the end-of-backup WAL
9604 * record and don't write the backup history file, the
9605 * starting WAL location doesn't need to be unique. This means
9606 * that two base backups started at the same time might use
9607 * the same checkpoint as starting locations.
9608 */
9609 gotUniqueStartpoint = true;
9610 }
9611
9612 /*
9613 * If two base backups are started at the same time (in WAL sender
9614 * processes), we need to make sure that they use different
9615 * checkpoints as starting locations, because we use the starting
9616 * WAL location as a unique identifier for the base backup in the
9617 * end-of-backup WAL record and when we write the backup history
9618 * file. Perhaps it would be better generate a separate unique ID
9619 * for each backup instead of forcing another checkpoint, but
9620 * taking a checkpoint right after another is not that expensive
9621 * either because only few buffers have been dirtied yet.
9622 */
9624 if (XLogCtl->Insert.lastBackupStart < state->startpoint)
9625 {
9626 XLogCtl->Insert.lastBackupStart = state->startpoint;
9627 gotUniqueStartpoint = true;
9628 }
9630 } while (!gotUniqueStartpoint);
9631
9632 /*
9633 * Construct tablespace_map file.
9634 */
9636
9637 /* Collect information about all tablespaces */
9639 while ((de = ReadDir(tblspcdir, PG_TBLSPC_DIR)) != NULL)
9640 {
9641 char fullpath[MAXPGPATH + sizeof(PG_TBLSPC_DIR)];
9642 char linkpath[MAXPGPATH];
9643 char *relpath = NULL;
9644 char *s;
9646 char *badp;
9647 Oid tsoid;
9648
9649 /*
9650 * Try to parse the directory name as an unsigned integer.
9651 *
9652 * Tablespace directories should be positive integers that can be
9653 * represented in 32 bits, with no leading zeroes or trailing
9654 * garbage. If we come across a name that doesn't meet those
9655 * criteria, skip it.
9656 */
9657 if (de->d_name[0] < '1' || de->d_name[1] > '9')
9658 continue;
9659 errno = 0;
9660 tsoid = strtoul(de->d_name, &badp, 10);
9661 if (*badp != '\0' || errno == EINVAL || errno == ERANGE)
9662 continue;
9663
9664 snprintf(fullpath, sizeof(fullpath), "%s/%s", PG_TBLSPC_DIR, de->d_name);
9665
9666 de_type = get_dirent_type(fullpath, de, false, ERROR);
9667
9668 if (de_type == PGFILETYPE_LNK)
9669 {
9671 int rllen;
9672
9673 rllen = readlink(fullpath, linkpath, sizeof(linkpath));
9674 if (rllen < 0)
9675 {
9677 (errmsg("could not read symbolic link \"%s\": %m",
9678 fullpath)));
9679 continue;
9680 }
9681 else if (rllen >= sizeof(linkpath))
9682 {
9684 (errmsg("symbolic link \"%s\" target is too long",
9685 fullpath)));
9686 continue;
9687 }
9688 linkpath[rllen] = '\0';
9689
9690 /*
9691 * Relpath holds the relative path of the tablespace directory
9692 * when it's located within PGDATA, or NULL if it's located
9693 * elsewhere.
9694 */
9695 if (rllen > datadirpathlen &&
9699
9700 /*
9701 * Add a backslash-escaped version of the link path to the
9702 * tablespace map file.
9703 */
9705 for (s = linkpath; *s; s++)
9706 {
9707 if (*s == '\n' || *s == '\r' || *s == '\\')
9710 }
9712 de->d_name, escapedpath.data);
9713 pfree(escapedpath.data);
9714 }
9715 else if (de_type == PGFILETYPE_DIR)
9716 {
9717 /*
9718 * It's possible to use allow_in_place_tablespaces to create
9719 * directories directly under pg_tblspc, for testing purposes
9720 * only.
9721 *
9722 * In this case, we store a relative path rather than an
9723 * absolute path into the tablespaceinfo.
9724 */
9725 snprintf(linkpath, sizeof(linkpath), "%s/%s",
9726 PG_TBLSPC_DIR, de->d_name);
9728 }
9729 else
9730 {
9731 /* Skip any other file type that appears here. */
9732 continue;
9733 }
9734
9736 ti->oid = tsoid;
9737 ti->path = pstrdup(linkpath);
9738 ti->rpath = relpath;
9739 ti->size = -1;
9740
9741 if (tablespaces)
9742 *tablespaces = lappend(*tablespaces, ti);
9743 }
9745
9746 state->starttime = (pg_time_t) time(NULL);
9747 }
9749
9750 state->started_in_recovery = backup_started_in_recovery;
9751
9752 /*
9753 * Mark that the start phase has correctly finished for the backup.
9754 */
9756}
9757
9758/*
9759 * Utility routine to fetch the session-level status of a backup running.
9760 */
9763{
9764 return sessionBackupState;
9765}
9766
9767/*
9768 * do_pg_backup_stop
9769 *
9770 * Utility function called at the end of an online backup. It creates history
9771 * file (if required), resets sessionBackupState and so on. It can optionally
9772 * wait for WAL segments to be archived.
9773 *
9774 * "state" is filled with the information necessary to restore from this
9775 * backup with its stop LSN (stoppoint), its timeline ID (stoptli), etc.
9776 *
9777 * It is the responsibility of the caller of this function to verify the
9778 * permissions of the calling user!
9779 */
9780void
9782{
9783 bool backup_stopped_in_recovery = false;
9784 char histfilepath[MAXPGPATH];
9788 FILE *fp;
9790 int waits = 0;
9791 bool reported_waiting = false;
9792
9793 Assert(state != NULL);
9794
9796
9797 /*
9798 * During recovery, we don't need to check WAL level. Because, if WAL
9799 * level is not sufficient, it's impossible to get here during recovery.
9800 */
9802 ereport(ERROR,
9804 errmsg("WAL level not sufficient for making an online backup"),
9805 errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
9806
9807 /*
9808 * OK to update backup counter and session-level lock.
9809 *
9810 * Note that CHECK_FOR_INTERRUPTS() must not occur while updating them,
9811 * otherwise they can be updated inconsistently, which might cause
9812 * do_pg_abort_backup() to fail.
9813 */
9815
9816 /*
9817 * It is expected that each do_pg_backup_start() call is matched by
9818 * exactly one do_pg_backup_stop() call.
9819 */
9822
9823 /*
9824 * Clean up session-level lock.
9825 *
9826 * You might think that WALInsertLockRelease() can be called before
9827 * cleaning up session-level lock because session-level lock doesn't need
9828 * to be protected with WAL insertion lock. But since
9829 * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
9830 * cleaned up before it.
9831 */
9833
9835
9836 /*
9837 * If we are taking an online backup from the standby, we confirm that the
9838 * standby has not been promoted during the backup.
9839 */
9840 if (state->started_in_recovery && !backup_stopped_in_recovery)
9841 ereport(ERROR,
9843 errmsg("the standby was promoted during online backup"),
9844 errhint("This means that the backup being taken is corrupt "
9845 "and should not be used. "
9846 "Try taking another online backup.")));
9847
9848 /*
9849 * During recovery, we don't write an end-of-backup record. We assume that
9850 * pg_control was backed up last and its minimum recovery point can be
9851 * available as the backup end location. Since we don't have an
9852 * end-of-backup record, we use the pg_control value to check whether
9853 * we've reached the end of backup when starting recovery from this
9854 * backup. We have no way of checking if pg_control wasn't backed up last
9855 * however.
9856 *
9857 * We don't force a switch to new WAL file but it is still possible to
9858 * wait for all the required files to be archived if waitforarchive is
9859 * true. This is okay if we use the backup to start a standby and fetch
9860 * the missing WAL using streaming replication. But in the case of an
9861 * archive recovery, a user should set waitforarchive to true and wait for
9862 * them to be archived to ensure that all the required files are
9863 * available.
9864 *
9865 * We return the current minimum recovery point as the backup end
9866 * location. Note that it can be greater than the exact backup end
9867 * location if the minimum recovery point is updated after the backup of
9868 * pg_control. This is harmless for current uses.
9869 *
9870 * XXX currently a backup history file is for informational and debug
9871 * purposes only. It's not essential for an online backup. Furthermore,
9872 * even if it's created, it will not be archived during recovery because
9873 * an archiver is not invoked. So it doesn't seem worthwhile to write a
9874 * backup history file during recovery.
9875 */
9877 {
9879
9880 /*
9881 * Check to see if all WAL replayed during online backup contain
9882 * full-page writes.
9883 */
9887
9888 if (state->startpoint <= recptr)
9889 ereport(ERROR,
9891 errmsg("WAL generated with \"full_page_writes=off\" was replayed "
9892 "during online backup"),
9893 errhint("This means that the backup being taken on the standby "
9894 "is corrupt and should not be used. "
9895 "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
9896 "and then try an online backup again.")));
9897
9898
9900 state->stoppoint = ControlFile->minRecoveryPoint;
9903 }
9904 else
9905 {
9906 char *history_file;
9907
9908 /*
9909 * Write the backup-end xlog record
9910 */
9912 XLogRegisterData(&state->startpoint,
9913 sizeof(state->startpoint));
9915
9916 /*
9917 * Given that we're not in recovery, InsertTimeLineID is set and can't
9918 * change, so we can read it without a lock.
9919 */
9920 state->stoptli = XLogCtl->InsertTimeLineID;
9921
9922 /*
9923 * Force a switch to a new xlog segment file, so that the backup is
9924 * valid as soon as archiver moves out the current segment file.
9925 */
9926 RequestXLogSwitch(false);
9927
9928 state->stoptime = (pg_time_t) time(NULL);
9929
9930 /*
9931 * Write the backup history file
9932 */
9935 state->startpoint, wal_segment_size);
9936 fp = AllocateFile(histfilepath, "w");
9937 if (!fp)
9938 ereport(ERROR,
9940 errmsg("could not create file \"%s\": %m",
9941 histfilepath)));
9942
9943 /* Build and save the contents of the backup history file */
9945 fprintf(fp, "%s", history_file);
9947
9948 if (fflush(fp) || ferror(fp) || FreeFile(fp))
9949 ereport(ERROR,
9951 errmsg("could not write file \"%s\": %m",
9952 histfilepath)));
9953
9954 /*
9955 * Clean out any no-longer-needed history files. As a side effect,
9956 * this will post a .ready file for the newly created history file,
9957 * notifying the archiver that history file may be archived
9958 * immediately.
9959 */
9961 }
9962
9963 /*
9964 * If archiving is enabled, wait for all the required WAL files to be
9965 * archived before returning. If archiving isn't enabled, the required WAL
9966 * needs to be transported via streaming replication (hopefully with
9967 * wal_keep_size set high enough), or some more exotic mechanism like
9968 * polling and copying files from pg_wal with script. We have no knowledge
9969 * of those mechanisms, so it's up to the user to ensure that he gets all
9970 * the required WAL.
9971 *
9972 * We wait until both the last WAL file filled during backup and the
9973 * history file have been archived, and assume that the alphabetic sorting
9974 * property of the WAL files ensures any earlier WAL files are safely
9975 * archived as well.
9976 *
9977 * We wait forever, since archive_command is supposed to work and we
9978 * assume the admin wanted his backup to work completely. If you don't
9979 * wish to wait, then either waitforarchive should be passed in as false,
9980 * or you can set statement_timeout. Also, some notices are issued to
9981 * clue in anyone who might be doing this interactively.
9982 */
9983
9984 if (waitforarchive &&
9987 {
9991
9994 state->startpoint, wal_segment_size);
9995
9997 waits = 0;
9998
10001 {
10003
10004 if (!reported_waiting && waits > 5)
10005 {
10007 (errmsg("base backup done, waiting for required WAL segments to be archived")));
10008 reported_waiting = true;
10009 }
10010
10013 1000L,
10016
10017 if (++waits >= seconds_before_warning)
10018 {
10019 seconds_before_warning *= 2; /* This wraps in >10 years... */
10021 (errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
10022 waits),
10023 errhint("Check that your \"archive_command\" is executing properly. "
10024 "You can safely cancel this backup, "
10025 "but the database backup will not be usable without all the WAL segments.")));
10026 }
10027 }
10028
10030 (errmsg("all required WAL segments have been archived")));
10031 }
10032 else if (waitforarchive)
10034 (errmsg("WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup")));
10035}
10036
10037
10038/*
10039 * do_pg_abort_backup: abort a running backup
10040 *
10041 * This does just the most basic steps of do_pg_backup_stop(), by taking the
10042 * system out of backup mode, thus making it a lot more safe to call from
10043 * an error handler.
10044 *
10045 * 'arg' indicates that it's being called during backup setup; so
10046 * sessionBackupState has not been modified yet, but runningBackups has
10047 * already been incremented. When it's false, then it's invoked as a
10048 * before_shmem_exit handler, and therefore we must not change state
10049 * unless sessionBackupState indicates that a backup is actually running.
10050 *
10051 * NB: This gets used as a PG_ENSURE_ERROR_CLEANUP callback and
10052 * before_shmem_exit handler, hence the odd-looking signature.
10053 */
10054void
10056{
10058
10059 /* If called during backup start, there shouldn't be one already running */
10061
10063 {
10067
10070
10073 errmsg("aborting backup due to backend exiting before pg_backup_stop was called"));
10074 }
10075}
10076
10077/*
10078 * Register a handler that will warn about unterminated backups at end of
10079 * session, unless this has already been done.
10080 */
10081void
10083{
10084 static bool already_done = false;
10085
10086 if (already_done)
10087 return;
10089 already_done = true;
10090}
10091
10092/*
10093 * Get latest WAL insert pointer
10094 */
10097{
10100
10101 SpinLockAcquire(&Insert->insertpos_lck);
10102 current_bytepos = Insert->CurrBytePos;
10103 SpinLockRelease(&Insert->insertpos_lck);
10104
10106}
10107
10108/*
10109 * Get latest WAL record end pointer
10110 */
10113{
10116
10117 SpinLockAcquire(&Insert->insertpos_lck);
10118 current_bytepos = Insert->CurrBytePos;
10119 SpinLockRelease(&Insert->insertpos_lck);
10120
10122}
10123
10124/*
10125 * Get latest WAL write pointer
10126 */
10129{
10131
10132 return LogwrtResult.Write;
10133}
10134
10135/*
10136 * Returns the redo pointer of the last checkpoint or restartpoint. This is
10137 * the oldest point in WAL that we still need, if we have to restart recovery.
10138 */
10139void
10147
10148/* Thin wrapper around ShutdownWalRcv(). */
10149void
10157
10158/* Enable WAL file recycling and preallocation. */
10159void
10166
10167/* Disable WAL file recycling and preallocation. */
10168void
10175
10176bool
10187
10188/*
10189 * Update the WalWriterSleeping flag.
10190 */
10191void
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:485
#define pg_memory_barrier()
Definition atomics.h:141
#define pg_read_barrier()
Definition atomics.h:154
static uint64 pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:476
#define pg_write_barrier()
Definition atomics.h:155
static uint64 pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
Definition atomics.h:595
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition atomics.h:532
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:453
static void pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:504
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
TimeLineID findNewestTimeLine(TimeLineID startTLI)
Definition timeline.c:265
void restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
Definition timeline.c:51
void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, XLogRecPtr switchpoint, char *reason)
Definition timeline.c:305
void startup_progress_timeout_handler(void)
Definition startup.c:302
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition timestamp.c:1751
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition timestamp.c:1775
TimestampTz GetCurrentTimestamp(void)
Definition timestamp.c:1639
const char * timestamptz_to_str(TimestampTz t)
Definition timestamp.c:1856
Datum now(PG_FUNCTION_ARGS)
Definition timestamp.c:1603
static bool backup_started_in_recovery
Definition basebackup.c:129
int Buffer
Definition buf.h:23
void CheckPointBuffers(int flags)
Definition bufmgr.c:4432
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5603
#define Min(x, y)
Definition c.h:1091
#define pg_attribute_unused()
Definition c.h:149
#define likely(x)
Definition c.h:437
#define MAXALIGN(LEN)
Definition c.h:896
#define TYPEALIGN(ALIGNVAL, LEN)
Definition c.h:889
uint8_t uint8
Definition c.h:622
#define Max(x, y)
Definition c.h:1085
#define Assert(condition)
Definition c.h:943
#define PG_BINARY
Definition c.h:1374
#define pg_attribute_always_inline
Definition c.h:305
uint64_t uint64
Definition c.h:625
#define unlikely(x)
Definition c.h:438
uint32_t uint32
Definition c.h:624
#define MAXALIGN64(LEN)
Definition c.h:921
#define PG_UINT64_MAX
Definition c.h:677
#define MemSet(start, val, len)
Definition c.h:1107
uint32 TransactionId
Definition c.h:736
size_t Size
Definition c.h:689
#define CATALOG_VERSION_NO
Definition catversion.h:60
void WakeupCheckpointer(void)
void AbsorbSyncRequests(void)
double CheckPointCompletionTarget
void RequestCheckpoint(int flags)
ChecksumStateType
Definition checksum.h:27
@ PG_DATA_CHECKSUM_VERSION
Definition checksum.h:29
@ PG_DATA_CHECKSUM_INPROGRESS_OFF
Definition checksum.h:30
@ PG_DATA_CHECKSUM_INPROGRESS_ON
Definition checksum.h:31
@ PG_DATA_CHECKSUM_OFF
Definition checksum.h:28
uint32 result
memcpy(sums, checksumBaseOffsets, sizeof(checksumBaseOffsets))
void BootStrapCLOG(void)
Definition clog.c:851
void StartupCLOG(void)
Definition clog.c:862
void CheckPointCLOG(void)
Definition clog.c:922
void TrimCLOG(void)
Definition clog.c:877
void StartupCommitTs(void)
Definition commit_ts.c:613
void CommitTsParameterChange(bool newvalue, bool oldvalue)
Definition commit_ts.c:645
bool track_commit_timestamp
Definition commit_ts.c:121
void CompleteCommitTsInitialization(void)
Definition commit_ts.c:623
void BootStrapCommitTs(void)
Definition commit_ts.c:599
void SetCommitTsLimit(TransactionId oldestXact, TransactionId newestXact)
Definition commit_ts.c:892
void CheckPointCommitTs(void)
Definition commit_ts.c:799
void update_controlfile(const char *DataDir, ControlFileData *ControlFile, bool do_sync)
#define fprintf(file, fmt, msg)
Definition cubescan.l:21
void EmitAndWaitDataChecksumsBarrier(uint32 state)
int64 TimestampTz
Definition timestamp.h:39
Datum arg
Definition elog.c:1323
int errcode_for_file_access(void)
Definition elog.c:898
int errcode(int sqlerrcode)
Definition elog.c:875
#define _(x)
Definition elog.c:96
#define LOG
Definition elog.h:32
int errhint(const char *fmt,...) pg_attribute_printf(1
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define FATAL
Definition elog.h:42
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:37
int int int errmsg_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define DEBUG2
Definition elog.h:30
#define PANIC
Definition elog.h:44
#define DEBUG1
Definition elog.h:31
#define ERROR
Definition elog.h:40
#define elog(elevel,...)
Definition elog.h:228
#define NOTICE
Definition elog.h:36
#define ereport(elevel,...)
Definition elog.h:152
int MakePGDirectory(const char *directoryName)
Definition fd.c:3963
int FreeDir(DIR *dir)
Definition fd.c:3009
int pg_fsync_no_writethrough(int fd)
Definition fd.c:442
int io_direct_flags
Definition fd.c:172
int durable_rename(const char *oldfile, const char *newfile, int elevel)
Definition fd.c:783
int pg_fdatasync(int fd)
Definition fd.c:481
int CloseTransientFile(int fd)
Definition fd.c:2855
int BasicOpenFile(const char *fileName, int fileFlags)
Definition fd.c:1090
int FreeFile(FILE *file)
Definition fd.c:2827
int pg_fsync_writethrough(int fd)
Definition fd.c:462
void ReleaseExternalFD(void)
Definition fd.c:1225
int data_sync_elevel(int elevel)
Definition fd.c:3986
static void Insert(File file)
Definition fd.c:1301
DIR * AllocateDir(const char *dirname)
Definition fd.c:2891
int durable_unlink(const char *fname, int elevel)
Definition fd.c:873
void ReserveExternalFD(void)
Definition fd.c:1207
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition fd.c:2957
int pg_fsync(int fd)
Definition fd.c:390
FILE * AllocateFile(const char *name, const char *mode)
Definition fd.c:2628
int OpenTransientFile(const char *fileName, int fileFlags)
Definition fd.c:2678
void SyncDataDirectory(void)
Definition fd.c:3594
#define IO_DIRECT_WAL
Definition fd.h:55
#define IO_DIRECT_WAL_INIT
Definition fd.h:56
#define PG_O_DIRECT
Definition fd.h:123
#define palloc_object(type)
Definition fe_memutils.h:74
ssize_t pg_pwrite_zeros(int fd, size_t size, pgoff_t offset)
Definition file_utils.c:709
PGFileType get_dirent_type(const char *path, const struct dirent *de, bool look_through_symlinks, int elevel)
Definition file_utils.c:547
PGFileType
Definition file_utils.h:19
@ PGFILETYPE_LNK
Definition file_utils.h:24
@ PGFILETYPE_DIR
Definition file_utils.h:23
@ PGFILETYPE_REG
Definition file_utils.h:22
bool IsBinaryUpgrade
Definition globals.c:123
int NBuffers
Definition globals.c:144
pid_t PostmasterPid
Definition globals.c:108
volatile uint32 InterruptHoldoffCount
Definition globals.c:43
bool enableFsync
Definition globals.c:131
ProcNumber MyProcNumber
Definition globals.c:92
bool IsUnderPostmaster
Definition globals.c:122
int MaxConnections
Definition globals.c:145
volatile uint32 CritSectionCount
Definition globals.c:45
char * DataDir
Definition globals.c:73
bool IsPostmasterEnvironment
Definition globals.c:121
struct Latch * MyLatch
Definition globals.c:65
int max_worker_processes
Definition globals.c:146
int set_config_option_ext(const char *name, const char *value, GucContext context, GucSource source, Oid srole, GucAction action, bool changeVal, int elevel, bool is_reload)
Definition guc.c:3288
void SetConfigOption(const char *name, const char *value, GucContext context, GucSource source)
Definition guc.c:4234
void * guc_malloc(int elevel, size_t size)
Definition guc.c:637
#define newval
struct config_generic * find_option(const char *name, bool create_placeholders, bool skip_errors, int elevel)
Definition guc.c:1114
@ GUC_ACTION_SET
Definition guc.h:203
#define GUC_check_errdetail
Definition guc.h:507
GucSource
Definition guc.h:112
@ PGC_S_DYNAMIC_DEFAULT
Definition guc.h:114
@ PGC_S_OVERRIDE
Definition guc.h:123
@ PGC_INTERNAL
Definition guc.h:73
@ PGC_POSTMASTER
Definition guc.h:74
return str start
#define TOAST_MAX_CHUNK_SIZE
Definition heaptoast.h:84
#define bufsize
#define INJECTION_POINT(name, arg)
#define INJECTION_POINT_CACHED(name, arg)
#define INJECTION_POINT_LOAD(name)
WalUsage pgWalUsage
Definition instrument.c:27
#define close(a)
Definition win32.h:12
#define write(a, b, c)
Definition win32.h:14
#define read(a, b, c)
Definition win32.h:13
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:344
#define PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg)
Definition ipc.h:47
#define PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg)
Definition ipc.h:52
int i
Definition isn.c:77
#define LOBLKSIZE
void SetLatch(Latch *latch)
Definition latch.c:290
void ResetLatch(Latch *latch)
Definition latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition latch.c:172
List * lappend(List *list, void *datum)
Definition list.c:339
void list_free(List *list)
Definition list.c:1546
int max_locks_per_xact
Definition lock.c:56
void UpdateLogicalDecodingStatusEndOfRecovery(void)
Definition logicalctl.c:551
bool IsLogicalDecodingEnabled(void)
Definition logicalctl.c:202
bool IsXLogLogicalInfoEnabled(void)
Definition logicalctl.c:218
void StartupLogicalDecodingStatus(bool last_status)
Definition logicalctl.c:144
void DisableLogicalDecoding(void)
Definition logicalctl.c:489
void EnableLogicalDecoding(void)
Definition logicalctl.c:338
void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1702
void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1840
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1150
bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
Definition lwlock.c:1566
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1767
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition lwlock.c:670
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1321
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1378
@ LW_SHARED
Definition lwlock.h:105
@ LW_EXCLUSIVE
Definition lwlock.h:104
char * pstrdup(const char *in)
Definition mcxt.c:1781
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition mcxt.c:743
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AmStartupProcess()
Definition miscadmin.h:405
#define IsBootstrapProcessingMode()
Definition miscadmin.h:495
#define START_CRIT_SECTION()
Definition miscadmin.h:152
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:125
@ B_CHECKPOINTER
Definition miscadmin.h:375
#define END_CRIT_SECTION()
Definition miscadmin.h:154
#define AmWalReceiverProcess()
Definition miscadmin.h:406
bool process_shared_preload_libraries_done
Definition miscinit.c:1789
BackendType MyBackendType
Definition miscinit.c:65
void MultiXactSetNextMXact(MultiXactId nextMulti, MultiXactOffset nextMultiOffset)
Definition multixact.c:2063
void MultiXactAdvanceOldest(MultiXactId oldestMulti, Oid oldestMultiDB)
Definition multixact.c:2266
void MultiXactGetCheckptMulti(bool is_shutdown, MultiXactId *nextMulti, MultiXactOffset *nextMultiOffset, MultiXactId *oldestMulti, Oid *oldestMultiDB)
Definition multixact.c:2017
void CheckPointMultiXact(void)
Definition multixact.c:2039
void TrimMultiXact(void)
Definition multixact.c:1904
void MultiXactAdvanceNextMXact(MultiXactId minMulti, MultiXactOffset minMultiOffset)
Definition multixact.c:2239
void BootStrapMultiXact(void)
Definition multixact.c:1863
void StartupMultiXact(void)
Definition multixact.c:1879
void SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
Definition multixact.c:2085
#define FirstMultiXactId
Definition multixact.h:26
static char * errmsg
void StartupReplicationOrigin(void)
Definition origin.c:740
void CheckPointReplicationOrigin(void)
Definition origin.c:614
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define ERRCODE_DATA_CORRUPTED
#define INDEX_MAX_KEYS
#define NAMEDATALEN
#define MAXPGPATH
#define DEFAULT_XLOG_SEG_SIZE
#define SLRU_PAGES_PER_SEGMENT
#define PG_IO_ALIGN_SIZE
#define PG_CACHE_LINE_SIZE
#define FLOATFORMAT_VALUE
Definition pg_control.h:209
#define XLOG_RESTORE_POINT
Definition pg_control.h:79
#define XLOG_FPW_CHANGE
Definition pg_control.h:80
#define XLOG_CHECKPOINT_REDO
Definition pg_control.h:86
#define PG_CONTROL_VERSION
Definition pg_control.h:25
#define XLOG_OVERWRITE_CONTRECORD
Definition pg_control.h:85
#define XLOG_ASSIGN_LSN
Definition pg_control.h:84
#define XLOG_FPI
Definition pg_control.h:83
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:82
#define MOCK_AUTH_NONCE_LEN
Definition pg_control.h:28
#define XLOG2_CHECKSUMS
Definition pg_control.h:90
#define XLOG_NEXTOID
Definition pg_control.h:75
@ DB_IN_PRODUCTION
Definition pg_control.h:105
@ DB_SHUTDOWNING
Definition pg_control.h:102
@ DB_IN_ARCHIVE_RECOVERY
Definition pg_control.h:104
@ DB_SHUTDOWNED_IN_RECOVERY
Definition pg_control.h:101
@ DB_SHUTDOWNED
Definition pg_control.h:100
@ DB_IN_CRASH_RECOVERY
Definition pg_control.h:103
#define XLOG_NOOP
Definition pg_control.h:74
#define XLOG_CHECKPOINT_SHUTDOWN
Definition pg_control.h:72
#define PG_CONTROL_FILE_SIZE
Definition pg_control.h:266
#define XLOG_SWITCH
Definition pg_control.h:76
#define XLOG_BACKUP_END
Definition pg_control.h:77
#define XLOG_PARAMETER_CHANGE
Definition pg_control.h:78
#define XLOG_LOGICAL_DECODING_STATUS_CHANGE
Definition pg_control.h:87
#define XLOG_CHECKPOINT_ONLINE
Definition pg_control.h:73
#define XLOG_END_OF_RECOVERY
Definition pg_control.h:81
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:173
#define EQ_CRC32C(c1, c2)
Definition pg_crc32c.h:42
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
#define FIN_CRC32C(crc)
Definition pg_crc32c.h:178
const void size_t len
return crc
static char * filename
Definition pg_dumpall.c:133
#define lfirst(lc)
Definition pg_list.h:172
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
static THREAD_BARRIER_T barrier
Definition pgbench.c:488
bool pgstat_report_fixed
Definition pgstat.c:219
void pgstat_restore_stats(void)
Definition pgstat.c:525
void pgstat_discard_stats(void)
Definition pgstat.c:537
@ IOOBJECT_WAL
Definition pgstat.h:283
@ IOCONTEXT_INIT
Definition pgstat.h:292
@ IOCONTEXT_NORMAL
Definition pgstat.h:293
@ IOOP_FSYNC
Definition pgstat.h:312
@ IOOP_WRITE
Definition pgstat.h:320
PgStat_CheckpointerStats PendingCheckpointerStats
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition pgstat_io.c:91
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt, uint64 bytes)
Definition pgstat_io.c:122
int64 pg_time_t
Definition pgtime.h:23
size_t pg_strftime(char *s, size_t maxsize, const char *format, const struct pg_tm *t)
Definition strftime.c:128
struct pg_tm * pg_localtime(const pg_time_t *timep, const pg_tz *tz)
Definition localtime.c:1345
PGDLLIMPORT pg_tz * log_timezone
Definition pgtz.c:31
bool pg_strong_random(void *buf, size_t len)
int pg_strcasecmp(const char *s1, const char *s2)
#define pg_pwrite
Definition port.h:248
#define snprintf
Definition port.h:260
#define IS_DIR_SEP(ch)
Definition port.h:103
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition strlcpy.c:45
static bool DatumGetBool(Datum X)
Definition postgres.h:100
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
uint64_t Datum
Definition postgres.h:70
#define InvalidOid
unsigned int Oid
void CheckPointPredicate(void)
Definition predicate.c:1022
static int fd(const char *x, int i)
static int fb(int x)
short access
#define GetPGProcByNumber(n)
Definition proc.h:504
#define DELAY_CHKPT_START
Definition proc.h:139
#define DELAY_CHKPT_COMPLETE
Definition proc.h:140
bool MinimumActiveBackends(int min)
Definition procarray.c:3589
TransactionId GetOldestTransactionIdConsideredRunning(void)
Definition procarray.c:1973
bool HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids, int type)
Definition procarray.c:3062
void ProcArrayApplyRecoveryInfo(RunningTransactions running)
Definition procarray.c:1045
TransactionId GetOldestActiveTransactionId(bool inCommitOnly, bool allDbs)
Definition procarray.c:2845
void ProcArrayInitRecovery(TransactionId initializedUptoXID)
Definition procarray.c:1014
VirtualTransactionId * GetVirtualXIDsDelayingChkpt(int *nvxids, int type)
Definition procarray.c:3017
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
int ProcNumber
Definition procnumber.h:24
void WaitForProcSignalBarrier(uint64 generation)
Definition procsignal.c:428
uint64 EmitProcSignalBarrier(ProcSignalBarrierType type)
Definition procsignal.c:360
@ PROCSIGNAL_BARRIER_CHECKSUM_INPROGRESS_OFF
Definition procsignal.h:55
@ PROCSIGNAL_BARRIER_CHECKSUM_INPROGRESS_ON
Definition procsignal.h:54
@ PROCSIGNAL_BARRIER_CHECKSUM_ON
Definition procsignal.h:56
@ PROCSIGNAL_BARRIER_CHECKSUM_OFF
Definition procsignal.h:53
static void set_ps_display(const char *activity)
Definition ps_status.h:40
void ResetUnloggedRelations(int op)
Definition reinit.c:47
#define UNLOGGED_RELATION_INIT
Definition reinit.h:28
#define UNLOGGED_RELATION_CLEANUP
Definition reinit.h:27
void RelationCacheInitFileRemove(void)
Definition relcache.c:6916
void CheckPointRelationMap(void)
Definition relmapper.c:612
#define relpath(rlocator, forknum)
Definition relpath.h:150
#define PG_TBLSPC_DIR
Definition relpath.h:41
void StartupReorderBuffer(void)
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
ResourceOwner AuxProcessResourceOwner
Definition resowner.c:176
void CheckPointLogicalRewriteHeap(void)
#define RM_MAX_ID
Definition rmgr.h:33
Size add_size(Size s1, Size s2)
Definition shmem.c:1048
Size mul_size(Size s1, Size s2)
Definition shmem.c:1063
#define ShmemRequestStruct(...)
Definition shmem.h:176
void pg_usleep(long microsec)
Definition signal.c:53
void CheckPointReplicationSlots(bool is_shutdown)
Definition slot.c:2324
void StartupReplicationSlots(void)
Definition slot.c:2402
bool InvalidateObsoleteReplicationSlots(uint32 possible_causes, XLogSegNo oldestSegno, Oid dboid, TransactionId snapshotConflictHorizon)
Definition slot.c:2220
@ RS_INVAL_WAL_REMOVED
Definition slot.h:62
@ RS_INVAL_IDLE_TIMEOUT
Definition slot.h:68
@ RS_INVAL_WAL_LEVEL
Definition slot.h:66
bool sync_replication_slots
Definition slotsync.c:132
void smgrdestroyall(void)
Definition smgr.c:386
void CheckPointSnapBuild(void)
Definition snapbuild.c:2030
void DeleteAllExportedSnapshotFiles(void)
Definition snapmgr.c:1587
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
void reset(void)
PGPROC * MyProc
Definition proc.c:71
PROC_HDR * ProcGlobal
Definition proc.c:74
void InitRecoveryTransactionEnvironment(void)
Definition standby.c:96
XLogRecPtr LogStandbySnapshot(Oid dbid)
Definition standby.c:1303
void ShutdownRecoveryTransactionEnvironment(void)
Definition standby.c:162
@ SUBXIDS_IN_SUBTRANS
Definition standby.h:123
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition stringinfo.c:145
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition stringinfo.c:281
void appendStringInfoString(StringInfo str, const char *s)
Definition stringinfo.c:230
void appendStringInfoChar(StringInfo str, char ch)
Definition stringinfo.c:242
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
Oid oldestMultiDB
Definition pg_control.h:52
MultiXactId oldestMulti
Definition pg_control.h:51
MultiXactOffset nextMultiOffset
Definition pg_control.h:48
TransactionId newestCommitTsXid
Definition pg_control.h:56
TransactionId oldestXid
Definition pg_control.h:49
TimeLineID PrevTimeLineID
Definition pg_control.h:40
TimeLineID ThisTimeLineID
Definition pg_control.h:39
TransactionId oldestActiveXid
Definition pg_control.h:65
bool fullPageWrites
Definition pg_control.h:42
MultiXactId nextMulti
Definition pg_control.h:47
FullTransactionId nextXid
Definition pg_control.h:45
TransactionId oldestCommitTsXid
Definition pg_control.h:54
pg_time_t time
Definition pg_control.h:53
int wal_level
Definition pg_control.h:43
bool logicalDecodingEnabled
Definition pg_control.h:44
uint32 dataChecksumState
Definition pg_control.h:68
XLogRecPtr redo
Definition pg_control.h:37
Oid oldestXidDB
Definition pg_control.h:50
uint64 ckpt_agg_sync_time
Definition xlog.h:188
uint64 ckpt_longest_sync
Definition xlog.h:187
TimestampTz ckpt_start_t
Definition xlog.h:173
TimestampTz ckpt_end_t
Definition xlog.h:177
int ckpt_segs_removed
Definition xlog.h:183
TimestampTz ckpt_write_t
Definition xlog.h:174
TimestampTz ckpt_sync_end_t
Definition xlog.h:176
TimestampTz ckpt_sync_t
Definition xlog.h:175
int ckpt_bufs_written
Definition xlog.h:179
int ckpt_segs_recycled
Definition xlog.h:184
int ckpt_slru_written
Definition xlog.h:180
char mock_authentication_nonce[MOCK_AUTH_NONCE_LEN]
Definition pg_control.h:245
uint32 pg_control_version
Definition pg_control.h:133
uint32 xlog_seg_size
Definition pg_control.h:221
XLogRecPtr backupStartPoint
Definition pg_control.h:178
bool track_commit_timestamp
Definition pg_control.h:193
CheckPoint checkPointCopy
Definition pg_control.h:143
uint32 slru_pages_per_segment
Definition pg_control.h:218
XLogRecPtr backupEndPoint
Definition pg_control.h:179
XLogRecPtr minRecoveryPoint
Definition pg_control.h:176
uint32 data_checksum_version
Definition pg_control.h:232
XLogRecPtr unloggedLSN
Definition pg_control.h:145
uint32 indexMaxKeys
Definition pg_control.h:224
pg_time_t time
Definition pg_control.h:140
bool default_char_signedness
Definition pg_control.h:238
XLogRecPtr checkPoint
Definition pg_control.h:141
uint64 system_identifier
Definition pg_control.h:118
uint32 catalog_version_no
Definition pg_control.h:134
TimeLineID minRecoveryPointTLI
Definition pg_control.h:177
pg_crc32c crc
Definition pg_control.h:248
uint32 toast_max_chunk_size
Definition pg_control.h:226
Definition dirent.c:26
Definition pg_list.h:54
char data[XLOG_BLCKSZ]
Definition c.h:1231
int delayChkptFlags
Definition proc.h:260
ProcNumber walwriterProc
Definition proc.h:488
PgStat_Counter sync_time
Definition pgstat.h:269
PgStat_Counter write_time
Definition pgstat.h:268
void(* rm_mask)(char *pagedata, BlockNumber blkno)
TransactionId oldestRunningXid
Definition standby.h:134
TransactionId nextXid
Definition standby.h:133
TransactionId latestCompletedXid
Definition standby.h:137
subxids_array_status subxid_status
Definition standby.h:132
TransactionId * xids
Definition standby.h:139
ShmemRequestCallback request_fn
Definition shmem.h:133
TransactionId oldestCommitTsXid
Definition transam.h:232
TransactionId newestCommitTsXid
Definition transam.h:233
FullTransactionId latestCompletedXid
Definition transam.h:238
FullTransactionId nextXid
Definition transam.h:220
TransactionId oldestXid
Definition transam.h:222
pg_atomic_uint64 insertingAt
Definition xlog.c:377
XLogRecPtr lastImportantAt
Definition xlog.c:378
LWLock lock
Definition xlog.c:376
pg_atomic_uint64 minWaitedLSN[WAIT_LSN_TYPE_COUNT]
Definition xlogwait.h:85
int64 wal_buffers_full
Definition instrument.h:57
uint64 wal_bytes
Definition instrument.h:55
int64 wal_fpi
Definition instrument.h:54
uint64 wal_fpi_bytes
Definition instrument.h:56
int64 wal_records
Definition instrument.h:53
CheckPoint lastCheckPoint
Definition xlog.c:551
XLogwrtRqst LogwrtRqst
Definition xlog.c:462
slock_t info_lck
Definition xlog.c:562
XLogRecPtr InitializedUpTo
Definition xlog.c:491
char * pages
Definition xlog.c:498
pg_time_t lastSegSwitchTime
Definition xlog.c:473
XLogRecPtr replicationSlotMinLSN
Definition xlog.c:465
RecoveryState SharedRecoveryState
Definition xlog.c:522
uint32 data_checksum_version
Definition xlog.c:560
TimeLineID InsertTimeLineID
Definition xlog.c:515
XLogRecPtr lastSegSwitchLSN
Definition xlog.c:474
XLogSegNo lastRemovedSegNo
Definition xlog.c:467
pg_atomic_uint64 * xlblocks
Definition xlog.c:499
pg_atomic_uint64 logWriteResult
Definition xlog.c:478
int XLogCacheBlck
Definition xlog.c:500
XLogRecPtr RedoRecPtr
Definition xlog.c:463
XLogRecPtr lastCheckPointRecPtr
Definition xlog.c:549
XLogRecPtr lastFpwDisableRecPtr
Definition xlog.c:557
XLogCtlInsert Insert
Definition xlog.c:459
bool InstallXLogFileSegmentActive
Definition xlog.c:532
bool WalWriterSleeping
Definition xlog.c:539
XLogRecPtr asyncXactLSN
Definition xlog.c:464
XLogRecPtr lastCheckPointEndPtr
Definition xlog.c:550
pg_atomic_uint64 logFlushResult
Definition xlog.c:479
pg_atomic_uint64 logInsertResult
Definition xlog.c:477
TimeLineID PrevTimeLineID
Definition xlog.c:516
pg_atomic_uint64 unloggedLSN
Definition xlog.c:470
WALInsertLockPadded * WALInsertLocks
Definition xlog.c:451
XLogRecPtr RedoRecPtr
Definition xlog.c:437
uint64 PrevBytePos
Definition xlog.c:415
char pad[PG_CACHE_LINE_SIZE]
Definition xlog.c:424
int runningBackups
Definition xlog.c:445
slock_t insertpos_lck
Definition xlog.c:405
uint64 CurrBytePos
Definition xlog.c:414
bool fullPageWrites
Definition xlog.c:438
XLogRecPtr lastBackupStart
Definition xlog.c:446
XLogRecPtr xlp_pageaddr
XLogRecPtr EndRecPtr
Definition xlogreader.h:206
XLogRecPtr ReadRecPtr
Definition xlogreader.h:205
XLogRecPtr xl_prev
Definition xlogrecord.h:45
pg_crc32c xl_crc
Definition xlogrecord.h:49
uint8 xl_info
Definition xlogrecord.h:46
uint32 xl_tot_len
Definition xlogrecord.h:43
TransactionId xl_xid
Definition xlogrecord.h:44
RmgrId xl_rmid
Definition xlogrecord.h:47
XLogRecPtr Flush
Definition xlog.c:335
XLogRecPtr Write
Definition xlog.c:334
XLogRecPtr Flush
Definition xlog.c:329
XLogRecPtr Write
Definition xlog.c:328
Definition guc.h:174
ChecksumStateType new_checksum_state
TimestampTz rp_time
void StartupSUBTRANS(TransactionId oldestActiveXID)
Definition subtrans.c:302
void CheckPointSUBTRANS(void)
Definition subtrans.c:348
void BootStrapSUBTRANS(void)
Definition subtrans.c:288
void TruncateSUBTRANS(TransactionId oldestXact)
Definition subtrans.c:404
void ProcessSyncRequests(void)
Definition sync.c:287
void SyncPreCheckpoint(void)
Definition sync.c:178
void SyncPostCheckpoint(void)
Definition sync.c:203
TimeoutId RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
Definition timeout.c:505
@ STARTUP_PROGRESS_TIMEOUT
Definition timeout.h:38
#define TransactionIdRetreat(dest)
Definition transam.h:141
#define InvalidTransactionId
Definition transam.h:31
static void FullTransactionIdRetreat(FullTransactionId *dest)
Definition transam.h:103
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstGenbkiObjectId
Definition transam.h:195
#define FirstNormalTransactionId
Definition transam.h:34
#define TransactionIdIsValid(xid)
Definition transam.h:41
static FullTransactionId FullTransactionIdFromEpochAndXid(uint32 epoch, TransactionId xid)
Definition transam.h:71
#define TransactionIdIsNormal(xid)
Definition transam.h:42
#define FullTransactionIdPrecedes(a, b)
Definition transam.h:51
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
void RecoverPreparedTransactions(void)
Definition twophase.c:2089
void restoreTwoPhaseData(void)
Definition twophase.c:1910
int max_prepared_xacts
Definition twophase.c:118
TransactionId PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
Definition twophase.c:1972
void StandbyRecoverPreparedTransactions(void)
Definition twophase.c:2051
void CheckPointTwoPhase(XLogRecPtr redo_horizon)
Definition twophase.c:1828
WALInsertLock l
Definition xlog.c:390
char pad[PG_CACHE_LINE_SIZE]
Definition xlog.c:391
bool SplitIdentifierString(char *rawstring, char separator, List **namelist)
Definition varlena.c:2867
void SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
Definition varsup.c:367
void AdvanceOldestClogXid(TransactionId oldest_datfrozenxid)
Definition varsup.c:350
TransamVariablesData * TransamVariables
Definition varsup.c:37
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition wait_event.h:67
static void pgstat_report_wait_end(void)
Definition wait_event.h:83
const char * name
#define WL_TIMEOUT
#define WL_EXIT_ON_PM_DEATH
#define WL_LATCH_SET
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]
XLogRecPtr Flush
XLogRecPtr Write
XLogRecPtr GetWalRcvFlushRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
void ShutdownWalRcv(void)
void WalSndWakeup(bool physical, bool logical)
Definition walsender.c:4012
int max_wal_senders
Definition walsender.c:141
void WalSndInitStopping(void)
Definition walsender.c:4091
void WalSndWaitStopping(void)
Definition walsender.c:4117
static void WalSndWakeupProcessRequests(bool physical, bool logical)
Definition walsender.h:64
#define WalSndWakeupRequest()
Definition walsender.h:57
bool summarize_wal
void WaitForWalSummarization(XLogRecPtr lsn)
void WakeupWalSummarizer(void)
XLogRecPtr GetOldestUnsummarizedLSN(TimeLineID *tli, bool *lsn_is_exact)
int WalWriterFlushAfter
Definition walwriter.c:72
int WalWriterDelay
Definition walwriter.c:71
#define stat
Definition win32_port.h:74
#define EINTR
Definition win32_port.h:361
#define S_ISDIR(m)
Definition win32_port.h:315
#define kill(pid, sig)
Definition win32_port.h:490
#define SIGUSR1
Definition win32_port.h:170
#define readlink(path, buf, size)
Definition win32_port.h:226
#define O_CLOEXEC
Definition win32_port.h:344
#define O_DSYNC
Definition win32_port.h:346
int gettimeofday(struct timeval *tp, void *tzp)
void MarkSubxactTopXidLogged(void)
Definition xact.c:593
void MarkCurrentTransactionIdLoggedIfAny(void)
Definition xact.c:543
int XLogFileInit(XLogSegNo logsegno, TimeLineID logtli)
Definition xlog.c:3435
void assign_wal_sync_method(int new_wal_sync_method, void *extra)
Definition xlog.c:9320
static const char * CheckpointFlagsString(int flags)
Definition xlog.c:7152
static void CreateEndOfRecoveryRecord(void)
Definition xlog.c:7909
uint64 GetSystemIdentifier(void)
Definition xlog.c:4647
int wal_decode_buffer_size
Definition xlog.c:143
XLogRecPtr ProcLastRecPtr
Definition xlog.c:260
static XLogCtlData * XLogCtl
Definition xlog.c:575
bool fullPageWrites
Definition xlog.c:129
void UpdateFullPageWrites(void)
Definition xlog.c:8756
bool RecoveryInProgress(void)
Definition xlog.c:6836
static void CleanupBackupHistory(void)
Definition xlog.c:4216
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6969
TimeLineID GetWALInsertionTimeLine(void)
Definition xlog.c:7022
static ControlFileData * LocalControlFile
Definition xlog.c:583
XLogRecPtr RequestXLogSwitch(bool mark_unimportant)
Definition xlog.c:8608
void do_pg_abort_backup(int code, Datum arg)
Definition xlog.c:10055
XLogSegNo XLogGetLastRemovedSegno(void)
Definition xlog.c:3813
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:784
void SetLocalDataChecksumState(uint32 data_checksum_version)
Definition xlog.c:4975
char * XLogArchiveCommand
Definition xlog.c:127
int wal_keep_size_mb
Definition xlog.c:123
Size WALReadFromBuffers(char *dstbuf, XLogRecPtr startptr, Size count, TimeLineID tli)
Definition xlog.c:1789
static XLogRecPtr WaitXLogInsertionsToFinish(XLogRecPtr upto)
Definition xlog.c:1545
static void WALInsertLockRelease(void)
Definition xlog.c:1486
void SetDataChecksumsOff(void)
Definition xlog.c:4871
static XLogRecPtr XLogBytePosToRecPtr(uint64 bytepos)
Definition xlog.c:1899
bool EnableHotStandby
Definition xlog.c:128
static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
Definition xlog.c:1512
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6939
void assign_wal_consistency_checking(const char *newval, void *extra)
Definition xlog.c:5167
static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version)
Definition xlog.c:4259
void SetInstallXLogFileSegmentActive(void)
Definition xlog.c:10160
static void AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
Definition xlog.c:2026
static void WALInsertLockAcquireExclusive(void)
Definition xlog.c:1457
static void UpdateControlFile(void)
Definition xlog.c:4638
void StartupXLOG(void)
Definition xlog.c:5851
bool IsInstallXLogFileSegmentActive(void)
Definition xlog.c:10177
static int openLogFile
Definition xlog.c:655
void BootStrapXLOG(uint32 data_checksum_version)
Definition xlog.c:5459
XLogRecPtr XactLastRecEnd
Definition xlog.c:261
bool CreateRestartPoint(int flags)
Definition xlog.c:8130
static void ValidateXLOGDirectoryStructure(void)
Definition xlog.c:4154
int CommitDelay
Definition xlog.c:139
static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr lastredoptr, XLogRecPtr endptr, TimeLineID insertTLI)
Definition xlog.c:3920
static XLogRecPtr CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn, XLogRecPtr pagePtr, TimeLineID newTLI)
Definition xlog.c:7980
void xlog2_redo(XLogReaderState *record)
Definition xlog.c:9239
XLogRecPtr GetInsertRecPtr(void)
Definition xlog.c:6984
bool wal_init_zero
Definition xlog.c:134
static void CalculateCheckpointSegments(void)
Definition xlog.c:2192
XLogRecPtr XLogGetReplicationSlotMinimumLSN(void)
Definition xlog.c:2700
int XLogArchiveMode
Definition xlog.c:126
SessionBackupState get_backup_status(void)
Definition xlog.c:9762
static void XLogReportParameters(void)
Definition xlog.c:8675
#define RefreshXLogWriteResult(_target)
Definition xlog.c:640
void CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
Definition xlog.c:3782
int wal_level
Definition xlog.c:138
static void LogCheckpointStart(int flags, bool restartpoint)
Definition xlog.c:7173
static XLogRecPtr RedoRecPtr
Definition xlog.c:280
void assign_checkpoint_completion_target(double newval, void *extra)
Definition xlog.c:2228
static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, bool find_free, XLogSegNo max_segno, TimeLineID tli)
Definition xlog.c:3618
static void WriteControlFile(void)
Definition xlog.c:4300
int wal_segment_size
Definition xlog.c:150
WALAvailability GetWALAvailability(XLogRecPtr targetLSN)
Definition xlog.c:8415
const char * show_archive_command(void)
Definition xlog.c:5220
#define UsableBytesInPage
Definition xlog.c:617
int max_wal_size_mb
Definition xlog.c:121
const ShmemCallbacks XLOGShmemCallbacks
Definition xlog.c:590
void ShutdownXLOG(int code, Datum arg)
Definition xlog.c:7104
static bool PerformRecoveryXLogAction(void)
Definition xlog.c:6786
RecoveryState GetRecoveryState(void)
Definition xlog.c:6872
int XLogArchiveTimeout
Definition xlog.c:125
static void CleanupAfterArchiveRecovery(TimeLineID EndOfLogTLI, XLogRecPtr EndOfLog, TimeLineID newTLI)
Definition xlog.c:5711
#define ConvertToXSegs(x, segsize)
Definition xlog.c:623
bool wal_recycle
Definition xlog.c:135
static void RemoveXlogFile(const struct dirent *segment_de, XLogSegNo recycleSegNo, XLogSegNo *endlogSegNo, TimeLineID insertTLI)
Definition xlog.c:4064
pg_time_t GetLastSegSwitchData(XLogRecPtr *lastSwitchLSN)
Definition xlog.c:7087
const char * show_effective_wal_level(void)
Definition xlog.c:5247
static int XLOGChooseNumBuffers(void)
Definition xlog.c:5029
static XLogRecPtr XLogBytePosToEndRecPtr(uint64 bytepos)
Definition xlog.c:1939
static void LogCheckpointEnd(bool restartpoint, int flags)
Definition xlog.c:7191
static int get_sync_bit(int method)
Definition xlog.c:9272
static XLogwrtResult LogwrtResult
Definition xlog.c:632
void XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
Definition xlog.c:2687
void SwitchIntoArchiveRecovery(XLogRecPtr EndRecPtr, TimeLineID replayTLI)
Definition xlog.c:6711
static bool lastFullPageWrites
Definition xlog.c:224
char * wal_consistency_checking_string
Definition xlog.c:132
bool DataChecksumsNeedVerify(void)
Definition xlog.c:4737
static void WALInsertLockAcquire(void)
Definition xlog.c:1412
void SetDataChecksumsOn(void)
Definition xlog.c:4806
int CommitSiblings
Definition xlog.c:140
static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos, TimeLineID tli)
Definition xlog.c:1266
bool GetDefaultCharSignedness(void)
Definition xlog.c:4997
bool DataChecksumsOn(void)
Definition xlog.c:4699
static double CheckPointDistanceEstimate
Definition xlog.c:166
static uint64 XLogRecPtrToBytePos(XLogRecPtr ptr)
Definition xlog.c:1982
const char * show_in_hot_standby(void)
Definition xlog.c:5232
XLogRecPtr GetXLogInsertRecPtr(void)
Definition xlog.c:10096
void SetWalWriterSleeping(bool sleeping)
Definition xlog.c:10192
bool wal_log_hints
Definition xlog.c:130
static void XLogInitNewTimeline(TimeLineID endTLI, XLogRecPtr endOfLog, TimeLineID newTLI)
Definition xlog.c:5636
static void CheckRequiredParameterValues(void)
Definition xlog.c:5807
#define XLogRecPtrToBufIdx(recptr)
Definition xlog.c:611
int wal_sync_method
Definition xlog.c:137
void SetDataChecksumsOnInProgress(void)
Definition xlog.c:4753
int XLogFileOpen(XLogSegNo segno, TimeLineID tli)
Definition xlog.c:3673
int max_slot_wal_keep_size_mb
Definition xlog.c:142
XLogRecPtr GetFlushRecPtr(TimeLineID *insertTLI)
Definition xlog.c:7001
static void PreallocXlogFiles(XLogRecPtr endptr, TimeLineID tli)
Definition xlog.c:3745
static bool doPageWrites
Definition xlog.c:293
static bool holdingAllLocks
Definition xlog.c:687
static TimeLineID openLogTLI
Definition xlog.c:657
XLogRecPtr XactLastCommitEnd
Definition xlog.c:262
WalLevel GetActiveWalLevelOnStandby(void)
Definition xlog.c:5290
bool log_checkpoints
Definition xlog.c:136
static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo)
Definition xlog.c:8499
static void XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
Definition xlog.c:2325
static void XLogChecksums(uint32 new_type)
Definition xlog.c:8734
void InitializeWalConsistencyChecking(void)
Definition xlog.c:5194
static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
Definition xlog.c:2721
static int LocalSetXLogInsertAllowed(void)
Definition xlog.c:6924
void assign_max_wal_size(int newval, void *extra)
Definition xlog.c:2221
void RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
Definition xlog.c:3995
XLogRecPtr GetLastImportantRecPtr(void)
Definition xlog.c:7058
void xlog_redo(XLogReaderState *record)
Definition xlog.c:8825
static int MyLockNo
Definition xlog.c:686
static void RecoveryRestartPoint(const CheckPoint *checkPoint, XLogReaderState *record)
Definition xlog.c:8090
bool XLogNeedsFlush(XLogRecPtr record)
Definition xlog.c:3163
void register_persistent_abort_backup_handler(void)
Definition xlog.c:10082
static double PrevCheckPointDistance
Definition xlog.c:167
void ReachedEndOfBackup(XLogRecPtr EndRecPtr, TimeLineID tli)
Definition xlog.c:6749
void LocalProcessControlFile(bool reset)
Definition xlog.c:5275
static void XLOGShmemInit(void *arg)
Definition xlog.c:5353
static void XLogFileClose(void)
Definition xlog.c:3694
int wal_compression
Definition xlog.c:131
static void UpdateCheckPointDistanceEstimate(uint64 nbytes)
Definition xlog.c:7298
static bool LocalRecoveryInProgress
Definition xlog.c:231
XLogSegNo XLogGetOldestSegno(TimeLineID tli)
Definition xlog.c:3829
int data_checksums
Definition xlog.c:683
bool DataChecksumsOff(void)
Definition xlog.c:4687
XLogRecPtr GetXLogWriteRecPtr(void)
Definition xlog.c:10128
static void XLOGShmemAttach(void *arg)
Definition xlog.c:5449
void ResetInstallXLogFileSegmentActive(void)
Definition xlog.c:10169
static WALInsertLockPadded * WALInsertLocks
Definition xlog.c:578
static XLogSegNo openLogSegNo
Definition xlog.c:656
#define INSERT_FREESPACE(endptr)
Definition xlog.c:600
int wal_retrieve_retry_interval
Definition xlog.c:141
int XLOGbuffers
Definition xlog.c:124
bool XLogBackgroundFlush(void)
Definition xlog.c:3006
const struct config_enum_entry archive_mode_options[]
Definition xlog.c:198
void GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
Definition xlog.c:10140
char * GetMockAuthenticationNonce(void)
Definition xlog.c:4657
bool track_wal_io_timing
Definition xlog.c:144
static XLogSegNo XLOGfileslop(XLogRecPtr lastredoptr)
Definition xlog.c:2251
static int UsableBytesInSegment
Definition xlog.c:626
const char * show_data_checksums(void)
Definition xlog.c:4984
static char * GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
Definition xlog.c:1673
bool DataChecksumsInProgressOn(void)
Definition xlog.c:4711
WalInsertClass
Definition xlog.c:569
@ WALINSERT_SPECIAL_SWITCH
Definition xlog.c:571
@ WALINSERT_NORMAL
Definition xlog.c:570
@ WALINSERT_SPECIAL_CHECKPOINT
Definition xlog.c:572
bool XLogInsertAllowed(void)
Definition xlog.c:6891
void do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, BackupState *state, StringInfo tblspcmapfile)
Definition xlog.c:9459
static ControlFileData * ControlFile
Definition xlog.c:584
bool check_wal_segment_size(int *newval, void **extra, GucSource source)
Definition xlog.c:2235
static void XLogFileCopy(TimeLineID destTLI, XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, int upto)
Definition xlog.c:3473
static int LocalXLogInsertAllowed
Definition xlog.c:243
static void RemoveTempXlogFiles(void)
Definition xlog.c:3887
XLogRecPtr XLogRestorePoint(const char *rpName)
Definition xlog.c:8626
static XLogRecPtr LocalMinRecoveryPoint
Definition xlog.c:666
#define NUM_XLOGINSERT_LOCKS
Definition xlog.c:157
TimeLineID GetWALInsertionTimeLineIfSet(void)
Definition xlog.c:7038
void do_pg_backup_stop(BackupState *state, bool waitforarchive)
Definition xlog.c:9781
bool check_wal_consistency_checking(char **newval, void **extra, GucSource source)
Definition xlog.c:5080
const struct config_enum_entry wal_sync_method_options[]
Definition xlog.c:178
int min_wal_size_mb
Definition xlog.c:122
bool CreateCheckPoint(int flags)
Definition xlog.c:7401
#define BootstrapTimeLineID
Definition xlog.c:118
bool DataChecksumsNeedWrite(void)
Definition xlog.c:4678
CheckpointStatsData CheckpointStats
Definition xlog.c:216
bool check_wal_buffers(int *newval, void **extra, GucSource source)
Definition xlog.c:5045
XLogRecPtr GetFakeLSNForUnloggedRel(void)
Definition xlog.c:5012
static char * str_time(pg_time_t tnow, char *buf, size_t bufsize)
Definition xlog.c:5623
XLogRecPtr GetXLogInsertEndRecPtr(void)
Definition xlog.c:10112
void XLogPutNextOid(Oid nextOid)
Definition xlog.c:8571
static ChecksumStateType LocalDataChecksumState
Definition xlog.c:677
void XLogFlush(XLogRecPtr record)
Definition xlog.c:2801
static void ReadControlFile(void)
Definition xlog.c:4410
static SessionBackupState sessionBackupState
Definition xlog.c:398
XLogRecPtr XLogAssignLSN(void)
Definition xlog.c:8656
void InitLocalDataChecksumState(void)
Definition xlog.c:4966
static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
Definition xlog.c:8050
static bool updateMinRecoveryPoint
Definition xlog.c:668
static void XLOGShmemRequest(void *arg)
Definition xlog.c:5299
int CheckPointSegments
Definition xlog.c:163
static bool check_wal_consistency_checking_deferred
Definition xlog.c:173
static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
Definition xlog.c:1149
void XLogShutdownWalRcv(void)
Definition xlog.c:10150
#define NextBufIdx(idx)
Definition xlog.c:604
static void UpdateLastRemovedPtr(char *filename)
Definition xlog.c:3867
static TimeLineID LocalMinRecoveryPointTLI
Definition xlog.c:667
void issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
Definition xlog.c:9362
static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
Definition xlog.c:1205
void XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
Definition xlog.c:2630
bool XLogCheckpointNeeded(XLogSegNo new_segno)
Definition xlog.c:2301
bool * wal_consistency_checking
Definition xlog.c:133
static int XLogFileInitInternal(XLogSegNo logsegno, TimeLineID logtli, bool *added, char *path)
Definition xlog.c:3247
static void update_checkpoint_display(int flags, bool restartpoint, bool reset)
Definition xlog.c:7336
#define XLogArchivingActive()
Definition xlog.h:102
#define TABLESPACE_MAP_OLD
Definition xlog.h:338
#define XLOG_MARK_UNIMPORTANT
Definition xlog.h:167
#define TABLESPACE_MAP
Definition xlog.h:337
@ ARCHIVE_MODE_ALWAYS
Definition xlog.h:69
@ ARCHIVE_MODE_OFF
Definition xlog.h:67
@ ARCHIVE_MODE_ON
Definition xlog.h:68
#define CHECKPOINT_FLUSH_UNLOGGED
Definition xlog.h:155
#define XLogLogicalInfoActive()
Definition xlog.h:137
#define STANDBY_SIGNAL_FILE
Definition xlog.h:333
#define CHECKPOINT_CAUSE_XLOG
Definition xlog.h:160
WALAvailability
Definition xlog.h:200
@ WALAVAIL_REMOVED
Definition xlog.h:206
@ WALAVAIL_RESERVED
Definition xlog.h:202
@ WALAVAIL_UNRESERVED
Definition xlog.h:205
@ WALAVAIL_EXTENDED
Definition xlog.h:203
@ WALAVAIL_INVALID_LSN
Definition xlog.h:201
#define BACKUP_LABEL_OLD
Definition xlog.h:335
#define CHECKPOINT_END_OF_RECOVERY
Definition xlog.h:152
@ WAL_COMPRESSION_NONE
Definition xlog.h:84
#define BACKUP_LABEL_FILE
Definition xlog.h:334
#define CHECKPOINT_CAUSE_TIME
Definition xlog.h:161
#define CHECKPOINT_FORCE
Definition xlog.h:154
SessionBackupState
Definition xlog.h:318
@ SESSION_BACKUP_RUNNING
Definition xlog.h:320
@ SESSION_BACKUP_NONE
Definition xlog.h:319
#define CHECKPOINT_WAIT
Definition xlog.h:157
#define CHECKPOINT_FAST
Definition xlog.h:153
#define RECOVERY_SIGNAL_FILE
Definition xlog.h:332
#define CHECKPOINT_IS_SHUTDOWN
Definition xlog.h:151
#define XLogArchivingAlways()
Definition xlog.h:105
WalLevel
Definition xlog.h:75
@ WAL_LEVEL_REPLICA
Definition xlog.h:77
@ WAL_LEVEL_LOGICAL
Definition xlog.h:78
@ WAL_LEVEL_MINIMAL
Definition xlog.h:76
RecoveryState
Definition xlog.h:92
@ RECOVERY_STATE_CRASH
Definition xlog.h:93
@ RECOVERY_STATE_DONE
Definition xlog.h:95
@ RECOVERY_STATE_ARCHIVE
Definition xlog.h:94
#define XLogIsNeeded()
Definition xlog.h:112
@ WAL_SYNC_METHOD_OPEN
Definition xlog.h:27
@ WAL_SYNC_METHOD_FDATASYNC
Definition xlog.h:26
@ WAL_SYNC_METHOD_FSYNC_WRITETHROUGH
Definition xlog.h:28
@ WAL_SYNC_METHOD_OPEN_DSYNC
Definition xlog.h:29
@ WAL_SYNC_METHOD_FSYNC
Definition xlog.h:25
#define XLogStandbyInfoActive()
Definition xlog.h:126
#define XLP_FIRST_IS_CONTRECORD
static RmgrData GetRmgr(RmgrId rmid)
#define IsValidWalSegSize(size)
XLogLongPageHeaderData * XLogLongPageHeader
#define XLP_FIRST_IS_OVERWRITE_CONTRECORD
#define XLOG_CONTROL_FILE
#define XLogSegmentOffset(xlogptr, wal_segsz_bytes)
static bool IsXLogFileName(const char *fname)
static void XLogFromFileName(const char *fname, TimeLineID *tli, XLogSegNo *logSegNo, int wal_segsz_bytes)
#define XLByteToPrevSeg(xlrp, logSegNo, wal_segsz_bytes)
#define XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest)
#define MAXFNAMELEN
XLogPageHeaderData * XLogPageHeader
#define XLOGDIR
#define XLP_LONG_HEADER
static bool IsBackupHistoryFileName(const char *fname)
#define XLOG_PAGE_MAGIC
#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)
static void BackupHistoryFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
static void XLogFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
#define XRecOffIsValid(xlrp)
#define SizeOfXLogShortPHD
#define SizeOfXLogLongPHD
static void XLogFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
static void BackupHistoryFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
static bool RmgrIdExists(RmgrId rmid)
#define XLByteInPrevSeg(xlrp, logSegNo, wal_segsz_bytes)
static bool IsPartialXLogFileName(const char *fname)
bool XLogArchiveIsReadyOrDone(const char *xlog)
bool XLogArchiveIsBusy(const char *xlog)
bool XLogArchiveIsReady(const char *xlog)
void XLogArchiveNotifySeg(XLogSegNo segno, TimeLineID tli)
void ExecuteRecoveryCommand(const char *command, const char *commandName, bool failOnSignal, uint32 wait_event_info)
bool XLogArchiveCheckDone(const char *xlog)
void XLogArchiveNotify(const char *xlog)
void XLogArchiveCleanup(const char *xlog)
char * build_backup_content(BackupState *state, bool ishistoryfile)
Definition xlogbackup.c:29
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define LSN_FORMAT_ARGS(lsn)
Definition xlogdefs.h:47
#define FirstNormalUnloggedLSN
Definition xlogdefs.h:37
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
uint32 TimeLineID
Definition xlogdefs.h:63
#define DEFAULT_WAL_SYNC_METHOD
Definition xlogdefs.h:83
uint64 XLogSegNo
Definition xlogdefs.h:52
const char * get_checksum_state_string(uint32 state)
Definition xlogdesc.c:59
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:482
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:372
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:464
void XLogBeginInsert(void)
Definition xloginsert.c:153
XLogReaderState * XLogReaderAllocate(int wal_segment_size, const char *waldir, XLogReaderRoutine *routine, void *private_data)
Definition xlogreader.c:108
bool DecodeXLogRecord(XLogReaderState *state, DecodedXLogRecord *decoded, XLogRecord *record, XLogRecPtr lsn, char **errormsg)
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define XLogRecGetInfo(decoder)
Definition xlogreader.h:410
#define XLogRecGetData(decoder)
Definition xlogreader.h:415
#define XL_ROUTINE(...)
Definition xlogreader.h:117
#define XLogRecMaxBlockId(decoder)
Definition xlogreader.h:418
#define XLogRecHasBlockImage(decoder, block_id)
Definition xlogreader.h:423
#define XLogRecHasAnyBlockRefs(decoder)
Definition xlogreader.h:417
#define SizeOfXLogRecordDataHeaderShort
Definition xlogrecord.h:217
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define SizeOfXLogRecord
Definition xlogrecord.h:55
void ShutdownWalRecovery(void)
bool ArchiveRecoveryRequested
bool InArchiveRecovery
void RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue)
void PerformWalRecovery(void)
char * archiveCleanupCommand
XLogRecPtr GetCurrentReplayRecPtr(TimeLineID *replayEndTLI)
void xlog_outdesc(StringInfo buf, XLogReaderState *record)
bool PromoteIsTriggered(void)
static XLogRecPtr missingContrecPtr
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
static XLogRecPtr abortedRecPtr
EndOfWalRecoveryInfo * FinishWalRecovery(void)
void InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr, bool *haveBackupLabel_ptr, bool *haveTblspcMap_ptr)
char * recoveryEndCommand
TimeLineID recoveryTargetTLI
TimestampTz GetLatestXTime(void)
bool XLogHaveInvalidPages(void)
Definition xlogutils.c:224
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition xlogutils.c:303
HotStandbyState standbyState
Definition xlogutils.c:53
bool InRecovery
Definition xlogutils.c:50
@ STANDBY_DISABLED
Definition xlogutils.h:52
@ STANDBY_INITIALIZED
Definition xlogutils.h:53
#define InHotStandby
Definition xlogutils.h:60
@ BLK_RESTORED
Definition xlogutils.h:76
struct WaitLSNState * waitLSNState
Definition xlogwait.c:70
void WaitLSNWakeup(WaitLSNType lsnType, XLogRecPtr currentLSN)
Definition xlogwait.c:320
@ WAIT_LSN_TYPE_PRIMARY_FLUSH
Definition xlogwait.h:44
@ WAIT_LSN_TYPE_STANDBY_REPLAY
Definition xlogwait.h:39
@ WAIT_LSN_TYPE_STANDBY_FLUSH
Definition xlogwait.h:41
@ WAIT_LSN_TYPE_STANDBY_WRITE
Definition xlogwait.h:40