PostgreSQL Source Code (git master) — xlog.c
1/*-------------------------------------------------------------------------
2 *
3 * xlog.c
4 * PostgreSQL write-ahead log manager
5 *
6 * The Write-Ahead Log (WAL) functionality is split into several source
7 * files, in addition to this one:
8 *
9 * xloginsert.c - Functions for constructing WAL records
10 * xlogrecovery.c - WAL recovery and standby code
11 * xlogreader.c - Facility for reading WAL files and parsing WAL records
12 * xlogutils.c - Helper functions for WAL redo routines
13 *
14 * This file contains functions for coordinating database startup and
15 * checkpointing, and managing the write-ahead log buffers when the
16 * system is running.
17 *
18 * StartupXLOG() is the main entry point of the startup process. It
19 * coordinates database startup, performing WAL recovery, and the
20 * transition from WAL recovery into normal operations.
21 *
22 * XLogInsertRecord() inserts a WAL record into the WAL buffers. Most
23 * callers should not call this directly, but use the functions in
24 * xloginsert.c to construct the WAL record. XLogFlush() can be used
25 * to force the WAL to disk.
26 *
27 * In addition to those, there are many other functions for interrogating
28 * the current system state, and for starting/stopping backups.
29 *
30 *
31 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
32 * Portions Copyright (c) 1994, Regents of the University of California
33 *
34 * src/backend/access/transam/xlog.c
35 *
36 *-------------------------------------------------------------------------
37 */
38
39#include "postgres.h"
40
41#include <ctype.h>
42#include <math.h>
43#include <time.h>
44#include <fcntl.h>
45#include <sys/stat.h>
46#include <sys/time.h>
47#include <unistd.h>
48
49#include "access/clog.h"
50#include "access/commit_ts.h"
51#include "access/heaptoast.h"
52#include "access/multixact.h"
53#include "access/rewriteheap.h"
54#include "access/subtrans.h"
55#include "access/timeline.h"
56#include "access/transam.h"
57#include "access/twophase.h"
58#include "access/xact.h"
60#include "access/xlogarchive.h"
61#include "access/xloginsert.h"
62#include "access/xlogreader.h"
63#include "access/xlogrecovery.h"
64#include "access/xlogutils.h"
65#include "access/xlogwait.h"
66#include "backup/basebackup.h"
67#include "catalog/catversion.h"
68#include "catalog/pg_control.h"
69#include "catalog/pg_database.h"
71#include "common/file_utils.h"
72#include "executor/instrument.h"
73#include "miscadmin.h"
74#include "pg_trace.h"
75#include "pgstat.h"
76#include "port/atomics.h"
77#include "postmaster/bgwriter.h"
78#include "postmaster/startup.h"
81#include "replication/origin.h"
82#include "replication/slot.h"
87#include "storage/bufmgr.h"
88#include "storage/fd.h"
89#include "storage/ipc.h"
91#include "storage/latch.h"
92#include "storage/predicate.h"
93#include "storage/proc.h"
94#include "storage/procarray.h"
95#include "storage/reinit.h"
96#include "storage/spin.h"
97#include "storage/sync.h"
98#include "utils/guc_hooks.h"
99#include "utils/guc_tables.h"
102#include "utils/ps_status.h"
103#include "utils/relmapper.h"
104#include "utils/snapmgr.h"
105#include "utils/timeout.h"
106#include "utils/timestamp.h"
107#include "utils/varlena.h"
108
109#ifdef WAL_DEBUG
110#include "utils/memutils.h"
111#endif
112
/* Timeline ID to be used when bootstrapping a new database cluster. */
#define BootstrapTimeLineID 1
115
116/* User-settable parameters */
117int max_wal_size_mb = 1024; /* 1 GB */
118int min_wal_size_mb = 80; /* 80 MB */
120int XLOGbuffers = -1;
124bool EnableHotStandby = false;
125bool fullPageWrites = true;
126bool wal_log_hints = false;
130bool wal_init_zero = true;
131bool wal_recycle = true;
132bool log_checkpoints = true;
135int CommitDelay = 0; /* precommit delay in microseconds */
136int CommitSiblings = 5; /* # concurrent xacts needed to sleep */
139int wal_decode_buffer_size = 512 * 1024;
141
142#ifdef WAL_DEBUG
143bool XLOG_DEBUG = false;
144#endif
145
147
/*
 * Number of WAL insertion locks to use. A higher value allows more insertions
 * to happen concurrently, but adds some CPU overhead to flushing the WAL,
 * which needs to iterate all the locks.
 *
 * NOTE(review): changing this is a space/concurrency trade-off only; the
 * comment above the WAL insertion lock declarations describes how the locks
 * are used.
 */
#define NUM_XLOGINSERT_LOCKS 8
154
155/*
156 * Max distance from last checkpoint, before triggering a new xlog-based
157 * checkpoint.
158 */
160
161/* Estimated distance between checkpoints, in bytes */
163static double PrevCheckPointDistance = 0;
164
165/*
166 * Track whether there were any deferred checks for custom resource managers
167 * specified in wal_consistency_checking.
168 */
170
171/*
172 * GUC support
173 */
175 {"fsync", WAL_SYNC_METHOD_FSYNC, false},
176#ifdef HAVE_FSYNC_WRITETHROUGH
177 {"fsync_writethrough", WAL_SYNC_METHOD_FSYNC_WRITETHROUGH, false},
178#endif
179 {"fdatasync", WAL_SYNC_METHOD_FDATASYNC, false},
180#ifdef O_SYNC
181 {"open_sync", WAL_SYNC_METHOD_OPEN, false},
182#endif
183#ifdef O_DSYNC
184 {"open_datasync", WAL_SYNC_METHOD_OPEN_DSYNC, false},
185#endif
186 {NULL, 0, false}
187};
188
189
190/*
191 * Although only "on", "off", and "always" are documented,
192 * we accept all the likely variants of "on" and "off".
193 */
195 {"always", ARCHIVE_MODE_ALWAYS, false},
196 {"on", ARCHIVE_MODE_ON, false},
197 {"off", ARCHIVE_MODE_OFF, false},
198 {"true", ARCHIVE_MODE_ON, true},
199 {"false", ARCHIVE_MODE_OFF, true},
200 {"yes", ARCHIVE_MODE_ON, true},
201 {"no", ARCHIVE_MODE_OFF, true},
202 {"1", ARCHIVE_MODE_ON, true},
203 {"0", ARCHIVE_MODE_OFF, true},
204 {NULL, 0, false}
205};
206
207/*
208 * Statistics for current checkpoint are collected in this global struct.
209 * Because only the checkpointer or a stand-alone backend can perform
210 * checkpoints, this will be unused in normal backends.
211 */
213
214/*
215 * During recovery, lastFullPageWrites keeps track of full_page_writes that
216 * the replayed WAL records indicate. It's initialized with full_page_writes
217 * that the recovery starting checkpoint record indicates, and then updated
218 * each time XLOG_FPW_CHANGE record is replayed.
219 */
221
/*
 * Local copy of the state tracked by SharedRecoveryState in shared memory.
 * It is false if SharedRecoveryState is RECOVERY_STATE_DONE. True actually
 * means "not known, need to check the shared state" — so the flag can only
 * transition from true to false, never back, in a given backend.
 */
static bool LocalRecoveryInProgress = true;
228
229/*
230 * Local state for XLogInsertAllowed():
231 * 1: unconditionally allowed to insert XLOG
232 * 0: unconditionally not allowed to insert XLOG
233 * -1: must check RecoveryInProgress(); disallow until it is false
234 * Most processes start with -1 and transition to 1 after seeing that recovery
235 * is not in progress. But we can also force the value for special cases.
236 * The coding in XLogInsertAllowed() depends on the first two of these states
237 * being numerically the same as bool true and false.
238 */
240
241/*
242 * ProcLastRecPtr points to the start of the last XLOG record inserted by the
243 * current backend. It is updated for all inserts. XactLastRecEnd points to
244 * end+1 of the last record, and is reset when we end a top-level transaction,
245 * or start a new one; so it can be used to tell if the current transaction has
246 * created any XLOG records.
247 *
248 * While in parallel mode, this may not be fully up to date. When committing,
249 * a transaction can assume this covers all xlog records written either by the
250 * user backend or by any parallel worker which was present at any point during
251 * the transaction. But when aborting, or when still in parallel mode, other
252 * parallel backends may have written WAL records at later LSNs than the value
253 * stored here. The parallel leader advances its own copy, when necessary,
254 * in WaitForParallelWorkersToFinish.
255 */
259
260/*
261 * RedoRecPtr is this backend's local copy of the REDO record pointer
262 * (which is almost but not quite the same as a pointer to the most recent
263 * CHECKPOINT record). We update this from the shared-memory copy,
264 * XLogCtl->Insert.RedoRecPtr, whenever we can safely do so (ie, when we
265 * hold an insertion lock). See XLogInsertRecord for details. We are also
266 * allowed to update from XLogCtl->RedoRecPtr if we hold the info_lck;
267 * see GetRedoRecPtr.
268 *
269 * NB: Code that uses this variable must be prepared not only for the
270 * possibility that it may be arbitrarily out of date, but also for the
271 * possibility that it might be set to InvalidXLogRecPtr. We used to
272 * initialize it as a side effect of the first call to RecoveryInProgress(),
273 * which meant that most code that might use it could assume that it had a
274 * real if perhaps stale value. That's no longer the case.
275 */
277
/*
 * doPageWrites is this backend's local copy of (fullPageWrites ||
 * runningBackups > 0). It is used together with RedoRecPtr to decide whether
 * a full-page image of a page needs to be taken.
 *
 * NB: Initially this is false, and there's no guarantee that it will be
 * initialized to any other value before it is first used. Any code that
 * makes use of it must recheck the value after obtaining a WALInsertLock,
 * and respond appropriately if it turns out that the previous value wasn't
 * accurate.
 */
static bool doPageWrites;
290
291/*----------
292 * Shared-memory data structures for XLOG control
293 *
294 * LogwrtRqst indicates a byte position that we need to write and/or fsync
295 * the log up to (all records before that point must be written or fsynced).
296 * The positions already written/fsynced are maintained in logWriteResult
297 * and logFlushResult using atomic access.
298 * In addition to the shared variable, each backend has a private copy of
299 * both in LogwrtResult, which is updated when convenient.
300 *
301 * The request bookkeeping is simpler: there is a shared XLogCtl->LogwrtRqst
302 * (protected by info_lck), but we don't need to cache any copies of it.
303 *
304 * info_lck is only held long enough to read/update the protected variables,
305 * so it's a plain spinlock. The other locks are held longer (potentially
306 * over I/O operations), so we use LWLocks for them. These locks are:
307 *
308 * WALBufMappingLock: must be held to replace a page in the WAL buffer cache.
309 * It is only held while initializing and changing the mapping. If the
310 * contents of the buffer being replaced haven't been written yet, the mapping
311 * lock is released while the write is done, and reacquired afterwards.
312 *
313 * WALWriteLock: must be held to write WAL buffers to disk (XLogWrite or
314 * XLogFlush).
315 *
316 * ControlFileLock: must be held to read/update control file or create
317 * new log file.
318 *
319 *----------
320 */
321
322typedef struct XLogwrtRqst
323{
324 XLogRecPtr Write; /* last byte + 1 to write out */
325 XLogRecPtr Flush; /* last byte + 1 to flush */
327
328typedef struct XLogwrtResult
329{
330 XLogRecPtr Write; /* last byte + 1 written out */
331 XLogRecPtr Flush; /* last byte + 1 flushed */
333
334/*
335 * Inserting to WAL is protected by a small fixed number of WAL insertion
336 * locks. To insert to the WAL, you must hold one of the locks - it doesn't
 * matter which one. To lock out other concurrent insertions, you must hold
 * all of them. Each WAL insertion lock consists of a lightweight lock, plus an
339 * indicator of how far the insertion has progressed (insertingAt).
340 *
341 * The insertingAt values are read when a process wants to flush WAL from
342 * the in-memory buffers to disk, to check that all the insertions to the
343 * region the process is about to write out have finished. You could simply
344 * wait for all currently in-progress insertions to finish, but the
345 * insertingAt indicator allows you to ignore insertions to later in the WAL,
346 * so that you only wait for the insertions that are modifying the buffers
347 * you're about to write out.
348 *
349 * This isn't just an optimization. If all the WAL buffers are dirty, an
350 * inserter that's holding a WAL insert lock might need to evict an old WAL
351 * buffer, which requires flushing the WAL. If it's possible for an inserter
352 * to block on another inserter unnecessarily, deadlock can arise when two
353 * inserters holding a WAL insert lock wait for each other to finish their
354 * insertion.
355 *
356 * Small WAL records that don't cross a page boundary never update the value,
357 * the WAL record is just copied to the page and the lock is released. But
358 * to avoid the deadlock-scenario explained above, the indicator is always
359 * updated before sleeping while holding an insertion lock.
360 *
361 * lastImportantAt contains the LSN of the last important WAL record inserted
362 * using a given lock. This value is used to detect if there has been
363 * important WAL activity since the last time some action, like a checkpoint,
364 * was performed - allowing to not repeat the action if not. The LSN is
365 * updated for all insertions, unless the XLOG_MARK_UNIMPORTANT flag was
366 * set. lastImportantAt is never cleared, only overwritten by the LSN of newer
367 * records. Tracking the WAL activity directly in WALInsertLock has the
368 * advantage of not needing any additional locks to update the value.
369 */
376
377/*
378 * All the WAL insertion locks are allocated as an array in shared memory. We
379 * force the array stride to be a power of 2, which saves a few cycles in
380 * indexing, but more importantly also ensures that individual slots don't
381 * cross cache line boundaries. (Of course, we have to also ensure that the
382 * array start address is suitably aligned.)
383 */
389
390/*
391 * Session status of running backup, used for sanity checks in SQL-callable
392 * functions to start and stop backups.
393 */
395
396/*
397 * Shared state data for WAL insertion.
398 */
399typedef struct XLogCtlInsert
400{
401 slock_t insertpos_lck; /* protects CurrBytePos and PrevBytePos */
402
403 /*
404 * CurrBytePos is the end of reserved WAL. The next record will be
405 * inserted at that position. PrevBytePos is the start position of the
406 * previously inserted (or rather, reserved) record - it is copied to the
407 * prev-link of the next record. These are stored as "usable byte
408 * positions" rather than XLogRecPtrs (see XLogBytePosToRecPtr()).
409 */
412
413 /*
414 * Make sure the above heavily-contended spinlock and byte positions are
415 * on their own cache line. In particular, the RedoRecPtr and full page
416 * write variables below should be on a different cache line. They are
417 * read on every WAL insertion, but updated rarely, and we don't want
418 * those reads to steal the cache line containing Curr/PrevBytePos.
419 */
421
422 /*
423 * fullPageWrites is the authoritative value used by all backends to
424 * determine whether to write full-page image to WAL. This shared value,
425 * instead of the process-local fullPageWrites, is required because, when
426 * full_page_writes is changed by SIGHUP, we must WAL-log it before it
427 * actually affects WAL-logging by backends. Checkpointer sets at startup
428 * or after SIGHUP.
429 *
430 * To read these fields, you must hold an insertion lock. To modify them,
431 * you must hold ALL the locks.
432 */
433 XLogRecPtr RedoRecPtr; /* current redo point for insertions */
435
436 /*
437 * runningBackups is a counter indicating the number of backups currently
438 * in progress. lastBackupStart is the latest checkpoint redo location
439 * used as a starting point for an online backup.
440 */
443
444 /*
445 * WAL insertion locks.
446 */
449
450/*
451 * Total shared-memory state for XLOG.
452 */
453typedef struct XLogCtlData
454{
456
457 /* Protected by info_lck: */
459 XLogRecPtr RedoRecPtr; /* a recent copy of Insert->RedoRecPtr */
460 XLogRecPtr asyncXactLSN; /* LSN of newest async commit/abort */
461 XLogRecPtr replicationSlotMinLSN; /* oldest LSN needed by any slot */
462
463 XLogSegNo lastRemovedSegNo; /* latest removed/recycled XLOG segment */
464
465 /* Fake LSN counter, for unlogged relations. */
467
468 /* Time and LSN of last xlog segment switch. Protected by WALWriteLock. */
471
472 /* These are accessed using atomics -- info_lck not needed */
473 pg_atomic_uint64 logInsertResult; /* last byte + 1 inserted to buffers */
474 pg_atomic_uint64 logWriteResult; /* last byte + 1 written out */
475 pg_atomic_uint64 logFlushResult; /* last byte + 1 flushed */
476
477 /*
478 * Latest initialized page in the cache (last byte position + 1).
479 *
480 * To change the identity of a buffer (and InitializedUpTo), you need to
481 * hold WALBufMappingLock. To change the identity of a buffer that's
482 * still dirty, the old page needs to be written out first, and for that
483 * you need WALWriteLock, and you need to ensure that there are no
484 * in-progress insertions to the page by calling
485 * WaitXLogInsertionsToFinish().
486 */
488
489 /*
490 * These values do not change after startup, although the pointed-to pages
491 * and xlblocks values certainly do. xlblocks values are protected by
492 * WALBufMappingLock.
493 */
494 char *pages; /* buffers for unwritten XLOG pages */
495 pg_atomic_uint64 *xlblocks; /* 1st byte ptr-s + XLOG_BLCKSZ */
496 int XLogCacheBlck; /* highest allocated xlog buffer index */
497
498 /*
499 * InsertTimeLineID is the timeline into which new WAL is being inserted
500 * and flushed. It is zero during recovery, and does not change once set.
501 *
502 * If we create a new timeline when the system was started up,
503 * PrevTimeLineID is the old timeline's ID that we forked off from.
504 * Otherwise it's equal to InsertTimeLineID.
505 *
 * We set these fields while holding info_lck. Most code that reads these
 * values knows that recovery is no longer in progress and so can safely
508 * read the value without a lock, but code that could be run either during
509 * or after recovery can take info_lck while reading these values.
510 */
513
514 /*
515 * SharedRecoveryState indicates if we're still in crash or archive
516 * recovery. Protected by info_lck.
517 */
519
520 /*
521 * InstallXLogFileSegmentActive indicates whether the checkpointer should
522 * arrange for future segments by recycling and/or PreallocXlogFiles().
523 * Protected by ControlFileLock. Only the startup process changes it. If
524 * true, anyone can use InstallXLogFileSegment(). If false, the startup
525 * process owns the exclusive right to install segments, by reading from
526 * the archive and possibly replacing existing files.
527 */
529
530 /*
531 * WalWriterSleeping indicates whether the WAL writer is currently in
532 * low-power mode (and hence should be nudged if an async commit occurs).
533 * Protected by info_lck.
534 */
536
537 /*
538 * During recovery, we keep a copy of the latest checkpoint record here.
539 * lastCheckPointRecPtr points to start of checkpoint record and
540 * lastCheckPointEndPtr points to end+1 of checkpoint record. Used by the
541 * checkpointer when it wants to create a restartpoint.
542 *
543 * Protected by info_lck.
544 */
548
549 /*
550 * lastFpwDisableRecPtr points to the start of the last replayed
551 * XLOG_FPW_CHANGE record that instructs full_page_writes is disabled.
552 */
554
555 slock_t info_lck; /* locks shared variables shown above */
557
558/*
559 * Classification of XLogInsertRecord operations.
560 */
567
569
570/* a private copy of XLogCtl->Insert.WALInsertLocks, for convenience */
572
573/*
574 * We maintain an image of pg_control in shared memory.
575 */
577
/*
 * Calculate the amount of space left on the page after 'endptr'. Beware
 * multiple evaluation!  Yields 0 when 'endptr' falls exactly on a page
 * boundary (i.e. the "free space" of a full page is zero, not XLOG_BLCKSZ).
 */
#define INSERT_FREESPACE(endptr) \
 (((endptr) % XLOG_BLCKSZ == 0) ? 0 : (XLOG_BLCKSZ - (endptr) % XLOG_BLCKSZ))
584
/*
 * Macro to advance to next buffer index, wrapping back to 0 after the
 * highest allocated WAL buffer index (XLogCtl->XLogCacheBlck).
 */
#define NextBufIdx(idx) \
 (((idx) == XLogCtl->XLogCacheBlck) ? 0 : ((idx) + 1))
588
/*
 * XLogRecPtrToBufIdx returns the index of the WAL buffer that holds, or
 * would hold if it was in cache, the page containing 'recptr'.  Pages map
 * to buffers modulo the total buffer count (XLogCacheBlck + 1).
 */
#define XLogRecPtrToBufIdx(recptr) \
 (((recptr) / XLOG_BLCKSZ) % (XLogCtl->XLogCacheBlck + 1))
595
/*
 * These are the number of bytes in a WAL page usable for WAL data, i.e.
 * the page size minus the short page-header overhead.
 */
#define UsableBytesInPage (XLOG_BLCKSZ - SizeOfXLogShortPHD)
600
/*
 * Convert values of GUCs measured in megabytes to equiv. segment count.
 * Rounds down.  Thin wrapper around XLogMBVarToSegs().
 */
#define ConvertToXSegs(x, segsize) XLogMBVarToSegs((x), (segsize))
606
607/* The number of bytes in a WAL segment usable for WAL data. */
609
610/*
611 * Private, possibly out-of-date copy of shared LogwrtResult.
612 * See discussion above.
613 */
615
/*
 * Update local copy of shared XLogCtl->log{Write,Flush}Result
 *
 * It's critical that Flush always trails Write, so the order of the reads is
 * important, as is the barrier.  Reading Flush first (with a read barrier
 * before reading Write) guarantees the copied Flush value can never be ahead
 * of the copied Write value.  See also XLogWrite.
 */
#define RefreshXLogWriteResult(_target) \
 do { \
  _target.Flush = pg_atomic_read_u64(&XLogCtl->logFlushResult); \
  pg_read_barrier(); \
  _target.Write = pg_atomic_read_u64(&XLogCtl->logWriteResult); \
 } while (0)
628
/*
 * openLogFile is -1 or a kernel FD for an open log file segment.
 * openLogSegNo identifies the segment, and openLogTLI the corresponding TLI.
 * These variables are only used to write the XLOG, and so will normally refer
 * to the active segment.
 *
 * Note: call Reserve/ReleaseExternalFD to track consumption of this FD.
 */
static int openLogFile = -1;
640
/*
 * Local copies of equivalent fields in the control file. When running
 * crash recovery, LocalMinRecoveryPoint is set to InvalidXLogRecPtr as we
 * expect to replay all the WAL available, and updateMinRecoveryPoint is
 * switched to false to prevent any updates while replaying records.
 * Those values are kept consistent as long as crash recovery runs.
 */
static bool updateMinRecoveryPoint = true;
651
/*
 * For WALInsertLockAcquire/Release functions: MyLockNo records which of the
 * insertion locks this backend currently holds, and holdingAllLocks is set
 * when it holds every one of them (exclusive acquisition).
 */
static int MyLockNo = 0;
static bool holdingAllLocks = false;
655
656#ifdef WAL_DEBUG
658#endif
659
663static void CheckRequiredParameterValues(void);
664static void XLogReportParameters(void);
665static int LocalSetXLogInsertAllowed(void);
666static void CreateEndOfRecoveryRecord(void);
670static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags);
672
674 bool opportunistic);
675static void XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible);
676static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
678 TimeLineID tli);
679static void XLogFileClose(void);
680static void PreallocXlogFiles(XLogRecPtr endptr, TimeLineID tli);
681static void RemoveTempXlogFiles(void);
684static void RemoveXlogFile(const struct dirent *segment_de,
687static void UpdateLastRemovedPtr(char *filename);
688static void ValidateXLOGDirectoryStructure(void);
689static void CleanupBackupHistory(void);
690static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force);
691static bool PerformRecoveryXLogAction(void);
692static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version);
693static void WriteControlFile(void);
694static void ReadControlFile(void);
695static void UpdateControlFile(void);
696static char *str_time(pg_time_t tnow, char *buf, size_t bufsize);
697
698static int get_sync_bit(int method);
699
700static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch,
703 TimeLineID tli);
704static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos,
709static char *GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli);
713
714static void WALInsertLockAcquire(void);
715static void WALInsertLockAcquireExclusive(void);
716static void WALInsertLockRelease(void);
717static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt);
718
719/*
720 * Insert an XLOG record represented by an already-constructed chain of data
721 * chunks. This is a low-level routine; to construct the WAL record header
722 * and data, use the higher-level routines in xloginsert.c.
723 *
724 * If 'fpw_lsn' is valid, it is the oldest LSN among the pages that this
725 * WAL record applies to, that were not included in the record as full page
726 * images. If fpw_lsn <= RedoRecPtr, the function does not perform the
727 * insertion and returns InvalidXLogRecPtr. The caller can then recalculate
728 * which pages need a full-page image, and retry. If fpw_lsn is invalid, the
729 * record is always inserted.
730 *
731 * 'flags' gives more in-depth control on the record being inserted. See
732 * XLogSetRecordFlags() for details.
733 *
734 * 'topxid_included' tells whether the top-transaction id is logged along with
735 * current subtransaction. See XLogRecordAssemble().
736 *
737 * The first XLogRecData in the chain must be for the record header, and its
738 * data must be MAXALIGNed. XLogInsertRecord fills in the xl_prev and
739 * xl_crc fields in the header, the rest of the header must already be filled
740 * by the caller.
741 *
742 * Returns XLOG pointer to end of record (beginning of next record).
743 * This can be used as LSN for data pages affected by the logged action.
744 * (LSN is the XLOG point up to which the XLOG must be flushed to disk
745 * before the data page can be written out. This implements the basic
746 * WAL rule "write the log before the data".)
747 */
751 uint8 flags,
752 int num_fpi,
754 bool topxid_included)
755{
758 bool inserted;
759 XLogRecord *rechdr = (XLogRecord *) rdata->data;
760 uint8 info = rechdr->xl_info & ~XLR_INFO_MASK;
766
767 /* Does this record type require special handling? */
768 if (unlikely(rechdr->xl_rmid == RM_XLOG_ID))
769 {
770 if (info == XLOG_SWITCH)
772 else if (info == XLOG_CHECKPOINT_REDO)
774 }
775
776 /* we assume that all of the record header is in the first chunk */
778
779 /* cross-check on whether we should be here or not */
780 if (!XLogInsertAllowed())
781 elog(ERROR, "cannot make new WAL entries during recovery");
782
783 /*
784 * Given that we're not in recovery, InsertTimeLineID is set and can't
785 * change, so we can read it without a lock.
786 */
788
789 /*----------
790 *
791 * We have now done all the preparatory work we can without holding a
792 * lock or modifying shared state. From here on, inserting the new WAL
793 * record to the shared WAL buffer cache is a two-step process:
794 *
795 * 1. Reserve the right amount of space from the WAL. The current head of
796 * reserved space is kept in Insert->CurrBytePos, and is protected by
797 * insertpos_lck.
798 *
799 * 2. Copy the record to the reserved WAL space. This involves finding the
800 * correct WAL buffer containing the reserved space, and copying the
801 * record in place. This can be done concurrently in multiple processes.
802 *
803 * To keep track of which insertions are still in-progress, each concurrent
804 * inserter acquires an insertion lock. In addition to just indicating that
805 * an insertion is in progress, the lock tells others how far the inserter
806 * has progressed. There is a small fixed number of insertion locks,
807 * determined by NUM_XLOGINSERT_LOCKS. When an inserter crosses a page
808 * boundary, it updates the value stored in the lock to the how far it has
809 * inserted, to allow the previous buffer to be flushed.
810 *
811 * Holding onto an insertion lock also protects RedoRecPtr and
812 * fullPageWrites from changing until the insertion is finished.
813 *
814 * Step 2 can usually be done completely in parallel. If the required WAL
815 * page is not initialized yet, you have to grab WALBufMappingLock to
816 * initialize it, but the WAL writer tries to do that ahead of insertions
817 * to avoid that from happening in the critical path.
818 *
819 *----------
820 */
822
823 if (likely(class == WALINSERT_NORMAL))
824 {
826
827 /*
828 * Check to see if my copy of RedoRecPtr is out of date. If so, may
829 * have to go back and have the caller recompute everything. This can
830 * only happen just after a checkpoint, so it's better to be slow in
831 * this case and fast otherwise.
832 *
833 * Also check to see if fullPageWrites was just turned on or there's a
834 * running backup (which forces full-page writes); if we weren't
835 * already doing full-page writes then go back and recompute.
836 *
837 * If we aren't doing full-page writes then RedoRecPtr doesn't
838 * actually affect the contents of the XLOG record, so we'll update
839 * our local copy but not force a recomputation. (If doPageWrites was
840 * just turned off, we could recompute the record without full pages,
841 * but we choose not to bother.)
842 */
843 if (RedoRecPtr != Insert->RedoRecPtr)
844 {
846 RedoRecPtr = Insert->RedoRecPtr;
847 }
848 doPageWrites = (Insert->fullPageWrites || Insert->runningBackups > 0);
849
850 if (doPageWrites &&
853 {
854 /*
855 * Oops, some buffer now needs to be backed up that the caller
856 * didn't back up. Start over.
857 */
860 return InvalidXLogRecPtr;
861 }
862
863 /*
864 * Reserve space for the record in the WAL. This also sets the xl_prev
865 * pointer.
866 */
868 &rechdr->xl_prev);
869
870 /* Normal records are always inserted. */
871 inserted = true;
872 }
873 else if (class == WALINSERT_SPECIAL_SWITCH)
874 {
875 /*
876 * In order to insert an XLOG_SWITCH record, we need to hold all of
877 * the WAL insertion locks, not just one, so that no one else can
878 * begin inserting a record until we've figured out how much space
879 * remains in the current WAL segment and claimed all of it.
880 *
881 * Nonetheless, this case is simpler than the normal cases handled
882 * below, which must check for changes in doPageWrites and RedoRecPtr.
883 * Those checks are only needed for records that can contain buffer
884 * references, and an XLOG_SWITCH record never does.
885 */
889 }
890 else
891 {
893
894 /*
895 * We need to update both the local and shared copies of RedoRecPtr,
896 * which means that we need to hold all the WAL insertion locks.
897 * However, there can't be any buffer references, so as above, we need
898 * not check RedoRecPtr before inserting the record; we just need to
899 * update it afterwards.
900 */
904 &rechdr->xl_prev);
905 RedoRecPtr = Insert->RedoRecPtr = StartPos;
906 inserted = true;
907 }
908
909 if (inserted)
910 {
911 /*
912 * Now that xl_prev has been filled in, calculate CRC of the record
913 * header.
914 */
915 rdata_crc = rechdr->xl_crc;
918 rechdr->xl_crc = rdata_crc;
919
920 /*
921 * All the record data, including the header, is now ready to be
922 * inserted. Copy the record in the space reserved.
923 */
924 CopyXLogRecordToWAL(rechdr->xl_tot_len,
927
928 /*
929 * Unless record is flagged as not important, update LSN of last
930 * important record in the current slot. When holding all locks, just
931 * update the first one.
932 */
933 if ((flags & XLOG_MARK_UNIMPORTANT) == 0)
934 {
935 int lockno = holdingAllLocks ? 0 : MyLockNo;
936
938 }
939 }
940 else
941 {
942 /*
943 * This was an xlog-switch record, but the current insert location was
944 * already exactly at the beginning of a segment, so there was no need
945 * to do anything.
946 */
947 }
948
949 /*
950 * Done! Let others know that we're finished.
951 */
953
955
957
958 /*
959 * Mark top transaction id is logged (if needed) so that we should not try
960 * to log it again with the next WAL record in the current subtransaction.
961 */
962 if (topxid_included)
964
965 /*
966 * Update shared LogwrtRqst.Write, if we crossed page boundary.
967 */
969 {
971 /* advance global request to include new block(s) */
976 }
977
978 /*
979 * If this was an XLOG_SWITCH record, flush the record and the empty
980 * padding space that fills the rest of the segment, and perform
981 * end-of-segment actions (eg, notifying archiver).
982 */
983 if (class == WALINSERT_SPECIAL_SWITCH)
984 {
987
988 /*
989 * Even though we reserved the rest of the segment for us, which is
990 * reflected in EndPos, we return a pointer to just the end of the
991 * xlog-switch record.
992 */
993 if (inserted)
994 {
997 {
999
1000 if (offset == EndPos % XLOG_BLCKSZ)
1002 else
1004 }
1005 }
1006 }
1007
1008#ifdef WAL_DEBUG
1009 if (XLOG_DEBUG)
1010 {
1012 XLogRecord *record;
1016 char *errormsg = NULL;
1018
1020
1022 appendStringInfo(&buf, "INSERT @ %X/%08X: ", LSN_FORMAT_ARGS(EndPos));
1023
1024 /*
1025 * We have to piece together the WAL record data from the XLogRecData
1026 * entries, so that we can pass it to the rm_desc function as one
1027 * contiguous chunk.
1028 */
1030 for (; rdata != NULL; rdata = rdata->next)
1032
1033 /* We also need temporary space to decode the record. */
1034 record = (XLogRecord *) recordBuf.data;
1037
1038 if (!debug_reader)
1040 XL_ROUTINE(.page_read = NULL,
1041 .segment_open = NULL,
1042 .segment_close = NULL),
1043 NULL);
1044 if (!debug_reader)
1045 {
1046 appendStringInfoString(&buf, "error decoding record: out of memory while allocating a WAL reading processor");
1047 }
1049 decoded,
1050 record,
1051 EndPos,
1052 &errormsg))
1053 {
1054 appendStringInfo(&buf, "error decoding record: %s",
1055 errormsg ? errormsg : "no error message");
1056 }
1057 else
1058 {
1059 appendStringInfoString(&buf, " - ");
1060
1061 debug_reader->record = decoded;
1063 debug_reader->record = NULL;
1064 }
1065 elog(LOG, "%s", buf.data);
1066
1067 pfree(decoded);
1068 pfree(buf.data);
1069 pfree(recordBuf.data);
1071 }
1072#endif
1073
1074 /*
1075 * Update our global variables
1076 */
1079
1080 /* Report WAL traffic to the instrumentation. */
1081 if (inserted)
1082 {
1083 pgWalUsage.wal_bytes += rechdr->xl_tot_len;
1087
1088 /* Required for the flush of pending stats WAL data */
1089 pgstat_report_fixed = true;
1090 }
1091
1092 return EndPos;
1093}
1094
1095/*
1096 * Reserves the right amount of space for a record of given size from the WAL.
1097 * *StartPos is set to the beginning of the reserved section, *EndPos to
1098 * its end+1. *PrevPtr is set to the beginning of the previous record; it is
1099 * used to set the xl_prev of this record.
1100 *
1101 * This is the performance critical part of XLogInsert that must be serialized
1102 * across backends. The rest can happen mostly in parallel. Try to keep this
1103 * section as short as possible, insertpos_lck can be heavily contended on a
1104 * busy system.
1105 *
1106 * NB: The space calculation here must match the code in CopyXLogRecordToWAL,
1107 * where we actually copy the record to the reserved space.
1108 *
1109 * NB: Testing shows that XLogInsertRecord runs faster if this code is inlined;
1110 * however, because there are two call sites, the compiler is reluctant to
1111 * inline. We use pg_attribute_always_inline here to try to convince it.
1112 */
1116{
/* NOTE(review): the function signature and local declarations (orig lines 1113-1121) are elided in this extraction. */
1121
/* The record length is MAXALIGN'ed so the next record starts aligned. */
1122 size = MAXALIGN(size);
1123
1124 /* All (non xlog-switch) records should contain data. */
1125 Assert(size > SizeOfXLogRecord);
1126
1127 /*
1128 * The duration the spinlock needs to be held is minimized by minimizing
1129 * the calculations that have to be done while holding the lock. The
1130 * current tip of reserved WAL is kept in CurrBytePos, as a byte position
1131 * that only counts "usable" bytes in WAL, that is, it excludes all WAL
1132 * page headers. The mapping between "usable" byte positions and physical
1133 * positions (XLogRecPtrs) can be done outside the locked region, and
1134 * because the usable byte position doesn't include any headers, reserving
1135 * X bytes from WAL is almost as simple as "CurrBytePos += X".
1136 */
1137 SpinLockAcquire(&Insert->insertpos_lck);
1138
1139 startbytepos = Insert->CurrBytePos;
1140 endbytepos = startbytepos + size;
1141 prevbytepos = Insert->PrevBytePos;
1142 Insert->CurrBytePos = endbytepos;
1143 Insert->PrevBytePos = startbytepos;
1144
1145 SpinLockRelease(&Insert->insertpos_lck);
1146
/* NOTE(review): elided lines (orig 1147-1150) presumably convert the byte positions into *StartPos/*EndPos/*PrevPtr outside the lock -- confirm against full source. */
1150
1151 /*
1152 * Check that the conversions between "usable byte positions" and
1153 * XLogRecPtrs work consistently in both directions.
1154 */
1158}
1159
1160/*
1161 * Like ReserveXLogInsertLocation(), but for an xlog-switch record.
1162 *
1163 * A log-switch record is handled slightly differently. The rest of the
1164 * segment will be reserved for this insertion, as indicated by the returned
1165 * *EndPos value. However, if we are already at the beginning of the current
1166 * segment, *StartPos and *EndPos are set to the current location without
1167 * reserving any space, and the function returns false.
1168*/
1169static bool
/* NOTE(review): parameter list and most local declarations (orig lines 1170-1178) are elided in this extraction. */
1171{
1177 XLogRecPtr ptr;
1179
1180 /*
1181 * These calculations are a bit heavy-weight to be done while holding a
1182 * spinlock, but since we're holding all the WAL insertion locks, there
1183 * are no other inserters competing for it. GetXLogInsertRecPtr() does
1184 * compete for it, but that's not called very frequently.
1185 */
1186 SpinLockAcquire(&Insert->insertpos_lck);
1187
1188 startbytepos = Insert->CurrBytePos;
1189
/* Fast path: already exactly at a segment boundary, nothing to reserve. */
1191 if (XLogSegmentOffset(ptr, wal_segment_size) == 0)
1192 {
1193 SpinLockRelease(&Insert->insertpos_lck);
1194 *EndPos = *StartPos = ptr;
1195 return false;
1196 }
1197
1198 endbytepos = startbytepos + size;
1199 prevbytepos = Insert->PrevBytePos;
1200
/* NOTE(review): elided lines compute *EndPos and 'segleft' (bytes remaining in the segment) -- confirm against full source. */
1203
1206 {
1207 /* consume the rest of the segment */
1208 *EndPos += segleft;
1210 }
1211 Insert->CurrBytePos = endbytepos;
1212 Insert->PrevBytePos = startbytepos;
1213
1214 SpinLockRelease(&Insert->insertpos_lck);
1215
1217
1222
1223 return true;
1224}
1225
1226/*
1227 * Subroutine of XLogInsertRecord. Copies a WAL record to an already-reserved
1228 * area in the WAL.
1229 */
1230static void
/* NOTE(review): the parameter list (orig lines 1231-1232) is elided; usage below shows at least write_len, rdata, StartPos, EndPos and tli. */
1233{
1234 char *currpos;
1235 int freespace;
1236 int written;
1239
1240 /*
1241 * Get a pointer to the right place in the right WAL buffer to start
1242 * inserting to.
1243 */
1244 CurrPos = StartPos;
1245 currpos = GetXLogBuffer(CurrPos, tli);
1246 freespace = INSERT_FREESPACE(CurrPos);
1247
1248 /*
1249 * there should be enough space for at least the first field (xl_tot_len)
1250 * on this page.
1251 */
1252 Assert(freespace >= sizeof(uint32));
1253
1254 /* Copy record data */
1255 written = 0;
1256 while (rdata != NULL)
1257 {
1258 const char *rdata_data = rdata->data;
1259 int rdata_len = rdata->len;
1260
/* This rdata chunk does not fit on the current page: spill across pages. */
1261 while (rdata_len > freespace)
1262 {
1263 /*
1264 * Write what fits on this page, and continue on the next page.
1265 */
1266 Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || freespace == 0);
1267 memcpy(currpos, rdata_data, freespace);
1268 rdata_data += freespace;
1269 rdata_len -= freespace;
1270 written += freespace;
1271 CurrPos += freespace;
1272
1273 /*
1274 * Get pointer to beginning of next page, and set the xlp_rem_len
1275 * in the page header. Set XLP_FIRST_IS_CONTRECORD.
1276 *
1277 * It's safe to set the contrecord flag and xlp_rem_len without a
1278 * lock on the page. All the other flags were already set when the
1279 * page was initialized, in AdvanceXLInsertBuffer, and we're the
1280 * only backend that needs to set the contrecord flag.
1281 */
1282 currpos = GetXLogBuffer(CurrPos, tli);
1283 pagehdr = (XLogPageHeader) currpos;
1284 pagehdr->xlp_rem_len = write_len - written;
1285 pagehdr->xlp_info |= XLP_FIRST_IS_CONTRECORD;
1286
1287 /* skip over the page header */
/* NOTE(review): the elided condition presumably distinguishes a segment's first page (long header) from other pages (short header) -- confirm. */
1289 {
1291 currpos += SizeOfXLogLongPHD;
1292 }
1293 else
1294 {
1296 currpos += SizeOfXLogShortPHD;
1297 }
1298 freespace = INSERT_FREESPACE(CurrPos);
1299 }
1300
/* Remainder of this rdata chunk fits entirely on the current page. */
1301 Assert(CurrPos % XLOG_BLCKSZ >= SizeOfXLogShortPHD || rdata_len == 0);
1302 memcpy(currpos, rdata_data, rdata_len);
1303 currpos += rdata_len;
1304 CurrPos += rdata_len;
1305 freespace -= rdata_len;
1306 written += rdata_len;
1307
1308 rdata = rdata->next;
1309 }
1311
1312 /*
1313 * If this was an xlog-switch, it's not enough to write the switch record,
1314 * we also have to consume all the remaining space in the WAL segment. We
1315 * have already reserved that space, but we need to actually fill it.
1316 */
/* NOTE(review): the 'if' condition (orig line 1317) is elided; per the comment above, this branch runs only for xlog-switch records. */
1318 {
1319 /* An xlog-switch record doesn't contain any data besides the header */
1321
1322 /* Assert that we did reserve the right amount of space */
1324
1325 /* Use up all the remaining space on the current page */
1326 CurrPos += freespace;
1327
1328 /*
1329 * Cause all remaining pages in the segment to be flushed, leaving the
1330 * XLog position where it should be, at the start of the next segment.
1331 * We do this one page at a time, to make sure we don't deadlock
1332 * against ourselves if wal_buffers < wal_segment_size.
1333 */
1334 while (CurrPos < EndPos)
1335 {
1336 /*
1337 * The minimal action to flush the page would be to call
1338 * WALInsertLockUpdateInsertingAt(CurrPos) followed by
1339 * AdvanceXLInsertBuffer(...). The page would be left initialized
1340 * mostly to zeros, except for the page header (always the short
1341 * variant, as this is never a segment's first page).
1342 *
1343 * The large vistas of zeros are good for compressibility, but the
1344 * headers interrupting them every XLOG_BLCKSZ (with values that
1345 * differ from page to page) are not. The effect varies with
1346 * compression tool, but bzip2 for instance compresses about an
1347 * order of magnitude worse if those headers are left in place.
1348 *
1349 * Rather than complicating AdvanceXLInsertBuffer itself (which is
1350 * called in heavily-loaded circumstances as well as this lightly-
1351 * loaded one) with variant behavior, we just use GetXLogBuffer
1352 * (which itself calls the two methods we need) to get the pointer
1353 * and zero most of the page. Then we just zero the page header.
1354 */
1355 currpos = GetXLogBuffer(CurrPos, tli);
1356 MemSet(currpos, 0, SizeOfXLogShortPHD);
1357
/* NOTE(review): the advance of CurrPos to the next page boundary (orig line 1358) is elided. */
1359 }
1360 }
1361 else
1362 {
1363 /* Align the end position, so that the next record starts aligned */
1365 }
1366
/* Final cross-check: the reservation made by ReserveXLogInsertLocation must match exactly what was copied. */
1367 if (CurrPos != EndPos)
1368 ereport(PANIC,
1370 errmsg_internal("space reserved for WAL record does not match what was written"));
1371}
1372
1373/*
1374 * Acquire a WAL insertion lock, for inserting to WAL.
1375 */
1376static void
1378{
1379 bool immed;
1380
1381 /*
1382 * It doesn't matter which of the WAL insertion locks we acquire, so try
1383 * the one we used last time. If the system isn't particularly busy, it's
1384 * a good bet that it's still available, and it's good to have some
1385 * affinity to a particular lock so that you don't unnecessarily bounce
1386 * cache lines between processes when there's no contention.
1387 *
1388 * If this is the first time through in this backend, pick a lock
1389 * (semi-)randomly. This allows the locks to be used evenly if you have a
1390 * lot of very short connections.
1391 */
1392 static int lockToTry = -1;
1393
1394 if (lockToTry == -1)
/* NOTE(review): the random initial pick of lockToTry (orig lines 1395-1396) is elided. */
1397
1398 /*
1399 * The insertingAt value is initially set to 0, as we don't know our
1400 * insert location yet.
1401 */
/* NOTE(review): the elided acquire call (orig line 1402) presumably sets 'immed' to whether the lock was obtained without blocking -- confirm. */
1403 if (!immed)
1404 {
1405 /*
1406 * If we couldn't get the lock immediately, try another lock next
1407 * time. On a system with more insertion locks than concurrent
1408 * inserters, this causes all the inserters to eventually migrate to a
1409 * lock that no-one else is using. On a system with more inserters
1410 * than locks, it still helps to distribute the inserters evenly
1411 * across the locks.
1412 */
1414 }
1415}
1416
1417/*
1418 * Acquire all WAL insertion locks, to prevent other backends from inserting
1419 * to WAL.
1420 */
1421static void
1423{
1424 int i;
1425
1426 /*
1427 * When holding all the locks, all but the last lock's insertingAt
1428 * indicator is set to 0xFFFFFFFFFFFFFFFF, which is higher than any real
1429 * XLogRecPtr value, to make sure that no-one blocks waiting on those.
1430 */
1431 for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++)
1432 {
/* NOTE(review): loop body elided; per the comment above it acquires lock i and advertises the max insertingAt value. */
1437 }
1438 /* Variable value reset to 0 at release */
/* NOTE(review): acquisition of the last lock (orig line 1439) is elided. */
1440
1441 holdingAllLocks = true;
1442}
1443
1444/*
1445 * Release our insertion lock (or locks, if we're holding them all).
1446 *
1447 * NB: Reset all variables to 0, so they cause LWLockWaitForVar to block the
1448 * next time the lock is acquired.
1449 */
1450static void
1452{
1453 if (holdingAllLocks)
1454 {
1455 int i;
1456
/* Release every insertion lock, clearing its insertingAt back to 0. */
1457 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
1460 0);
1461
1462 holdingAllLocks = false;
1463 }
1464 else
1465 {
/* Release only the single lock acquired by WALInsertLockAcquire (the elided call's trailing argument 0 resets its variable). */
1468 0);
1469 }
1470}
1471
1472/*
1473 * Update our insertingAt value, to let others know that we've finished
1474 * inserting up to that point.
1475 */
1476static void
1478{
1479 if (holdingAllLocks)
1480 {
1481 /*
1482 * We use the last lock to mark our actual position, see comments in
1483 * WALInsertLockAcquireExclusive.
1484 */
/* NOTE(review): the elided call presumably is LWLockUpdateVar on the last insertion lock -- confirm. */
1487 insertingAt);
1488 }
1489 else
/* Otherwise advertise on the single lock this backend holds (call target elided). */
1492 insertingAt);
1493}
1494
1495/*
1496 * Wait for any WAL insertions < upto to finish.
1497 *
1498 * Returns the location of the oldest insertion that is still in-progress.
1499 * Any WAL prior to that point has been fully copied into WAL buffers, and
1500 * can be flushed out to disk. Because this waits for any insertions older
1501 * than 'upto' to finish, the return value is always >= 'upto'.
1502 *
1503 * Note: When you are about to write out WAL, you must call this function
1504 * *before* acquiring WALWriteLock, to avoid deadlocks. This function might
1505 * need to wait for an insertion to finish (or at least advance to next
1506 * uninitialized page), and the inserter might need to evict an old WAL buffer
1507 * to make room for a new one, which in turn requires WALWriteLock.
1508 */
1509static XLogRecPtr
/* NOTE(review): parameter list and several local declarations (orig lines 1510-1517) are elided; locals bytepos, reservedUpto, finishedUpto, inserted, insertingat are used below. */
1511{
1517 int i;
1518
1519 if (MyProc == NULL)
1520 elog(PANIC, "cannot wait without a PGPROC structure");
1521
1522 /*
1523 * Check if there's any work to do. Use a barrier to ensure we get the
1524 * freshest value.
1525 */
1527 if (upto <= inserted)
1528 return inserted;
1529
1530 /* Read the current insert position */
1531 SpinLockAcquire(&Insert->insertpos_lck);
1532 bytepos = Insert->CurrBytePos;
1533 SpinLockRelease(&Insert->insertpos_lck);
/* NOTE(review): conversion of bytepos to reservedUpto (orig line 1534) is elided. */
1535
1536 /*
1537 * No-one should request to flush a piece of WAL that hasn't even been
1538 * reserved yet. However, it can happen if there is a block with a bogus
1539 * LSN on disk, for example. XLogFlush checks for that situation and
1540 * complains, but only after the flush. Here we just assume that to mean
1541 * that all WAL that has been reserved needs to be finished. In this
1542 * corner-case, the return value can be smaller than 'upto' argument.
1543 */
1544 if (upto > reservedUpto)
1545 {
1546 ereport(LOG,
1547 errmsg("request to flush past end of generated WAL; request %X/%08X, current position %X/%08X",
1550 }
1551
1552 /*
1553 * Loop through all the locks, sleeping on any in-progress insert older
1554 * than 'upto'.
1555 *
1556 * finishedUpto is our return value, indicating the point upto which all
1557 * the WAL insertions have been finished. Initialize it to the head of
1558 * reserved WAL, and as we iterate through the insertion locks, back it
1559 * out for any insertion that's still in progress.
1560 */
1562 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
1563 {
/* NOTE(review): per-lock reset of 'insertingat' (orig line 1564) is elided. */
1565
1566 do
1567 {
1568 /*
1569 * See if this insertion is in progress. LWLockWaitForVar will
1570 * wait for the lock to be released, or for the 'value' to be set
1571 * by a LWLockUpdateVar call. When a lock is initially acquired,
1572 * its value is 0 (InvalidXLogRecPtr), which means that we don't
1573 * know where it's inserting yet. We will have to wait for it. If
1574 * it's a small insertion, the record will most likely fit on the
1575 * same page and the inserter will release the lock without ever
1576 * calling LWLockUpdateVar. But if it has to sleep, it will
1577 * advertise the insertion point with LWLockUpdateVar before
1578 * sleeping.
1579 *
1580 * In this loop we are only waiting for insertions that started
1581 * before WaitXLogInsertionsToFinish was called. The lack of
1582 * memory barriers in the loop means that we might see locks as
1583 * "unused" that have since become used. This is fine because
1584 * they only can be used for later insertions that we would not
1585 * want to wait on anyway. Not taking a lock to acquire the
1586 * current insertingAt value means that we might see older
1587 * insertingAt values. This is also fine, because if we read a
1588 * value too old, we will add ourselves to the wait queue, which
1589 * contains atomic operations.
1590 */
1591 if (LWLockWaitForVar(&WALInsertLocks[i].l.lock,
1594 {
1595 /* the lock was free, so no insertion in progress */
1597 break;
1598 }
1599
1600 /*
1601 * This insertion is still in progress. Have to wait, unless the
1602 * inserter has proceeded past 'upto'.
1603 */
1604 } while (insertingat < upto);
1605
/* NOTE(review): elided lines presumably back finishedUpto out to this in-progress insertion's position -- confirm. */
1608 }
1609
1610 /*
1611 * Advance the limit we know to have been inserted and return the freshest
1612 * value we know of, which might be beyond what we requested if somebody
1613 * is concurrently doing this with an 'upto' pointer ahead of us.
1614 */
1616 finishedUpto);
1617
1618 return finishedUpto;
1619}
1620
1621/*
1622 * Get a pointer to the right location in the WAL buffer containing the
1623 * given XLogRecPtr.
1624 *
1625 * If the page is not initialized yet, it is initialized. That might require
1626 * evicting an old dirty buffer from the buffer cache, which means I/O.
1627 *
1628 * The caller must ensure that the page containing the requested location
1629 * isn't evicted yet, and won't be evicted. The way to ensure that is to
1630 * hold onto a WAL insertion lock with the insertingAt position set to
1631 * something <= ptr. GetXLogBuffer() will update insertingAt if it needs
1632 * to evict an old page from the buffer. (This means that once you call
1633 * GetXLogBuffer() with a given 'ptr', you must not access anything before
1634 * that point anymore, and must not call GetXLogBuffer() with an older 'ptr'
1635 * later, because older buffers might be recycled already)
1636 */
1637static char *
/* NOTE(review): signature and some local declarations (orig lines 1638, 1644) are elided; 'expectedEndPtr' and 'initializedUpto' are used below. */
1639{
1640 int idx;
1641 XLogRecPtr endptr;
1642 static uint64 cachedPage = 0;
1643 static char *cachedPos = NULL;
1645
1646 /*
1647 * Fast path for the common case that we need to access again the same
1648 * page as last time.
1649 */
1650 if (ptr / XLOG_BLCKSZ == cachedPage)
1651 {
1653 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
1654 return cachedPos + ptr % XLOG_BLCKSZ;
1655 }
1656
1657 /*
1658 * The XLog buffer cache is organized so that a page is always loaded to a
1659 * particular buffer. That way we can easily calculate the buffer a given
1660 * page must be loaded into, from the XLogRecPtr alone.
1661 */
1662 idx = XLogRecPtrToBufIdx(ptr);
1663
1664 /*
1665 * See what page is loaded in the buffer at the moment. It could be the
1666 * page we're looking for, or something older. It can't be anything newer
1667 * - that would imply the page we're looking for has already been written
1668 * out to disk and evicted, and the caller is responsible for making sure
1669 * that doesn't happen.
1670 *
1671 * We don't hold a lock while we read the value. If someone is just about
1672 * to initialize or has just initialized the page, it's possible that we
1673 * get InvalidXLogRecPtr. That's ok, we'll grab the mapping lock (in
1674 * AdvanceXLInsertBuffer) and retry if we see anything other than the page
1675 * we're looking for.
1676 */
1677 expectedEndPtr = ptr;
/* NOTE(review): the adjustment of expectedEndPtr to the page's end and the xlblocks read (orig lines 1678, 1680) are elided. */
1679
1681 if (expectedEndPtr != endptr)
1682 {
1684
1685 /*
1686 * Before calling AdvanceXLInsertBuffer(), which can block, let others
1687 * know how far we're finished with inserting the record.
1688 *
1689 * NB: If 'ptr' points to just after the page header, advertise a
1690 * position at the beginning of the page rather than 'ptr' itself. If
1691 * there are no other insertions running, someone might try to flush
1692 * up to our advertised location. If we advertised a position after
1693 * the page header, someone might try to flush the page header, even
1694 * though page might actually not be initialized yet. As the first
1695 * inserter on the page, we are effectively responsible for making
1696 * sure that it's initialized, before we let insertingAt to move past
1697 * the page header.
1698 */
1699 if (ptr % XLOG_BLCKSZ == SizeOfXLogShortPHD &&
1702 else if (ptr % XLOG_BLCKSZ == SizeOfXLogLongPHD &&
1705 else
1706 initializedUpto = ptr;
1707
/* NOTE(review): the WALInsertLockUpdateInsertingAt(initializedUpto) call (orig line 1708) is elided. */
1709
1710 AdvanceXLInsertBuffer(ptr, tli, false);
/* Re-read the buffer's end pointer after advancing (read elided at orig line 1711). */
1712
1713 if (expectedEndPtr != endptr)
1714 elog(PANIC, "could not find WAL buffer for %X/%08X",
1715 LSN_FORMAT_ARGS(ptr));
1716 }
1717 else
1718 {
1719 /*
1720 * Make sure the initialization of the page is visible to us, and
1721 * won't arrive later to overwrite the WAL data we write on the page.
1722 */
1724 }
1725
1726 /*
1727 * Found the buffer holding this page. Return a pointer to the right
1728 * offset within the page.
1729 */
1730 cachedPage = ptr / XLOG_BLCKSZ;
1732
1734 Assert(((XLogPageHeader) cachedPos)->xlp_pageaddr == ptr - (ptr % XLOG_BLCKSZ));
1735
1736 return cachedPos + ptr % XLOG_BLCKSZ;
1737}
1738
1739/*
1740 * Read WAL data directly from WAL buffers, if available. Returns the number
1741 * of bytes read successfully.
1742 *
1743 * Fewer than 'count' bytes may be read if some of the requested WAL data has
1744 * already been evicted.
1745 *
1746 * No locks are taken.
1747 *
1748 * Caller should ensure that it reads no further than LogwrtResult.Write
1749 * (which should have been updated by the caller when determining how far to
1750 * read). The 'tli' argument is only used as a convenient safety check so that
1751 * callers do not read from WAL buffers on a historical timeline.
1752 */
1753Size
1755 TimeLineID tli)
1756{
1757 char *pdst = dstbuf;
1758 XLogRecPtr recptr = startptr;
1760 Size nbytes = count;
1761
/* NOTE(review): the early-exit condition (orig line 1762) is elided -- presumably rejects reads for a timeline other than the current one; confirm. */
1763 return 0;
1764
1765 Assert(XLogRecPtrIsValid(startptr));
1766
1767 /*
1768 * Caller should ensure that the requested data has been inserted into WAL
1769 * buffers before we try to read it.
1770 */
1772 if (startptr + count > inserted)
1773 ereport(ERROR,
1774 errmsg("cannot read past end of generated WAL: requested %X/%08X, current position %X/%08X",
1775 LSN_FORMAT_ARGS(startptr + count),
1777
1778 /*
1779 * Loop through the buffers without a lock. For each buffer, atomically
1780 * read and verify the end pointer, then copy the data out, and finally
1781 * re-read and re-verify the end pointer.
1782 *
1783 * Once a page is evicted, it never returns to the WAL buffers, so if the
1784 * end pointer matches the expected end pointer before and after we copy
1785 * the data, then the right page must have been present during the data
1786 * copy. Read barriers are necessary to ensure that the data copy actually
1787 * happens between the two verification steps.
1788 *
1789 * If either verification fails, we simply terminate the loop and return
1790 * with the data that had been already copied out successfully.
1791 */
1792 while (nbytes > 0)
1793 {
1794 uint32 offset = recptr % XLOG_BLCKSZ;
1797 XLogRecPtr endptr;
1798 const char *page;
1799 const char *psrc;
1801
1802 /*
1803 * Calculate the end pointer we expect in the xlblocks array if the
1804 * correct page is present.
1805 */
1806 expectedEndPtr = recptr + (XLOG_BLCKSZ - offset);
1807
1808 /*
1809 * First verification step: check that the correct page is present in
1810 * the WAL buffers.
1811 */
1813 if (expectedEndPtr != endptr)
1814 break;
1815
1816 /*
1817 * The correct page is present (or was at the time the endptr was
1818 * read; must re-verify later). Calculate pointer to source data and
1819 * determine how much data to read from this page.
1820 */
1821 page = XLogCtl->pages + idx * (Size) XLOG_BLCKSZ;
1822 psrc = page + offset;
1823 npagebytes = Min(nbytes, XLOG_BLCKSZ - offset);
1824
1825 /*
1826 * Ensure that the data copy and the first verification step are not
1827 * reordered.
1828 */
1830
1831 /* data copy */
/* NOTE(review): the memcpy(pdst, psrc, npagebytes) and both read barriers are elided in this extraction. */
1833
1834 /*
1835 * Ensure that the data copy and the second verification step are not
1836 * reordered.
1837 */
1839
1840 /*
1841 * Second verification step: check that the page we read from wasn't
1842 * evicted while we were copying the data.
1843 */
1845 if (expectedEndPtr != endptr)
1846 break;
1847
1848 pdst += npagebytes;
1849 recptr += npagebytes;
1850 nbytes -= npagebytes;
1851 }
1852
1853 Assert(pdst - dstbuf <= count);
1854
1855 return pdst - dstbuf;
1856}
1857
1858/*
1859 * Converts a "usable byte position" to XLogRecPtr. A usable byte position
1860 * is the position starting from the beginning of WAL, excluding all WAL
1861 * page headers.
1862 */
1863static XLogRecPtr
/* NOTE(review): signature and local declarations (orig lines 1864-1869) are elided; locals fullsegs/bytesleft/seg_offset are implied by the sibling converters below. */
1865{
1870 XLogRecPtr result;
1871
1874
1876 {
1877 /* fits on first page of segment */
1879 }
1880 else
1881 {
1882 /* account for the first page on segment with long header */
1885
1888
1890 }
1891
/* NOTE(review): the XLogSegNoOffsetToRecPtr-style combination into 'result' (orig line 1892) is elided. */
1893
1894 return result;
1895}
1896
1897/*
1898 * Like XLogBytePosToRecPtr, but if the position is at a page boundary,
1899 * returns a pointer to the beginning of the page (ie. before page header),
1900 * not to where the first xlog record on that page would go to. This is used
1901 * when converting a pointer to the end of a record.
1902 */
1903static XLogRecPtr
/* NOTE(review): signature and local declarations (orig lines 1904-1909) are elided. */
1905{
1910 XLogRecPtr result;
1911
1914
1916 {
1917 /* fits on first page of segment */
/* At an exact page boundary, point before the page header. */
1918 if (bytesleft == 0)
1919 seg_offset = 0;
1920 else
1922 }
1923 else
1924 {
1925 /* account for the first page on segment with long header */
1928
1931
1932 if (bytesleft == 0)
1934 else
1936 }
1937
/* NOTE(review): combination of segment number and seg_offset into 'result' (orig line 1938) is elided. */
1939
1940 return result;
1941}
1942
1943/*
1944 * Convert an XLogRecPtr to a "usable byte position".
1945 */
1946static uint64
/* NOTE(review): signature and declarations of fullsegs/fullpages (orig lines 1947-1950, 1954-1956) are elided. */
1948{
1951 uint32 offset;
1952 uint64 result;
1953
1955
1957 offset = ptr % XLOG_BLCKSZ;
1958
/* First page of a segment carries a long header; later pages a short one. */
1959 if (fullpages == 0)
1960 {
1961 result = fullsegs * UsableBytesInSegment;
1962 if (offset > 0)
1963 {
1964 Assert(offset >= SizeOfXLogLongPHD);
1965 result += offset - SizeOfXLogLongPHD;
1966 }
1967 }
1968 else
1969 {
1970 result = fullsegs * UsableBytesInSegment +
1971 (XLOG_BLCKSZ - SizeOfXLogLongPHD) + /* account for first page */
1972 (fullpages - 1) * UsableBytesInPage; /* full pages */
1973 if (offset > 0)
1974 {
1975 Assert(offset >= SizeOfXLogShortPHD);
1976 result += offset - SizeOfXLogShortPHD;
1977 }
1978 }
1979
1980 return result;
1981}
1982
1983/*
1984 * Initialize XLOG buffers, writing out old buffers if they still contain
1985 * unwritten data, upto the page containing 'upto'. Or if 'opportunistic' is
1986 * true, initialize as many pages as we can without having to write out
1987 * unwritten data. Any new pages are initialized to zeros, with pages headers
1988 * initialized properly.
1989 */
1990static void
/* NOTE(review): signature and several local declarations (orig lines 1991, 1994-1998) are elided; NewPage, NewPageBeginPtr, OldPageRqstPtr and WriteRqst are used below. */
1992{
1993 int nextidx;
1999 int npages pg_attribute_unused() = 0;
2000
/* NOTE(review): initial LWLockAcquire(WALBufMappingLock, ...) (orig line 2001) is elided. */
2002
2003 /*
2004 * Now that we have the lock, check if someone initialized the page
2005 * already.
2006 */
/* NOTE(review): the loop condition (orig line 2007) is elided; the braces below form the per-page loop body. */
2008 {
2010
2011 /*
2012 * Get ending-offset of the buffer page we need to replace (this may
2013 * be zero if the buffer hasn't been used yet). Fall through if it's
2014 * already written out.
2015 */
2018 {
2019 /*
2020 * Nope, got work to do. If we just want to pre-initialize as much
2021 * as we can without flushing, give up now.
2022 */
2023 if (opportunistic)
2024 break;
2025
2026 /* Advance shared memory write request position */
2031
2032 /*
2033 * Acquire an up-to-date LogwrtResult value and see if we still
2034 * need to write it or if someone else already did.
2035 */
2038 {
2039 /*
2040 * Must acquire write lock. Release WALBufMappingLock first,
2041 * to make sure that all insertions that we need to wait for
2042 * can finish (up to this same position). Otherwise we risk
2043 * deadlock.
2044 */
2046
2048
2050
2053 {
2054 /* OK, someone wrote it already */
2056 }
2057 else
2058 {
2059 /* Have to write it ourselves */
2061 WriteRqst.Write = OldPageRqstPtr;
/* Flush request is not set here; XLogWrite only needs the write target (Flush line elided). */
2063 XLogWrite(WriteRqst, tli, false);
2067
2068 /*
2069 * Required for the flush of pending stats WAL data, per
2070 * update of pgWalUsage.
2071 */
2072 pgstat_report_fixed = true;
2073 }
2074 /* Re-acquire WALBufMappingLock and retry */
2076 continue;
2077 }
2078 }
2079
2080 /*
2081 * Now the next buffer slot is free and we can set it up to be the
2082 * next output page.
2083 */
2086
2088
2090
2091 /*
2092 * Mark the xlblock with InvalidXLogRecPtr and issue a write barrier
2093 * before initializing. Otherwise, the old page may be partially
2094 * zeroed but look valid.
2095 */
2098
2099 /*
2100 * Be sure to re-zero the buffer so that bytes beyond what we've
2101 * written will look like zeroes and not valid XLOG records...
2102 */
2104
2105 /*
2106 * Fill the new page's header
2107 */
2108 NewPage->xlp_magic = XLOG_PAGE_MAGIC;
2109
2110 /* NewPage->xlp_info = 0; */ /* done by memset */
2111 NewPage->xlp_tli = tli;
2112 NewPage->xlp_pageaddr = NewPageBeginPtr;
2113
2114 /* NewPage->xlp_rem_len = 0; */ /* done by memset */
2115
2116 /*
2117 * If first page of an XLOG segment file, make it a long header.
2118 */
2119 if ((XLogSegmentOffset(NewPage->xlp_pageaddr, wal_segment_size)) == 0)
2120 {
2122
2124 NewLongPage->xlp_seg_size = wal_segment_size;
2125 NewLongPage->xlp_xlog_blcksz = XLOG_BLCKSZ;
2126 NewPage->xlp_info |= XLP_LONG_HEADER;
2127 }
2128
2129 /*
2130 * Make sure the initialization of the page becomes visible to others
2131 * before the xlblocks update. GetXLogBuffer() reads xlblocks without
2132 * holding a lock.
2133 */
2135
2138
2139 npages++;
2140 }
/* NOTE(review): LWLockRelease(WALBufMappingLock) (orig line 2141) is elided. */
2142
2143#ifdef WAL_DEBUG
2144 if (XLOG_DEBUG && npages > 0)
2145 {
2146 elog(DEBUG1, "initialized %d pages, up to %X/%08X",
2148 }
2149#endif
2150}
2151
2152/*
2153 * Calculate CheckPointSegments based on max_wal_size_mb and
2154 * checkpoint_completion_target.
2155 */
2156static void
2158{
2159 double target;
2160
2161 /*-------
2162 * Calculate the distance at which to trigger a checkpoint, to avoid
2163 * exceeding max_wal_size_mb. This is based on two assumptions:
2164 *
2165 * a) we keep WAL for only one checkpoint cycle (prior to PG11 we kept
2166 * WAL for two checkpoint cycles to allow us to recover from the
2167 * secondary checkpoint if the first checkpoint failed, though we
2168 * only did this on the primary anyway, not on standby. Keeping just
2169 * one checkpoint simplifies processing and reduces disk space in
2170 * many smaller databases.)
2171 * b) during checkpoint, we consume checkpoint_completion_target *
2172 * number of segments consumed between checkpoints.
2173 *-------
2174 */
/* NOTE(review): the formula assigning 'target' (orig lines 2175-2176) is elided; per the header comment it derives from max_wal_size_mb and checkpoint_completion_target. */
2177
2178 /* round down */
2179 CheckPointSegments = (int) target;
2180
/* Clamp to at least one segment (assignment at orig line 2182 elided). */
2181 if (CheckPointSegments < 1)
2183}
2184
/* NOTE(review): two function definitions (orig lines 2185-2198) are almost entirely elided here; by position between CalculateCheckpointSegments and check_wal_segment_size these are likely GUC assign hooks (e.g. for max_wal_size / checkpoint_completion_target) that call CalculateCheckpointSegments -- confirm against full source. */
2185void
2191
2192void
2198
2199bool
/* NOTE(review): signature and validity check (orig lines 2200-2202) are elided; the error text below indicates a power-of-two range check on the proposed WAL segment size. */
2201{
2203 {
2204 GUC_check_errdetail("The WAL segment size must be a power of two between 1 MB and 1 GB.");
2205 return false;
2206 }
2207
2208 return true;
2209}
2210
2211/*
2212 * At a checkpoint, how many WAL segments to recycle as preallocated future
2213 * XLOG segments? Returns the highest segment that should be preallocated.
2214 */
2215static XLogSegNo
/* NOTE(review): signature and declarations of minSegNo/maxSegNo/recycleSegNo (orig lines 2216-2221) are elided. */
2217{
2220 double distance;
2222
2223 /*
2224 * Calculate the segment numbers that min_wal_size_mb and max_wal_size_mb
2225 * correspond to. Always recycle enough segments to meet the minimum, and
2226 * remove enough segments to stay below the maximum.
2227 */
/* NOTE(review): minSegNo/maxSegNo computation (orig lines 2228-2231) is elided. */
2232
2233 /*
2234 * Between those limits, recycle enough segments to get us through to the
2235 * estimated end of next checkpoint.
2236 *
2237 * To estimate where the next checkpoint will finish, assume that the
2238 * system runs steadily consuming CheckPointDistanceEstimate bytes between
2239 * every checkpoint.
2240 */
2242 /* add 10% for good measure. */
2243 distance *= 1.10;
2244
2245 recycleSegNo = (XLogSegNo) ceil(((double) lastredoptr + distance) /
2247
/* Clamp the estimate into [minSegNo, maxSegNo] (clamp assignments elided at orig lines 2249, 2251). */
2248 if (recycleSegNo < minSegNo)
2250 if (recycleSegNo > maxSegNo)
2252
2253 return recycleSegNo;
2254}
2255
2256/*
2257 * Check whether we've consumed enough xlog space that a checkpoint is needed.
2258 *
2259 * new_segno indicates a log file that has just been filled up (or read
2260 * during recovery). We measure the distance from RedoRecPtr to new_segno
2261 * and see if that exceeds CheckPointSegments.
2262 *
2263 * Note: it is caller's responsibility that RedoRecPtr is up-to-date.
2264 */
2265bool
/* NOTE(review): signature, the old_segno computation from RedoRecPtr and the distance comparison (orig lines 2266, 2268-2272) are elided; per the header comment the elided condition tests new_segno against old_segno + CheckPointSegments. */
2267{
2269
2271
2273 return true;
2274 return false;
2275}
2276
2277/*
2278 * Write and/or fsync the log at least as far as WriteRqst indicates.
2279 *
2280 * If flexible == true, we don't have to write as far as WriteRqst, but
2281 * may stop at any convenient boundary (such as a cache or logfile boundary).
2282 * This option allows us to avoid uselessly issuing multiple writes when a
2283 * single one would do.
2284 *
2285 * Must be called with WALWriteLock held. WaitXLogInsertionsToFinish(WriteRqst)
2286 * must be called before grabbing the lock, to make sure the data is ready to
2287 * write.
2288 */
2289static void
2291{
2292 bool ispartialpage;
2293 bool last_iteration;
2294 bool finishing_seg;
2295 int curridx;
2296 int npages;
2297 int startidx;
2299
2300 /* We should always be inside a critical section here */
2302
2303 /*
2304 * Update local LogwrtResult (caller probably did this already, but...)
2305 */
2307
2308 /*
2309 * Since successive pages in the xlog cache are consecutively allocated,
2310 * we can usually gather multiple pages together and issue just one
2311 * write() call. npages is the number of pages we have determined can be
2312 * written together; startidx is the cache block index of the first one,
2313 * and startoffset is the file offset at which it should go. The latter
2314 * two variables are only valid when npages > 0, but we must initialize
2315 * all of them to keep the compiler quiet.
2316 */
2317 npages = 0;
2318 startidx = 0;
2319 startoffset = 0;
2320
2321 /*
2322 * Within the loop, curridx is the cache block index of the page to
2323 * consider writing. Begin at the buffer containing the next unwritten
2324 * page, or last partially written page.
2325 */
2327
2328 while (LogwrtResult.Write < WriteRqst.Write)
2329 {
2330  /*
2331  * Make sure we're not ahead of the insert process. This could happen
2332  * if we're passed a bogus WriteRqst.Write that is past the end of the
2333  * last page that's been initialized by AdvanceXLInsertBuffer.
2334  */
2336
2337  if (LogwrtResult.Write >= EndPtr)
2338   elog(PANIC, "xlog write request %X/%08X is past end of log %X/%08X",
2341
2342  /* Advance LogwrtResult.Write to end of current buffer page */
2345
2348  {
2349   /*
2350   * Switch to new logfile segment. We cannot have any pending
2351   * pages here (since we dump what we have at segment end).
2352   */
2353   Assert(npages == 0);
2354   if (openLogFile >= 0)
2355    XLogFileClose();
2358   openLogTLI = tli;
2359
2360   /* create/use new log file */
2363  }
2364
2365  /* Make sure we have the current logfile open */
2366  if (openLogFile < 0)
2367  {
2370   openLogTLI = tli;
2373  }
2374
2375  /* Add current page to the set of pending pages-to-dump */
2376  if (npages == 0)
2377  {
2378   /* first of group */
2379   startidx = curridx;
2382  }
2383  npages++;
2384
2385  /*
2386  * Dump the set if this will be the last loop iteration, or if we are
2387  * at the last page of the cache area (since the next page won't be
2388  * contiguous in memory), or if we are at the end of the logfile
2389  * segment.
2390  */
2392
2395
2396  if (last_iteration ||
2399  {
2400   char *from;
2401   Size nbytes;
2402   Size nleft;
2405
2406   /* OK to write the page(s) */
2407   from = XLogCtl->pages + startidx * (Size) XLOG_BLCKSZ;
2408   nbytes = npages * (Size) XLOG_BLCKSZ;
2409   nleft = nbytes;
2410   do
2411   {
2412    errno = 0;
2413
2414    /*
2415    * Measure I/O timing to write WAL data, for pg_stat_io.
2416    */
2418
2422
2424        IOOP_WRITE, start, 1, written);
2425
    /* write() failed or wrote nothing: retry on EINTR, otherwise PANIC */
2426    if (written <= 0)
2427    {
2428     char xlogfname[MAXFNAMELEN];
2429     int save_errno;
2430
2431     if (errno == EINTR)
2432      continue;
2433
2434     save_errno = errno;
2437     errno = save_errno;
2438     ereport(PANIC,
2440       errmsg("could not write to log file \"%s\" at offset %u, length %zu: %m",
2442    }
2443    nleft -= written;
2444    from += written;
2446   } while (nleft > 0);
2447
2448   npages = 0;
2449
2450   /*
2451   * If we just wrote the whole last page of a logfile segment,
2452   * fsync the segment immediately. This avoids having to go back
2453   * and re-open prior segments when an fsync request comes along
2454   * later. Doing it here ensures that one and only one backend will
2455   * perform this fsync.
2456   *
2457   * This is also the right place to notify the Archiver that the
2458   * segment is ready to copy to archival storage, and to update the
2459   * timer for archive_timeout, and to signal for a checkpoint if
2460   * too many logfile segments have been used since the last
2461   * checkpoint.
2462   */
2463   if (finishing_seg)
2464   {
2466
2467    /* signal that we need to wakeup walsenders later */
2469
2470    LogwrtResult.Flush = LogwrtResult.Write; /* end of page */
2471
2472    if (XLogArchivingActive())
2474
2477
2478    /*
2479    * Request a checkpoint if we've consumed too much xlog since
2480    * the last one. For speed, we first check using the local
2481    * copy of RedoRecPtr, which might be out of date; if it looks
2482    * like a checkpoint is needed, forcibly update RedoRecPtr and
2483    * recheck.
2484    */
2486    {
2487     (void) GetRedoRecPtr();
2490    }
2491   }
2492  }
2493
2494  if (ispartialpage)
2495  {
2496   /* Only asked to write a partial page */
2498   break;
2499  }
2501
2502  /* If flexible, break out of loop as soon as we wrote something */
2503  if (flexible && npages == 0)
2504   break;
2505 }
2506
2507 Assert(npages == 0);
2508
2509 /*
2510 * If asked to flush, do so
2511 */
2512 if (LogwrtResult.Flush < WriteRqst.Flush &&
2514 {
2515  /*
2516  * Could get here without iterating above loop, in which case we might
2517  * have no open file or the wrong one. However, we do not need to
2518  * fsync more than one file.
2519  */
2522  {
2523   if (openLogFile >= 0 &&
2526    XLogFileClose();
2527   if (openLogFile < 0)
2528   {
2531    openLogTLI = tli;
2534   }
2535
2537  }
2538
2539  /* signal that we need to wakeup walsenders later */
2541
2543 }
2544
2545 /*
2546 * Update shared-memory status
2547 *
2548 * We make sure that the shared 'request' values do not fall behind the
2549 * 'result' values. This is not absolutely essential, but it saves some
2550 * code in a couple of places.
2551 */
2558
2559 /*
2560 * We write Write first, bar, then Flush. When reading, the opposite must
2561 * be done (with a matching barrier in between), so that we always see a
2562 * Flush value that trails behind the Write value seen.
2563 */
2567
2568#ifdef USE_ASSERT_CHECKING
2569 {
2573
2579
2580  /* WAL written to disk is always ahead of WAL flushed */
2581  Assert(Write >= Flush);
2582
2583  /* WAL inserted to buffers is always ahead of WAL written */
2584  Assert(Insert >= Write);
2585 }
2586#endif
2587}
2588
2589/*
2590 * Record the LSN for an asynchronous transaction commit/abort
2591 * and nudge the WALWriter if there is work for it to do.
2592 * (This should not be called for synchronous commits.)
2593 */
2594void
2596{
2597 XLogRecPtr WriteRqstPtr = asyncXactLSN;
2598 bool sleeping;
2599 bool wakeup = false;
2601
2605 if (XLogCtl->asyncXactLSN < asyncXactLSN)
2606  XLogCtl->asyncXactLSN = asyncXactLSN;
2608
2609 /*
2610 * If somebody else already called this function with a more aggressive
2611 * LSN, they will have done what we needed (and perhaps more).
2612 */
2613 if (asyncXactLSN <= prevAsyncXactLSN)
2614  return;
2615
2616 /*
2617 * If the WALWriter is sleeping, kick it to make it come out of low-power
2618 * mode, so that this async commit will reach disk within the expected
2619 * amount of time. Otherwise, determine whether it has enough WAL
2620 * available to flush, the same way that XLogBackgroundFlush() does.
2621 */
2622 if (sleeping)
2623  wakeup = true;
2624 else
2625 {
2626  int flushblocks;
2627
2629
2630  flushblocks =
2632
2634   wakeup = true;
2635 }
2636
2637 if (wakeup)
2638 {
  /* Wake the walwriter via its process latch, if one is running. */
2639  volatile PROC_HDR *procglobal = ProcGlobal;
2640  ProcNumber walwriterProc = procglobal->walwriterProc;
2641
2642  if (walwriterProc != INVALID_PROC_NUMBER)
2643   SetLatch(&GetPGProcByNumber(walwriterProc)->procLatch);
2644 }
2645}
2646
2647/*
2648 * Record the LSN up to which we can remove WAL because it's not required by
2649 * any replication slot.
2650 */
2651void
2658
2659
2660/*
2661 * Return the oldest LSN we must retain to satisfy the needs of some
2662 * replication slot.
2663 */
2666{
2667 XLogRecPtr retval;
2668
2672
2673 return retval;
2674}
2675
2676/*
2677 * Advance minRecoveryPoint in control file.
2678 *
2679 * If we crash during recovery, we must reach this point again before the
2680 * database is consistent.
2681 *
2682 * If 'force' is true, 'lsn' argument is ignored. Otherwise, minRecoveryPoint
2683 * is only updated if it's not already greater than or equal to 'lsn'.
2684 */
2685static void
2687{
2688 /* Quick check using our local copy of the variable */
2689 if (!updateMinRecoveryPoint || (!force && lsn <= LocalMinRecoveryPoint))
2690  return;
2691
2692 /*
2693 * An invalid minRecoveryPoint means that we need to recover all the WAL,
2694 * i.e., we're doing crash recovery. We never modify the control file's
2695 * value in that case, so we can short-circuit future checks here too. The
2696 * local values of minRecoveryPoint and minRecoveryPointTLI should not be
2697 * updated until crash recovery finishes. We only do this for the startup
2698 * process as it should not update its own reference of minRecoveryPoint
2699 * until it has finished crash recovery to make sure that all WAL
2700 * available is replayed in this case. This also saves from extra locks
2701 * taken on the control file from the startup process.
2702 */
2704 {
2705  updateMinRecoveryPoint = false;
2706  return;
2707 }
2708
2710
2711 /* update local copy */
2714
  /* Invalid control-file value => still in crash recovery; stop updating
   * (see the comment block above). */
2716  updateMinRecoveryPoint = false;
2717 else if (force || LocalMinRecoveryPoint < lsn)
2718 {
2721
2722  /*
2723  * To avoid having to update the control file too often, we update it
2724  * all the way to the last record being replayed, even though 'lsn'
2725  * would suffice for correctness. This also allows the 'force' case
2726  * to not need a valid 'lsn' value.
2727  *
2728  * Another important reason for doing it this way is that the passed
2729  * 'lsn' value could be bogus, i.e., past the end of available WAL, if
2730  * the caller got it from a corrupted heap page. Accepting such a
2731  * value as the min recovery point would prevent us from coming up at
2732  * all. Instead, we just log a warning and continue with recovery.
2733  * (See also the comments about corrupt LSNs in XLogFlush.)
2734  */
2736  if (!force && newMinRecoveryPoint < lsn)
2737   elog(WARNING,
2738    "xlog min recovery request %X/%08X is past current point %X/%08X",
2740
2741  /* update control file */
2743  {
2749
2751    errmsg_internal("updated min recovery point to %X/%08X on timeline %u",
2754  }
2755 }
2757}
2758
2759/*
2760 * Ensure that all XLOG data through the given position is flushed to disk.
2761 *
2762 * NOTE: this differs from XLogWrite mainly in that the WALWriteLock is not
2763 * already held, and we try to avoid acquiring it if possible.
2764 */
2765void
2767{
2771
2772 /*
2773 * During REDO, we are reading not writing WAL. Therefore, instead of
2774 * trying to flush the WAL, we should update minRecoveryPoint instead. We
2775 * test XLogInsertAllowed(), not InRecovery, because we need checkpointer
2776 * to act this way too, and because when it tries to write the
2777 * end-of-recovery checkpoint, it should indeed flush.
2778 */
2779 if (!XLogInsertAllowed())
2780 {
2781  UpdateMinRecoveryPoint(record, false);
2782  return;
2783 }
2784
2785 /* Quick exit if already known flushed */
2786 if (record <= LogwrtResult.Flush)
2787  return;
2788
2789#ifdef WAL_DEBUG
2790 if (XLOG_DEBUG)
2791  elog(LOG, "xlog flush request %X/%08X; write %X/%08X; flush %X/%08X",
2792    LSN_FORMAT_ARGS(record),
2795#endif
2796
2798
2799 /*
2800 * Since fsync is usually a horribly expensive operation, we try to
2801 * piggyback as much data as we can on each fsync: if we see any more data
2802 * entered into the xlog buffer, we'll write and fsync that too, so that
2803 * the final value of LogwrtResult.Flush is as large as possible. This
2804 * gives us some chance of avoiding another fsync immediately after.
2805 */
2806
2807 /* initialize to given target; may increase below */
2808 WriteRqstPtr = record;
2809
2810 /*
2811 * Now wait until we get the write lock, or someone else does the flush
2812 * for us.
2813 */
 /* Each iteration rechecks shared state: another backend may flush past
  * 'record' for us while we wait for the lock. */
2814 for (;;)
2815 {
2817
2818  /* done already? */
2820  if (record <= LogwrtResult.Flush)
2821   break;
2822
2823  /*
2824  * Before actually performing the write, wait for all in-flight
2825  * insertions to the pages we're about to write to finish.
2826  */
2828  if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
2832
2833  /*
2834  * Try to get the write lock. If we can't get it immediately, wait
2835  * until it's released, and recheck if we still need to do the flush
2836  * or if the backend that held the lock did it for us already. This
2837  * helps to maintain a good rate of group committing when the system
2838  * is bottlenecked by the speed of fsyncing.
2839  */
2841  {
2842   /*
2843   * The lock is now free, but we didn't acquire it yet. Before we
2844   * do, loop back to check if someone else flushed the record for
2845   * us already.
2846   */
2847   continue;
2848  }
2849
2850  /* Got the lock; recheck whether request is satisfied */
2852  if (record <= LogwrtResult.Flush)
2853  {
2855   break;
2856  }
2857
2858  /*
2859  * Sleep before flush! By adding a delay here, we may give further
2860  * backends the opportunity to join the backlog of group commit
2861  * followers; this can significantly improve transaction throughput,
2862  * at the risk of increasing transaction latency.
2863  *
2864  * We do not sleep if enableFsync is not turned on, nor if there are
2865  * fewer than CommitSiblings other backends with active transactions.
2866  */
2867  if (CommitDelay > 0 && enableFsync &&
2869  {
2873
2874   /*
2875   * Re-check how far we can now flush the WAL. It's generally not
2876   * safe to call WaitXLogInsertionsToFinish while holding
2877   * WALWriteLock, because an in-progress insertion might need to
2878   * also grab WALWriteLock to make progress. But we know that all
2879   * the insertions up to insertpos have already finished, because
2880   * that's what the earlier WaitXLogInsertionsToFinish() returned.
2881   * We're only calling it again to allow insertpos to be moved
2882   * further forward, not to actually wait for anyone.
2883   */
2885  }
2886
2887  /* try to write/flush later additions to XLOG as well */
2888  WriteRqst.Write = insertpos;
2889  WriteRqst.Flush = insertpos;
2890
2891  XLogWrite(WriteRqst, insertTLI, false);
2892
2894  /* done */
2895  break;
2896 }
2897
2899
2900 /* wake up walsenders now that we've released heavily contended locks */
2902
2903 /*
2904 * If we flushed an LSN that someone was waiting for, notify the waiters.
2905 */
2906 if (waitLSNState &&
2910
2911 /*
2912 * If we still haven't flushed to the request point then we have a
2913 * problem; most likely, the requested flush point is past end of XLOG.
2914 * This has been seen to occur when a disk page has a corrupted LSN.
2915 *
2916 * Formerly we treated this as a PANIC condition, but that hurts the
2917 * system's robustness rather than helping it: we do not want to take down
2918 * the whole system due to corruption on one data page. In particular, if
2919 * the bad page is encountered again during recovery then we would be
2920 * unable to restart the database at all! (This scenario actually
2921 * happened in the field several times with 7.1 releases.) As of 8.4, bad
2922 * LSNs encountered during recovery are UpdateMinRecoveryPoint's problem;
2923 * the only time we can reach here during recovery is while flushing the
2924 * end-of-recovery checkpoint record, and we don't expect that to have a
2925 * bad LSN.
2926 *
2927 * Note that for calls from xact.c, the ERROR will be promoted to PANIC
2928 * since xact.c calls this routine inside a critical section. However,
2929 * calls from bufmgr.c are not within critical sections and so we will not
2930 * force a restart for a bad LSN on a data page.
2931 */
2932 if (LogwrtResult.Flush < record)
2933  elog(ERROR,
2934   "xlog flush request %X/%08X is not satisfied --- flushed only to %X/%08X",
2935   LSN_FORMAT_ARGS(record),
2937
2938 /*
2939 * Cross-check XLogNeedsFlush(). Some of the checks of XLogFlush() and
2940 * XLogNeedsFlush() are duplicated, and this assertion ensures that these
2941 * remain consistent.
2942 */
2943 Assert(!XLogNeedsFlush(record));
2944}
2945
2946/*
2947 * Write & flush xlog, but without specifying exactly where to.
2948 *
2949 * We normally write only completed blocks; but if there is nothing to do on
2950 * that basis, we check for unwritten async commits in the current incomplete
2951 * block, and write through the latest one of those. Thus, if async commits
2952 * are not being used, we will write complete blocks only.
2953 *
2954 * If, based on the above, there's anything to write we do so immediately. But
2955 * to avoid calling fsync, fdatasync et. al. at a rate that'd impact
2956 * concurrent IO, we only flush WAL every wal_writer_delay ms, or if there's
2957 * more than wal_writer_flush_after unflushed blocks.
2958 *
2959 * We can guarantee that async commits reach disk after at most three
2960 * wal_writer_delay cycles. (When flushing complete blocks, we allow XLogWrite
2961 * to write "flexibly", meaning it can stop at the end of the buffer ring;
2962 * this makes a difference only with very high load or long wal_writer_delay,
2963 * but imposes one extra cycle for the worst case for async commits.)
2964 *
2965 * This routine is invoked periodically by the background walwriter process.
2966 *
2967 * Returns true if there was any work to do, even if we skipped flushing due
2968 * to wal_writer_delay/wal_writer_flush_after.
2969 */
2970bool
2972{
2974 bool flexible = true;
2975 static TimestampTz lastflush;
2977 int flushblocks;
2979
2980 /* XLOG doesn't need flushing during recovery */
2981 if (RecoveryInProgress())
2982  return false;
2983
2984 /*
2985 * Since we're not in recovery, InsertTimeLineID is set and can't change,
2986 * so we can read it without a lock.
2987 */
2989
2990 /* read updated LogwrtRqst */
2994
2995 /* back off to last completed page boundary */
2996 WriteRqst.Write -= WriteRqst.Write % XLOG_BLCKSZ;
2997
2998 /* if we have already flushed that far, consider async commit records */
3000 if (WriteRqst.Write <= LogwrtResult.Flush)
3001 {
3005  flexible = false; /* ensure it all gets written */
3006 }
3007
3008 /*
3009 * If already known flushed, we're done. Just need to check if we are
3010 * holding an open file handle to a logfile that's no longer in use,
3011 * preventing the file from being deleted.
3012 */
3013 if (WriteRqst.Write <= LogwrtResult.Flush)
3014 {
3015  if (openLogFile >= 0)
3016  {
3019   {
3020    XLogFileClose();
3021   }
3022  }
3023  return false;
3024 }
3025
3026 /*
3027 * Determine how far to flush WAL, based on the wal_writer_delay and
3028 * wal_writer_flush_after GUCs.
3029 *
3030 * Note that XLogSetAsyncXactLSN() performs similar calculation based on
3031 * wal_writer_flush_after, to decide when to wake us up. Make sure the
3032 * logic is the same in both places if you change this.
3033 */
3035 flushblocks =
3037
3038 if (WalWriterFlushAfter == 0 || lastflush == 0)
3039 {
3040  /* first call, or block based limits disabled */
3041  WriteRqst.Flush = WriteRqst.Write;
3042  lastflush = now;
3043 }
3045 {
3046  /*
3047  * Flush the writes at least every WalWriterDelay ms. This is
3048  * important to bound the amount of time it takes for an asynchronous
3049  * commit to hit disk.
3050  */
3051  WriteRqst.Flush = WriteRqst.Write;
3052  lastflush = now;
3053 }
3054 else if (flushblocks >= WalWriterFlushAfter)
3055 {
3056  /* exceeded wal_writer_flush_after blocks, flush */
3057  WriteRqst.Flush = WriteRqst.Write;
3058  lastflush = now;
3059 }
3060 else
3061 {
3062  /* no flushing, this time round */
3064 }
3065
3066#ifdef WAL_DEBUG
3067 if (XLOG_DEBUG)
3068  elog(LOG, "xlog bg flush request write %X/%08X; flush: %X/%08X, current is write %X/%08X; flush %X/%08X",
3073#endif
3074
3076
3077 /* now wait for any in-progress insertions to finish and get write lock */
 /* Only call XLogWrite if there is still work to do once we hold the lock. */
3081 if (WriteRqst.Write > LogwrtResult.Write ||
3083 {
3085 }
3087
3089
3090 /* wake up walsenders now that we've released heavily contended locks */
3092
3093 /*
3094 * If we flushed an LSN that someone was waiting for, notify the waiters.
3095 */
3096 if (waitLSNState &&
3100
3101 /*
3102 * Great, done. To take some work off the critical path, try to initialize
3103 * as many of the no-longer-needed WAL buffers for future use as we can.
3104 */
3106
3107 /*
3108 * If we determined that we need to write data, but somebody else
3109 * wrote/flushed already, it should be considered as being active, to
3110 * avoid hibernating too early.
3111 */
3112 return true;
3113}
3114
3115/*
3116 * Test whether XLOG data has been flushed up to (at least) the given
3117 * position, or whether the minimum recovery point has been updated past
3118 * the given position.
3119 *
3120 * Returns true if a flush is still needed, or if the minimum recovery point
3121 * must be updated.
3122 *
3123 * It is possible that someone else is already in the process of flushing
3124 * that far, or has updated the minimum recovery point up to the given
3125 * position.
3126 */
3127bool
3129{
3130 /*
3131 * During recovery, we don't flush WAL but update minRecoveryPoint
3132 * instead. So "needs flush" is taken to mean whether minRecoveryPoint
3133 * would need to be updated.
3134 *
3135 * Using XLogInsertAllowed() rather than RecoveryInProgress() matters for
3136 * the case of an end-of-recovery checkpoint, where WAL data is flushed.
3137 * This check should be consistent with the one in XLogFlush().
3138 */
3139 if (!XLogInsertAllowed())
3140 {
3141  /* Quick exit if already known to be updated or cannot be updated */
3143   return false;
3144
3145  /*
3146  * An invalid minRecoveryPoint means that we need to recover all the
3147  * WAL, i.e., we're doing crash recovery. We never modify the control
3148  * file's value in that case, so we can short-circuit future checks
3149  * here too. This triggers a quick exit path for the startup process,
3150  * which cannot update its local copy of minRecoveryPoint as long as
3151  * it has not replayed all WAL available when doing crash recovery.
3152  */
3154  {
3155   updateMinRecoveryPoint = false;
3156   return false;
3157  }
3158
3159  /*
3160  * Update local copy of minRecoveryPoint. But if the lock is busy,
3161  * just return a conservative guess.
3162  */
3164   return true;
3168
3169  /*
3170  * Check minRecoveryPoint for any other process than the startup
3171  * process doing crash recovery, which should not update the control
3172  * file value if crash recovery is still running.
3173  */
3175   updateMinRecoveryPoint = false;
3176
3177  /* check again */
3179   return false;
3180  else
3181   return true;
3182 }
3183
3184 /* Quick exit if already known flushed */
3185 if (record <= LogwrtResult.Flush)
3186  return false;
3187
3188 /* read LogwrtResult and update local state */
3190
3191 /* check again */
3192 if (record <= LogwrtResult.Flush)
3193  return false;
3194
 /* Still not flushed (or min recovery point not advanced) to 'record'. */
3195 return true;
3196}
3197
3198/*
3199 * Try to make a given XLOG file segment exist.
3200 *
3201 * logsegno: identify segment.
3202 *
3203 * *added: on return, true if this call raised the number of extant segments.
3204 *
3205 * path: on return, this char[MAXPGPATH] has the path to the logsegno file.
3206 *
3207 * Returns -1 or FD of opened file. A -1 here is not an error; a caller
3208 * wanting an open segment should attempt to open "path", which usually will
3209 * succeed. (This is weird, but it's efficient for the callers.)
3210 */
3211static int
3213      bool *added, char *path)
3214{
3215 char tmppath[MAXPGPATH];
3218 int fd;
3219 int save_errno;
3222
3223 Assert(logtli != 0);
3224
3226
3227 /*
3228 * Try to use existent file (checkpoint maker may have created it already)
3229 */
3230 *added = false;
3233 if (fd < 0)
3234 {
3235  if (errno != ENOENT)
3236   ereport(ERROR,
3238     errmsg("could not open file \"%s\": %m", path)));
3239 }
3240 else
3241  return fd;
3242
3243 /*
3244 * Initialize an empty (all zeroes) segment. NOTE: it is possible that
3245 * another process is doing the same thing. If so, we will end up
3246 * pre-creating an extra log segment. That seems OK, and better than
3247 * holding the lock throughout this lengthy process.
3248 */
3249 elog(DEBUG2, "creating and filling new WAL file");
3250
3251 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3252
3253 unlink(tmppath);
3254
3257
3258 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3260 if (fd < 0)
3261  ereport(ERROR,
3263    errmsg("could not create file \"%s\": %m", tmppath)));
3264
3265 /* Measure I/O timing when initializing segment */
3267
 /* save_errno records any failure from the fill loop; checked below */
3269 save_errno = 0;
3270 if (wal_init_zero)
3271 {
3272  ssize_t rc;
3273
3274  /*
3275  * Zero-fill the file. With this setting, we do this the hard way to
3276  * ensure that all the file space has really been allocated. On
3277  * platforms that allow "holes" in files, just seeking to the end
3278  * doesn't allocate intermediate space. This way, we know that we
3279  * have all the space and (after the fsync below) that all the
3280  * indirect blocks are down on disk. Therefore, fdatasync(2) or
3281  * O_DSYNC will be sufficient to sync future writes to the log file.
3282  */
3284
3285  if (rc < 0)
3286   save_errno = errno;
3287 }
3288 else
3289 {
3290  /*
3291  * Otherwise, seeking to the end and writing a solitary byte is
3292  * enough.
3293  */
3294  errno = 0;
3295  if (pg_pwrite(fd, "\0", 1, wal_segment_size - 1) != 1)
3296  {
3297   /* if write didn't set errno, assume no disk space */
3299  }
3300 }
3302
3303 /*
3304 * A full segment worth of data is written when using wal_init_zero. One
3305 * byte is written when not using it.
3306 */
3308        io_start, 1,
3310
3311 if (save_errno)
3312 {
3313  /*
3314  * If we fail to make the file, delete it to release disk space
3315  */
3316  unlink(tmppath);
3317
3318  close(fd);
3319
3320  errno = save_errno;
3321
3322  ereport(ERROR,
3324    errmsg("could not write to file \"%s\": %m", tmppath)));
3325 }
3326
3327 /* Measure I/O timing when flushing segment */
3329
3331 if (pg_fsync(fd) != 0)
3332 {
3333  save_errno = errno;
3334  close(fd);
3335  errno = save_errno;
3336  ereport(ERROR,
3338    errmsg("could not fsync file \"%s\": %m", tmppath)));
3339 }
3341
3343       IOOP_FSYNC, io_start, 1, 0);
3344
3345 if (close(fd) != 0)
3346  ereport(ERROR,
3348    errmsg("could not close file \"%s\": %m", tmppath)));
3349
3350 /*
3351 * Now move the segment into place with its final name. Cope with
3352 * possibility that someone else has created the file while we were
3353 * filling ours: if so, use ours to pre-create a future log segment.
3354 */
3356
3357 /*
3358 * XXX: What should we use as max_segno? We used to use XLOGfileslop when
3359 * that was a constant, but that was always a bit dubious: normally, at a
3360 * checkpoint, XLOGfileslop was the offset from the checkpoint record, but
3361 * here, it was the offset from the insert location. We can't do the
3362 * normal XLOGfileslop calculation here because we don't have access to
3363 * the prior checkpoint's redo location. So somewhat arbitrarily, just use
3364 * CheckPointSegments.
3365 */
3368        logtli))
3369 {
3370  *added = true;
3371  elog(DEBUG2, "done creating and filling new WAL file");
3372 }
3373 else
3374 {
3375  /*
3376  * No need for any more future segments, or InstallXLogFileSegment()
3377  * failed to rename the file into place. If the rename failed, a
3378  * caller opening the file may fail.
3379  */
3380  unlink(tmppath);
3381  elog(DEBUG2, "abandoned new WAL file");
3382 }
3383
3384 return -1;
3385}
3386
3387/*
3388 * Create a new XLOG file segment, or open a pre-existing one.
3389 *
3390 * logsegno: identify segment to be created/opened.
3391 *
3392 * Returns FD of opened file.
3393 *
3394 * Note: errors here are ERROR not PANIC because we might or might not be
3395 * inside a critical section (eg, during checkpoint there is no reason to
3396 * take down the system on failure). They will promote to PANIC if we are
3397 * in a critical section.
3398 */
3399int
3401{
3402 bool ignore_added;
3403 char path[MAXPGPATH];
3404 int fd;
3405
3406 Assert(logtli != 0);
3407
3409 if (fd >= 0)
3410  return fd;
3411
3412 /* Now open original target segment (might not be file I just made) */
 /* See XLogFileInitInternal: it may return -1 with "path" ready to open. */
3415 if (fd < 0)
3416  ereport(ERROR,
3418    errmsg("could not open file \"%s\": %m", path)));
3419 return fd;
3420}
3421
3422/*
3423 * Create a new XLOG file segment by copying a pre-existing one.
3424 *
3425 * destsegno: identify segment to be created.
3426 *
3427 * srcTLI, srcsegno: identify segment to be copied (could be from
3428 * a different timeline)
3429 *
3430 * upto: how much of the source file to copy (the rest is filled with
3431 * zeros)
3432 *
3433 * Currently this is only used during recovery, and so there are no locking
3434 * considerations. But we should be just as tense as XLogFileInit to avoid
3435 * emplacing a bogus file.
3436 */
3437static void
3440    int upto)
3441{
3442 char path[MAXPGPATH];
3443 char tmppath[MAXPGPATH];
3444 PGAlignedXLogBlock buffer;
3445 int srcfd;
3446 int fd;
3447 int nbytes;
3448
3449 /*
3450 * Open the source file
3451 */
3454 if (srcfd < 0)
3455  ereport(ERROR,
3457    errmsg("could not open file \"%s\": %m", path)));
3458
3459 /*
3460 * Copy into a temp file name.
3461 */
3462 snprintf(tmppath, MAXPGPATH, XLOGDIR "/xlogtemp.%d", (int) getpid());
3463
3464 unlink(tmppath);
3465
3466 /* do not use get_sync_bit() here --- want to fsync only at end of fill */
3468 if (fd < 0)
3469  ereport(ERROR,
3471    errmsg("could not create file \"%s\": %m", tmppath)));
3472
3473 /*
3474 * Do the data copying.
3475 */
 /* One buffer-sized chunk per iteration, wal_segment_size bytes total. */
3476 for (nbytes = 0; nbytes < wal_segment_size; nbytes += sizeof(buffer))
3477 {
3478  int nread;
3479
3480  nread = upto - nbytes;
3481
3482  /*
3483  * The part that is not read from the source file is filled with
3484  * zeros.
3485  */
3486  if (nread < sizeof(buffer))
3487   memset(buffer.data, 0, sizeof(buffer));
3488
3489  if (nread > 0)
3490  {
3491   int r;
3492
3493   if (nread > sizeof(buffer))
3494    nread = sizeof(buffer);
3496   r = read(srcfd, buffer.data, nread);
3497   if (r != nread)
3498   {
3499    if (r < 0)
3500     ereport(ERROR,
3502       errmsg("could not read file \"%s\": %m",
3503         path)));
3504    else
3505     ereport(ERROR,
3507       errmsg("could not read file \"%s\": read %d of %zu",
3508         path, r, (Size) nread)));
3509   }
3511  }
3512  errno = 0;
3514  if ((int) write(fd, buffer.data, sizeof(buffer)) != (int) sizeof(buffer))
3515  {
3516   int save_errno = errno;
3517
3518   /*
3519   * If we fail to make the file, delete it to release disk space
3520   */
3521   unlink(tmppath);
3522   /* if write didn't set errno, assume problem is no disk space */
3524
3525   ereport(ERROR,
3527     errmsg("could not write to file \"%s\": %m", tmppath)));
3528  }
3530 }
3531
3533 if (pg_fsync(fd) != 0)
3536    errmsg("could not fsync file \"%s\": %m", tmppath)));
3538
3539 if (CloseTransientFile(fd) != 0)
3540  ereport(ERROR,
3542    errmsg("could not close file \"%s\": %m", tmppath)));
3543
3544 if (CloseTransientFile(srcfd) != 0)
3545  ereport(ERROR,
3547    errmsg("could not close file \"%s\": %m", path)));
3548
3549 /*
3550 * Now move the segment into place with its final name.
3551 */
3553  elog(ERROR, "InstallXLogFileSegment should not have failed");
3554}
3555
3556/*
3557 * Install a new XLOG segment file as a current or future log segment.
3558 *
3559 * This is used both to install a newly-created segment (which has a temp
3560 * filename while it's being created) and to recycle an old segment.
3561 *
3562 * *segno: identify segment to install as (or first possible target).
3563 * When find_free is true, this is modified on return to indicate the
3564 * actual installation location or last segment searched.
3565 *
3566 * tmppath: initial name of file to install. It will be renamed into place.
3567 *
3568 * find_free: if true, install the new segment at the first empty segno
3569 * number at or after the passed numbers. If false, install the new segment
3570 * exactly where specified, deleting any existing segment file there.
3571 *
3572 * max_segno: maximum segment number to install the new file as. Fail if no
3573 * free slot is found between *segno and max_segno. (Ignored when find_free
3574 * is false.)
3575 *
3576 * tli: The timeline on which the new segment should be installed.
3577 *
3578 * Returns true if the file was installed successfully. false indicates that
3579 * max_segno limit was exceeded, the startup process has disabled this
3580 * function for now, or an error occurred while renaming the file into place.
3581 */
3582static bool
3585{
3586 char path[MAXPGPATH];
3587 struct stat stat_buf;
3588
3589 Assert(tli != 0);
3590
3591 XLogFilePath(path, tli, *segno, wal_segment_size);
3592
3595 {
3597  return false;
3598 }
3599
3600 if (!find_free)
3601 {
3602  /* Force installation: get rid of any pre-existing segment file */
3603  durable_unlink(path, DEBUG1);
3604 }
3605 else
3606 {
3607  /* Find a free slot to put it in */
3608  while (stat(path, &stat_buf) == 0)
3609  {
3610   if ((*segno) >= max_segno)
3611   {
3612    /* Failed to find a free slot within specified range */
3614    return false;
3615   }
3616   (*segno)++;
3617   XLogFilePath(path, tli, *segno, wal_segment_size);
3618  }
3619 }
3620
 /* Target path must be free now: we either unlinked it above or probed
  * past every existing segment in the stat() loop. */
3621 Assert(access(path, F_OK) != 0 && errno == ENOENT);
3622 if (durable_rename(tmppath, path, LOG) != 0)
3623 {
3625  /* durable_rename already emitted log message */
3626  return false;
3627 }
3628
3630
3631 return true;
3632}
3633
3634/*
3635 * Open a pre-existing logfile segment for writing.
3636 *
3637 * Returns the open file descriptor; any failure to open is treated as
3638 * unrecoverable (PANIC), since the server cannot continue without its
3639 * current WAL segment.
3637 */
3637int
3639{
3640 char path[MAXPGPATH];
3641 int fd;
3642
3643 XLogFilePath(path, tli, segno, wal_segment_size);
3644
 /*
  * NOTE(review): the actual open call was elided by the extraction
  * (source lines 3645-3646); fd is its result.
  */
3647 if (fd < 0)
3648 ereport(PANIC,
3650 errmsg("could not open file \"%s\": %m", path)));
3651
3652 return fd;
3653}
3654
3655/*
3656 * Close the current logfile segment for writing.
3657 *
3658 * Closes the file referenced by the file-scope openLogFile descriptor and
3659 * resets it to -1. A failed close() is reported at PANIC level.
3657 */
3658static void
3660{
3661 Assert(openLogFile >= 0);
3662
3663 /*
3664 * WAL segment files will not be re-read in normal operation, so we advise
3665 * the OS to release any cached pages. But do not do so if WAL archiving
3666 * or streaming is active, because archiver and walsender process could
3667 * use the cache to read the WAL segment.
3668 */
3669#if defined(USE_POSIX_FADVISE) && defined(POSIX_FADV_DONTNEED)
3670 if (!XLogIsNeeded() && (io_direct_flags & IO_DIRECT_WAL) == 0)
3672#endif
3673
3674 if (close(openLogFile) != 0)
3675 {
3676 char xlogfname[MAXFNAMELEN];
3677 int save_errno = errno;
3678
 /*
  * Preserve errno across the intervening (elided) filename-formatting
  * call so that %m below reports the close() failure, not a later one.
  */
3680 errno = save_errno;
3681 ereport(PANIC,
3683 errmsg("could not close file \"%s\": %m", xlogfname)));
3684 }
3685
3686 openLogFile = -1;
3688}
3689
3690/*
3691 * Preallocate log files beyond the specified log endpoint.
3692 *
3693 * XXX this is currently extremely conservative, since it forces only one
3694 * future log segment to exist, and even that only if we are 75% done with
3695 * the current one. This is only appropriate for very low-WAL-volume systems.
3696 * High-volume systems will be OK once they've built up a sufficient set of
3697 * recycled log segments, but the startup transient is likely to include
3698 * a lot of segment creations by foreground processes, which is not so good.
3699 *
3700 * XLogFileInitInternal() can ereport(ERROR). All known causes indicate big
3701 * trouble; for example, a full filesystem is one cause. The checkpoint WAL
3702 * and/or ControlFile updates already completed. If a RequestCheckpoint()
3703 * initiated the present checkpoint and an ERROR ends this function, the
3704 * command that called RequestCheckpoint() fails. That's not ideal, but it's
3705 * not worth contorting more functions to use caller-specified elevel values.
3706 * (With or without RequestCheckpoint(), an ERROR forestalls some inessential
3707 * reporting and resource reclamation.)
3708 */
3709static void
3711{
3713 int lf;
3714 bool added;
3715 char path[MAXPGPATH];
3716 uint64 offset;
3717
 /*
  * NOTE(review): the guard condition was elided by the extraction (source
  * line 3718); per the comment on the return below, it is an unlocked
  * "should we bother?" check -- confirm against upstream.
  */
3719 return; /* unlocked check says no */
3720
 /* Only preallocate when >= 75% through the current segment. */
3722 offset = XLogSegmentOffset(endptr - 1, wal_segment_size);
3723 if (offset >= (uint32) (0.75 * wal_segment_size))
3724 {
3725 _logSegNo++;
3726 lf = XLogFileInitInternal(_logSegNo, tli, &added, path);
3727 if (lf >= 0)
3728 close(lf);
3729 if (added)
3731 }
3732}
3733
3734/*
3735 * Throws an error if the given log segment has already been removed or
3736 * recycled. The caller should only pass a segment that it knows to have
3737 * existed while the server has been running, as this function always
3738 * succeeds if no WAL segments have been removed since startup.
3739 * 'tli' is only used in the error message.
3740 *
3741 * Note: this function guarantees to keep errno unchanged on return.
3742 * This supports callers that use this to possibly deliver a better
3743 * error message about a missing file, while still being able to throw
3744 * a normal file-access error afterwards, if this does return.
3745 */
3746void
3748{
3749 int save_errno = errno;
3750 XLogSegNo lastRemovedSegNo;
3751
 /*
  * Read the shared last-removed pointer.  NOTE(review): the surrounding
  * lock/unlock calls were elided by the extraction (lines 3752/3754).
  */
3753 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3755
3756 if (segno <= lastRemovedSegNo)
3757 {
3758 char filename[MAXFNAMELEN];
3759
 /* Restore errno before throwing, honoring the contract above. */
3761 errno = save_errno;
3762 ereport(ERROR,
3764 errmsg("requested WAL segment %s has already been removed",
3765 filename)));
3766 }
 /* errno is restored on the non-error path too. */
3767 errno = save_errno;
3768}
3769
3770/*
3771 * Return the last WAL segment removed, or 0 if no segment has been removed
3772 * since startup.
3773 *
3774 * NB: the result can be out of date arbitrarily fast, the caller has to deal
3775 * with that.
3776 */
 /*
  * NOTE(review): the return type and signature lines were elided by the
  * extraction (source lines 3777-3778); this is XLogGetLastRemovedSegno,
  * returning XLogSegNo -- confirm against upstream.
  */
3779{
3780 XLogSegNo lastRemovedSegNo;
3781
 /* Shared read; lock/unlock around it were elided (lines 3782/3784). */
3783 lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
3785
3786 return lastRemovedSegNo;
3787}
3788
3789/*
3790 * Return the oldest WAL segment on the given TLI that still exists in
3791 * XLOGDIR, or 0 if none.
3792 *
3793 * Scans the pg_wal directory, parsing each WAL segment filename and
3794 * tracking the smallest segment number seen for the requested timeline.
3792 */
3795{
3796 DIR *xldir;
3797 struct dirent *xlde;
3799
3801 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3802 {
3805
3806 /* Ignore files that are not XLOG segments. */
3807 if (!IsXLogFileName(xlde->d_name))
3808 continue;
3809
3810 /* Parse filename to get TLI and segno. */
 /* NOTE(review): the parsing call was elided (lines 3811-3812). */
3813
3814 /* Ignore anything that's not from the TLI of interest. */
3815 if (tli != file_tli)
3816 continue;
3817
3818 /* If it's the oldest so far, update oldest_segno. */
3819 if (oldest_segno == 0 || file_segno < oldest_segno)
3821 }
3822
3823 FreeDir(xldir);
3824 return oldest_segno;
3825}
3826
3827/*
3828 * Update the last removed segno pointer in shared memory, to reflect that the
3829 * given XLOG file has been removed.
3830 *
3831 * Only moves the pointer forward: an older segment name never lowers the
3832 * recorded high-water mark.
3830 */
3831static void
3833{
3834 uint32 tli;
3835 XLogSegNo segno;
3836
 /* NOTE(review): the filename-parsing call was elided (line 3837). */
3838
 /* Shared update; lock/unlock around it were elided (lines 3839/3842). */
3840 if (segno > XLogCtl->lastRemovedSegNo)
3841 XLogCtl->lastRemovedSegNo = segno;
3843}
3844
3845/*
3846 * Remove all temporary log files in pg_wal
3847 *
3848 * This is called at the beginning of recovery after a previous crash,
3849 * at a point where no other processes write fresh WAL data.
3850 */
3851static void
3853{
3854 DIR *xldir;
3855 struct dirent *xlde;
3856
3857 elog(DEBUG2, "removing all temporary WAL segments");
3858
3860 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3861 {
3862 char path[MAXPGPATH];
3863
 /* 9 == strlen("xlogtemp."); skip anything without that prefix. */
3864 if (strncmp(xlde->d_name, "xlogtemp.", 9) != 0)
3865 continue;
3866
 /* unlink() result is deliberately ignored: best-effort cleanup. */
3867 snprintf(path, MAXPGPATH, XLOGDIR "/%s", xlde->d_name);
3868 unlink(path);
3869 elog(DEBUG2, "removed temporary WAL segment \"%s\"", path);
3870 }
3871 FreeDir(xldir);
3872}
3873
3874/*
3875 * Recycle or remove all log files older or equal to passed segno.
3876 *
3877 * endptr is current (or recent) end of xlog, and lastredoptr is the
3878 * redo pointer of the last checkpoint. These are used to determine
3879 * whether we want to recycle rather than delete no-longer-wanted log files.
3880 *
3881 * insertTLI is the current timeline for XLOG insertion. Any recycled
3882 * segments should be reused for this timeline.
3883 */
3884static void
3887{
3888 DIR *xldir;
3889 struct dirent *xlde;
3890 char lastoff[MAXFNAMELEN];
3893
3894 /* Initialize info about where to try to recycle to */
3897
3898 /*
3899 * Construct a filename of the last segment to be kept. The timeline ID
3900 * doesn't matter, we ignore that in the comparison. (During recovery,
3901 * InsertTimeLineID isn't set, so we can't use that.)
3902 */
3904
3905 elog(DEBUG2, "attempting to remove WAL segments older than log file %s",
3906 lastoff);
3907
3909
3910 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3911 {
3912 /* Ignore files that are not XLOG segments */
3913 if (!IsXLogFileName(xlde->d_name) &&
3914 !IsPartialXLogFileName(xlde->d_name))
3915 continue;
3916
3917 /*
3918 * We ignore the timeline part of the XLOG segment identifiers in
3919 * deciding whether a segment is still needed. This ensures that we
3920 * won't prematurely remove a segment from a parent timeline. We could
3921 * probably be a little more proactive about removing segments of
3922 * non-parent timelines, but that would be a whole lot more
3923 * complicated.
3924 *
3925 * We use the alphanumeric sorting property of the filenames to decide
3926 * which ones are earlier than the lastoff segment.
3927 */
 /* "+ 8" skips the 8-hex-digit timeline ID prefix of the filename. */
3928 if (strcmp(xlde->d_name + 8, lastoff + 8) <= 0)
3929 {
3930 if (XLogArchiveCheckDone(xlde->d_name))
3931 {
3932 /* Update the last removed location in shared memory first */
3933 UpdateLastRemovedPtr(xlde->d_name);
3934
 /* NOTE(review): the RemoveXlogFile() call was elided (line 3935). */
3936 }
3937 }
3938 }
3939
3940 FreeDir(xldir);
3941}
3942
3943/*
3944 * Recycle or remove WAL files that are not part of the given timeline's
3945 * history.
3946 *
3947 * This is called during recovery, whenever we switch to follow a new
3948 * timeline, and at the end of recovery when we create a new timeline. We
3949 * wouldn't otherwise care about extra WAL files lying in pg_wal, but they
3950 * might be leftover pre-allocated or recycled WAL segments on the old timeline
3951 * that we haven't used yet, and contain garbage. If we just leave them in
3952 * pg_wal, they will eventually be archived, and we can't let that happen.
3953 * Files that belong to our timeline history are valid, because we have
3954 * successfully replayed them, but from others we can't be sure.
3955 *
3956 * 'switchpoint' is the current point in WAL where we switch to new timeline,
3957 * and 'newTLI' is the new timeline we switch to.
3958 */
3959void
3961{
3962 DIR *xldir;
3963 struct dirent *xlde;
3964 char switchseg[MAXFNAMELEN];
3968
3969 /*
3970 * Initialize info about where to begin the work. This will recycle,
3971 * somewhat arbitrarily, 10 future segments.
3972 */
3976
3977 /*
3978 * Construct a filename of the last segment to be kept.
3979 */
3981
3982 elog(DEBUG2, "attempting to remove WAL segments newer than log file %s",
3983 switchseg);
3984
3986
3987 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
3988 {
3989 /* Ignore files that are not XLOG segments */
3990 if (!IsXLogFileName(xlde->d_name))
3991 continue;
3992
3993 /*
3994 * Remove files that are on a timeline older than the new one we're
3995 * switching to, but with a segment number >= the first segment on the
3996 * new timeline.
3997 */
 /*
  * First 8 hex digits are the TLI; the rest is the segment number.
  * strncmp(.., 8) compares timelines, the "+ 8" strcmp compares segnos.
  */
3998 if (strncmp(xlde->d_name, switchseg, 8) < 0 &&
3999 strcmp(xlde->d_name + 8, switchseg + 8) > 0)
4000 {
4001 /*
4002 * If the file has already been marked as .ready, however, don't
4003 * remove it yet. It should be OK to remove it - files that are
4004 * not part of our timeline history are not required for recovery
4005 * - but seems safer to let them be archived and removed later.
4006 */
4007 if (!XLogArchiveIsReady(xlde->d_name))
 /* NOTE(review): the RemoveXlogFile() call was elided (line 4008). */
4009 }
4010 }
4011
4012 FreeDir(xldir);
4013}
4014
4015/*
4016 * Recycle or remove a log file that's no longer needed.
4017 *
4018 * segment_de is the dirent structure of the segment to recycle or remove.
4019 * recycleSegNo is the segment number to recycle up to. endlogSegNo is
4020 * the segment number of the current (or recent) end of WAL.
4021 *
4022 * endlogSegNo gets incremented if the segment is recycled so as it is not
4023 * checked again with future callers of this function.
4024 *
4025 * insertTLI is the current timeline for XLOG insertion. Any recycled segments
4026 * should be used for this timeline.
4027 */
4028static void
4032{
4033 char path[MAXPGPATH];
4034#ifdef WIN32
4035 char newpath[MAXPGPATH];
4036#endif
4037 const char *segname = segment_de->d_name;
4038
4039 snprintf(path, MAXPGPATH, XLOGDIR "/%s", segname);
4040
4041 /*
4042 * Before deleting the file, see if it can be recycled as a future log
4043 * segment. Only recycle normal files, because we don't want to recycle
4044 * symbolic links pointing to a separate archive directory.
4045 */
4046 if (wal_recycle &&
4048 XLogCtl->InstallXLogFileSegmentActive && /* callee rechecks this */
4049 get_dirent_type(path, segment_de, false, DEBUG2) == PGFILETYPE_REG &&
4051 true, recycleSegNo, insertTLI))
4052 {
4054 (errmsg_internal("recycled write-ahead log file \"%s\"",
4055 segname)));
4057 /* Needn't recheck that slot on future iterations */
4058 (*endlogSegNo)++;
4059 }
4060 else
4061 {
4062 /* No need for any more future segments, or recycling failed ... */
4063 int rc;
4064
4066 (errmsg_internal("removing write-ahead log file \"%s\"",
4067 segname)));
4068
4069#ifdef WIN32
4070
4071 /*
4072 * On Windows, if another process (e.g another backend) holds the file
4073 * open in FILE_SHARE_DELETE mode, unlink will succeed, but the file
4074 * will still show up in directory listing until the last handle is
4075 * closed. To avoid confusing the lingering deleted file for a live
4076 * WAL file that needs to be archived, rename it before deleting it.
4077 *
4078 * If another process holds the file open without FILE_SHARE_DELETE
4079 * flag, rename will fail. We'll try again at the next checkpoint.
4080 */
4081 snprintf(newpath, MAXPGPATH, "%s.deleted", path);
4082 if (rename(path, newpath) != 0)
4083 {
4084 ereport(LOG,
4086 errmsg("could not rename file \"%s\": %m",
4087 path)));
4088 return;
4089 }
4090 rc = durable_unlink(newpath, LOG);
4091#else
4092 rc = durable_unlink(path, LOG);
4093#endif
4094 if (rc != 0)
4095 {
4096 /* Message already logged by durable_unlink() */
4097 return;
4098 }
 /* NOTE(review): archive-status cleanup call elided here (line 4099). */
4100 }
4101
4103}
4104
4105/*
4106 * Verify whether pg_wal, pg_wal/archive_status, and pg_wal/summaries exist.
4107 * If the latter do not exist, recreate them.
4108 *
4109 * It is not the goal of this function to verify the contents of these
4110 * directories, but to help in cases where someone has performed a cluster
4111 * copy for PITR purposes but omitted pg_wal from the copy.
4112 *
4113 * We could also recreate pg_wal if it doesn't exist, but a deliberate
4114 * policy decision was made not to. It is fairly common for pg_wal to be
4115 * a symlink, and if that was the DBA's intent then automatically making a
4116 * plain directory would result in degraded performance with no notice.
4117 */
4118static void
4120{
4121 char path[MAXPGPATH];
4122 struct stat stat_buf;
4123
4124 /* Check for pg_wal; if it doesn't exist, error out */
4125 if (stat(XLOGDIR, &stat_buf) != 0 ||
4126 !S_ISDIR(stat_buf.st_mode))
4127 ereport(FATAL,
4129 errmsg("required WAL directory \"%s\" does not exist",
4130 XLOGDIR)));
4131
4132 /* Check for archive_status */
4133 snprintf(path, MAXPGPATH, XLOGDIR "/archive_status");
4134 if (stat(path, &stat_buf) == 0)
4135 {
4136 /* Check for weird cases where it exists but isn't a directory */
4137 if (!S_ISDIR(stat_buf.st_mode))
4138 ereport(FATAL,
4140 errmsg("required WAL directory \"%s\" does not exist",
4141 path)));
4142 }
4143 else
4144 {
 /* archive_status is missing entirely; recreate it. */
4145 ereport(LOG,
4146 (errmsg("creating missing WAL directory \"%s\"", path)));
4147 if (MakePGDirectory(path) < 0)
4148 ereport(FATAL,
4150 errmsg("could not create missing directory \"%s\": %m",
4151 path)));
4152 }
4153
4154 /* Check for summaries */
4155 snprintf(path, MAXPGPATH, XLOGDIR "/summaries");
4156 if (stat(path, &stat_buf) == 0)
4157 {
4158 /* Check for weird cases where it exists but isn't a directory */
4159 if (!S_ISDIR(stat_buf.st_mode))
4160 ereport(FATAL,
4161 (errmsg("required WAL directory \"%s\" does not exist",
4162 path)));
4163 }
4164 else
4165 {
 /* summaries is missing entirely; recreate it. */
4166 ereport(LOG,
4167 (errmsg("creating missing WAL directory \"%s\"", path)));
4168 if (MakePGDirectory(path) < 0)
4169 ereport(FATAL,
4170 (errmsg("could not create missing directory \"%s\": %m",
4171 path)));
4172 }
4173}
4174
4175/*
4176 * Remove previous backup history files. This also retries creation of
4177 * .ready files for any backup history files for which XLogArchiveNotify
4178 * failed earlier.
4179 */
4180static void
4182{
4183 DIR *xldir;
4184 struct dirent *xlde;
4185 char path[MAXPGPATH + sizeof(XLOGDIR)];
4186
4188
4189 while ((xlde = ReadDir(xldir, XLOGDIR)) != NULL)
4190 {
4191 if (IsBackupHistoryFileName(xlde->d_name))
4192 {
 /* XLogArchiveCheckDone() also retries the .ready creation per above. */
4193 if (XLogArchiveCheckDone(xlde->d_name))
4194 {
4195 elog(DEBUG2, "removing WAL backup history file \"%s\"",
4196 xlde->d_name);
4197 snprintf(path, sizeof(path), XLOGDIR "/%s", xlde->d_name);
 /* Best-effort: unlink() result is intentionally ignored. */
4198 unlink(path);
4199 XLogArchiveCleanup(xlde->d_name);
4200 }
4201 }
4202 }
4203
4204 FreeDir(xldir);
4205}
4206
4207/*
4208 * I/O routines for pg_control
4209 *
4210 * *ControlFile is a buffer in shared memory that holds an image of the
4211 * contents of pg_control. WriteControlFile() initializes pg_control
4212 * given a preloaded buffer, ReadControlFile() loads the buffer from
4213 * the pg_control file (during postmaster or standalone-backend startup),
4214 * and UpdateControlFile() rewrites pg_control after we modify xlog state.
4215 * InitControlFile() fills the buffer with initial values.
4216 *
4217 * For simplicity, WriteControlFile() initializes the fields of pg_control
4218 * that are related to checking backend/database compatibility, and
4219 * ReadControlFile() verifies they are correct. We could split out the
4220 * I/O and compatibility-check functions, but there seems no need currently.
4221 */
4222
4223static void
4224InitControlFile(uint64 sysidentifier, uint32 data_checksum_version)
4225{
4227
4228 /*
4229 * Generate a random nonce. This is used for authentication requests that
4230 * will fail because the user does not exist. The nonce is used to create
4231 * a genuine-looking password challenge for the non-existent user, in lieu
4232 * of an actual stored password.
4233 */
 /*
  * NOTE(review): the random-generation call was elided by the extraction
  * (line 4234); the PANIC below fires when it fails.
  */
4235 ereport(PANIC,
4237 errmsg("could not generate secret authorization token")));
4238
 /* Start from an all-zeroes control file, then fill in known fields. */
4239 memset(ControlFile, 0, sizeof(ControlFileData));
4240 /* Initialize pg_control status fields */
4241 ControlFile->system_identifier = sysidentifier;
4245
4246 /* Set important parameter values for use when replaying WAL */
4255 ControlFile->data_checksum_version = data_checksum_version;
4256}
4257
 /*
  * Write ControlFile out to pg_control, zero-padded to PG_CONTROL_FILE_SIZE,
  * then fsync it.  Also initializes the compatibility-check fields first
  * (see the section comment above the pg_control I/O routines).
  */
4258static void
4260{
4261 int fd;
4262 char buffer[PG_CONTROL_FILE_SIZE]; /* need not be aligned */
4263
4264 /*
4265 * Initialize version and compatibility-check fields
4266 */
4269
4272
4278
4281
4284
4285 ControlFile->float8ByVal = true; /* vestigial */
4286
4287 /*
4288 * Initialize the default 'char' signedness.
4289 *
4290 * The signedness of the char type is implementation-defined. For instance
4291 * on x86 architecture CPUs, the char data type is typically treated as
4292 * signed by default, whereas on aarch architecture CPUs, it is typically
4293 * treated as unsigned by default. In v17 or earlier, we accidentally let
4294 * C implementation signedness affect persistent data. This led to
4295 * inconsistent results when comparing char data across different
4296 * platforms.
4297 *
4298 * This flag can be used as a hint to ensure consistent behavior for
4299 * pre-v18 data files that store data sorted by the 'char' type on disk,
4300 * especially in cross-platform replication scenarios.
4301 *
4302 * Newly created database clusters unconditionally set the default char
4303 * signedness to true. pg_upgrade changes this flag for clusters that were
4304 * initialized on signedness=false platforms. As a result,
4305 * signedness=false setting will become rare over time. If we had known
4306 * about this problem during the last development cycle that forced initdb
4307 * (v8.3), we would have made all clusters signed or all clusters
4308 * unsigned. Making pg_upgrade the only source of signedness=false will
4309 * cause the population of database clusters to converge toward that
4310 * retrospective ideal.
4311 */
4313
4314 /* Contents are protected with a CRC */
 /* NOTE(review): the CRC computation calls were elided (lines 4315-4319). */
4320
4321 /*
4322 * We write out PG_CONTROL_FILE_SIZE bytes into pg_control, zero-padding
4323 * the excess over sizeof(ControlFileData). This reduces the odds of
4324 * premature-EOF errors when reading pg_control. We'll still fail when we
4325 * check the contents of the file, but hopefully with a more specific
4326 * error than "couldn't read pg_control".
4327 */
4328 memset(buffer, 0, PG_CONTROL_FILE_SIZE);
4329 memcpy(buffer, ControlFile, sizeof(ControlFileData));
4330
4333 if (fd < 0)
4334 ereport(PANIC,
4336 errmsg("could not create file \"%s\": %m",
4338
4339 errno = 0;
4342 {
4343 /* if write didn't set errno, assume problem is no disk space */
4344 if (errno == 0)
4345 errno = ENOSPC;
4346 ereport(PANIC,
4348 errmsg("could not write to file \"%s\": %m",
4350 }
4352
4354 if (pg_fsync(fd) != 0)
4355 ereport(PANIC,
4357 errmsg("could not fsync file \"%s\": %m",
4360
4361 if (close(fd) != 0)
4362 ereport(PANIC,
4364 errmsg("could not close file \"%s\": %m",
4366}
4367
 /*
  * Read pg_control into the shared ControlFile buffer, verify its version
  * and CRC, and check every compile-time compatibility constant against
  * the values recorded at initdb time.  Any mismatch is FATAL.
  */
4368static void
4370{
4371 pg_crc32c crc;
4372 int fd;
4373 char wal_segsz_str[20];
4374 int r;
4375
4376 /*
4377 * Read data...
4378 */
4380 O_RDWR | PG_BINARY);
4381 if (fd < 0)
4382 ereport(PANIC,
4384 errmsg("could not open file \"%s\": %m",
4386
4388 r = read(fd, ControlFile, sizeof(ControlFileData));
4389 if (r != sizeof(ControlFileData))
4390 {
 /* r < 0 is a hard read error; a short read gets its own message. */
4391 if (r < 0)
4392 ereport(PANIC,
4394 errmsg("could not read file \"%s\": %m",
4396 else
4397 ereport(PANIC,
4399 errmsg("could not read file \"%s\": read %d of %zu",
4400 XLOG_CONTROL_FILE, r, sizeof(ControlFileData))));
4401 }
4403
4404 close(fd);
4405
4406 /*
4407 * Check for expected pg_control format version. If this is wrong, the
4408 * CRC check will likely fail because we'll be checking the wrong number
4409 * of bytes. Complaining about wrong version will probably be more
4410 * enlightening than complaining about wrong CRC.
4411 */
4412
4414 ereport(FATAL,
4416 errmsg("database files are incompatible with server"),
4417 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d (0x%08x),"
4418 " but the server was compiled with PG_CONTROL_VERSION %d (0x%08x).",
4421 errhint("This could be a problem of mismatched byte ordering. It looks like you need to initdb.")));
4422
4424 ereport(FATAL,
4426 errmsg("database files are incompatible with server"),
4427 errdetail("The database cluster was initialized with PG_CONTROL_VERSION %d,"
4428 " but the server was compiled with PG_CONTROL_VERSION %d.",
4430 errhint("It looks like you need to initdb.")));
4431
4432 /* Now check the CRC. */
 /* NOTE(review): the CRC computation calls were elided (lines 4433-4436). */
4437 FIN_CRC32C(crc);
4438
4439 if (!EQ_CRC32C(crc, ControlFile->crc))
4440 ereport(FATAL,
4442 errmsg("incorrect checksum in control file")));
4443
4444 /*
4445 * Do compatibility checking immediately. If the database isn't
4446 * compatible with the backend executable, we want to abort before we can
4447 * possibly do any damage.
4448 */
4450 ereport(FATAL,
4452 errmsg("database files are incompatible with server"),
4453 /* translator: %s is a variable name and %d is its value */
4454 errdetail("The database cluster was initialized with %s %d,"
4455 " but the server was compiled with %s %d.",
4456 "CATALOG_VERSION_NO", ControlFile->catalog_version_no,
4457 "CATALOG_VERSION_NO", CATALOG_VERSION_NO),
4458 errhint("It looks like you need to initdb.")));
4460 ereport(FATAL,
4462 errmsg("database files are incompatible with server"),
4463 /* translator: %s is a variable name and %d is its value */
4464 errdetail("The database cluster was initialized with %s %d,"
4465 " but the server was compiled with %s %d.",
4466 "MAXALIGN", ControlFile->maxAlign,
4467 "MAXALIGN", MAXIMUM_ALIGNOF),
4468 errhint("It looks like you need to initdb.")));
4470 ereport(FATAL,
4472 errmsg("database files are incompatible with server"),
4473 errdetail("The database cluster appears to use a different floating-point number format than the server executable."),
4474 errhint("It looks like you need to initdb.")));
4475 if (ControlFile->blcksz != BLCKSZ)
4476 ereport(FATAL,
4478 errmsg("database files are incompatible with server"),
4479 /* translator: %s is a variable name and %d is its value */
4480 errdetail("The database cluster was initialized with %s %d,"
4481 " but the server was compiled with %s %d.",
4482 "BLCKSZ", ControlFile->blcksz,
4483 "BLCKSZ", BLCKSZ),
4484 errhint("It looks like you need to recompile or initdb.")));
4486 ereport(FATAL,
4488 errmsg("database files are incompatible with server"),
4489 /* translator: %s is a variable name and %d is its value */
4490 errdetail("The database cluster was initialized with %s %d,"
4491 " but the server was compiled with %s %d.",
4492 "RELSEG_SIZE", ControlFile->relseg_size,
4493 "RELSEG_SIZE", RELSEG_SIZE),
4494 errhint("It looks like you need to recompile or initdb.")));
4496 ereport(FATAL,
4498 errmsg("database files are incompatible with server"),
4499 /* translator: %s is a variable name and %d is its value */
4500 errdetail("The database cluster was initialized with %s %d,"
4501 " but the server was compiled with %s %d.",
4502 "SLRU_PAGES_PER_SEGMENT", ControlFile->slru_pages_per_segment,
4503 "SLRU_PAGES_PER_SEGMENT", SLRU_PAGES_PER_SEGMENT),
4504 errhint("It looks like you need to recompile or initdb.")));
4506 ereport(FATAL,
4508 errmsg("database files are incompatible with server"),
4509 /* translator: %s is a variable name and %d is its value */
4510 errdetail("The database cluster was initialized with %s %d,"
4511 " but the server was compiled with %s %d.",
4512 "XLOG_BLCKSZ", ControlFile->xlog_blcksz,
4513 "XLOG_BLCKSZ", XLOG_BLCKSZ),
4514 errhint("It looks like you need to recompile or initdb.")));
4516 ereport(FATAL,
4518 errmsg("database files are incompatible with server"),
4519 /* translator: %s is a variable name and %d is its value */
4520 errdetail("The database cluster was initialized with %s %d,"
4521 " but the server was compiled with %s %d.",
4522 "NAMEDATALEN", ControlFile->nameDataLen,
4523 "NAMEDATALEN", NAMEDATALEN),
4524 errhint("It looks like you need to recompile or initdb.")));
4526 ereport(FATAL,
4528 errmsg("database files are incompatible with server"),
4529 /* translator: %s is a variable name and %d is its value */
4530 errdetail("The database cluster was initialized with %s %d,"
4531 " but the server was compiled with %s %d.",
4532 "INDEX_MAX_KEYS", ControlFile->indexMaxKeys,
4533 "INDEX_MAX_KEYS", INDEX_MAX_KEYS),
4534 errhint("It looks like you need to recompile or initdb.")));
4536 ereport(FATAL,
4538 errmsg("database files are incompatible with server"),
4539 /* translator: %s is a variable name and %d is its value */
4540 errdetail("The database cluster was initialized with %s %d,"
4541 " but the server was compiled with %s %d.",
4542 "TOAST_MAX_CHUNK_SIZE", ControlFile->toast_max_chunk_size,
4543 "TOAST_MAX_CHUNK_SIZE", (int) TOAST_MAX_CHUNK_SIZE),
4544 errhint("It looks like you need to recompile or initdb.")));
4546 ereport(FATAL,
4548 errmsg("database files are incompatible with server"),
4549 /* translator: %s is a variable name and %d is its value */
4550 errdetail("The database cluster was initialized with %s %d,"
4551 " but the server was compiled with %s %d.",
4552 "LOBLKSIZE", ControlFile->loblksize,
4553 "LOBLKSIZE", (int) LOBLKSIZE),
4554 errhint("It looks like you need to recompile or initdb.")));
4555
4556 Assert(ControlFile->float8ByVal); /* vestigial, not worth an error msg */
4557
4559
4562 errmsg_plural("invalid WAL segment size in control file (%d byte)",
4563 "invalid WAL segment size in control file (%d bytes)",
4566 errdetail("The WAL segment size must be a power of two between 1 MB and 1 GB.")));
4567
 /* Publish the stored segment size as the wal_segment_size GUC. */
4569 SetConfigOption("wal_segment_size", wal_segsz_str, PGC_INTERNAL,
4571
4572 /* check and update variables dependent on wal_segment_size */
4575 /* translator: both %s are GUC names */
4576 errmsg("\"%s\" must be at least twice \"%s\"",
4577 "min_wal_size", "wal_segment_size")));
4578
4581 /* translator: both %s are GUC names */
4582 errmsg("\"%s\" must be at least twice \"%s\"",
4583 "max_wal_size", "wal_segment_size")));
4584
4588
4590
4591 /* Make the initdb settings visible as GUC variables, too */
4592 SetConfigOption("data_checksums", DataChecksumsEnabled() ? "yes" : "no",
4594}
4595
4596/*
4597 * Utility wrapper to update the control file. Note that the control
4598 * file gets flushed.
4599 */
4600static void
4605
4606/*
4607 * Returns the unique system identifier from control file.
4608 */
4609uint64
4615
4616/*
4617 * Returns the random nonce from control file.
4618 */
4619char *
4625
4626/*
4627 * Are checksums enabled for data pages?
4628 *
4629 * True when pg_control records a nonzero data-page checksum version.
4628 */
4629bool
4631{
 /* NOTE(review): a line (likely an Assert on ControlFile) was elided (4632). */
4633 return (ControlFile->data_checksum_version > 0);
4634}
4635
4636/*
4637 * Return true if the cluster was initialized on a platform where the
4638 * default signedness of char is "signed". This function exists for code
4639 * that deals with pre-v18 data files that store data sorted by the 'char'
4640 * type on disk (e.g., GIN and GiST indexes). See the comments in
4641 * WriteControlFile() for details.
4642 */
4643bool
4648
4649/*
4650 * Returns a fake LSN for unlogged relations.
4651 *
4652 * Each call generates an LSN that is greater than any previous value
4653 * returned. The current counter value is saved and restored across clean
4654 * shutdowns, but like unlogged relations, does not survive a crash. This can
4655 * be used in lieu of real LSN values returned by XLogInsert, if you need an
4656 * LSN-like increasing sequence of numbers without writing any WAL.
4657 */
4663
4664/*
4665 * Auto-tune the number of XLOG buffers.
4666 *
4667 * The preferred setting for wal_buffers is about 3% of shared_buffers, with
4668 * a maximum of one XLOG segment (there is little reason to think that more
4669 * is helpful, at least so long as we force an fsync when switching log files)
4670 * and a minimum of 8 blocks (which was the default value prior to PostgreSQL
4671 * 9.1, when auto-tuning was added).
4672 *
4673 * This should not be called until NBuffers has received its final value.
4674 */
4675static int
4677{
4678 int xbuffers;
4679
 /* NBuffers / 32 ~= 3% of shared_buffers, per the comment above. */
4680 xbuffers = NBuffers / 32;
 /*
  * NOTE(review): the upper clamp (to one WAL segment's worth of buffers,
  * per the header comment) was elided by the extraction (lines 4681-4682).
  */
4683 if (xbuffers < 8)
4684 xbuffers = 8;
4685 return xbuffers;
4686}
4687
4688/*
4689 * GUC check_hook for wal_buffers
4690 *
4691 * Resolves the -1 "auto-tune" request and enforces the minimum of 4 blocks.
4692 * Always returns true (the value is adjusted in place, never rejected).
4690 */
4691bool
4693{
4694 /*
4695 * -1 indicates a request for auto-tune.
4696 */
4697 if (*newval == -1)
4698 {
4699 /*
4700 * If we haven't yet changed the boot_val default of -1, just let it
4701 * be. We'll fix it when XLOGShmemSize is called.
4702 */
4703 if (XLOGbuffers == -1)
4704 return true;
4705
4706 /* Otherwise, substitute the auto-tune value */
 /* NOTE(review): the XLOGChooseNumBuffers() assignment was elided (4707). */
4708 }
4709
4710 /*
4711 * We clamp manually-set values to at least 4 blocks. Prior to PostgreSQL
4712 * 9.1, a minimum of 4 was enforced by guc.c, but since that is no longer
4713 * the case, we just silently treat such values as a request for the
4714 * minimum. (We could throw an error instead, but that doesn't seem very
4715 * helpful.)
4716 */
4717 if (*newval < 4)
4718 *newval = 4;
4719
4720 return true;
4721}
4722
4723/*
4724 * GUC check_hook for wal_consistency_checking
4725 *
4726 * Parses the comma-separated list of resource-manager names (or "all"),
4727 * builds a per-rmgr boolean mask, and stores it in *extra for the assign
4728 * hook. Unknown names may be deferred to startup-time checking (see below).
4725 */
4726bool
4728{
4729 char *rawstring;
4730 List *elemlist;
4731 ListCell *l;
4732 bool newwalconsistency[RM_MAX_ID + 1];
4733
4734 /* Initialize the array */
4735 MemSet(newwalconsistency, 0, (RM_MAX_ID + 1) * sizeof(bool));
4736
4737 /* Need a modifiable copy of string */
4739
4740 /* Parse string into list of identifiers */
4742 {
4743 /* syntax error in list */
4744 GUC_check_errdetail("List syntax is invalid.");
4747 return false;
4748 }
4749
4750 foreach(l, elemlist)
4751 {
4752 char *tok = (char *) lfirst(l);
4753 int rmid;
4754
4755 /* Check for 'all'. */
4756 if (pg_strcasecmp(tok, "all") == 0)
4757 {
 /* Enable every registered rmgr that provides a consistency mask. */
4758 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
4759 if (RmgrIdExists(rmid) && GetRmgr(rmid).rm_mask != NULL)
4760 newwalconsistency[rmid] = true;
4761 }
4762 else
4763 {
4764 /* Check if the token matches any known resource manager. */
4765 bool found = false;
4766
4767 for (rmid = 0; rmid <= RM_MAX_ID; rmid++)
4768 {
4769 if (RmgrIdExists(rmid) && GetRmgr(rmid).rm_mask != NULL &&
4770 pg_strcasecmp(tok, GetRmgr(rmid).rm_name) == 0)
4771 {
4772 newwalconsistency[rmid] = true;
4773 found = true;
4774 break;
4775 }
4776 }
4777 if (!found)
4778 {
4779 /*
4780 * During startup, it might be a not-yet-loaded custom
4781 * resource manager. Defer checking until
4782 * InitializeWalConsistencyChecking().
4783 */
4785 {
4787 }
4788 else
4789 {
4790 GUC_check_errdetail("Unrecognized key word: \"%s\".", tok);
4793 return false;
4794 }
4795 }
4796 }
4797 }
4798
4801
4802 /* assign new value */
 /* The mask array is copied into GUC-owned memory for the assign hook. */
4803 *extra = guc_malloc(LOG, (RM_MAX_ID + 1) * sizeof(bool));
4804 if (!*extra)
4805 return false;
4806 memcpy(*extra, newwalconsistency, (RM_MAX_ID + 1) * sizeof(bool));
4807 return true;
4808}
4809
4810/*
4811 * GUC assign_hook for wal_consistency_checking
4812 */
4813void
4815{
4816 /*
4817 * If some checks were deferred, it's possible that the checks will fail
4818 * later during InitializeWalConsistencyChecking(). But in that case, the
4819 * postmaster will exit anyway, so it's safe to proceed with the
4820 * assignment.
4821 *
4822 * Any built-in resource managers specified are assigned immediately,
4823 * which affects WAL created before shared_preload_libraries are
4824 * processed. Any custom resource managers specified won't be assigned
4825 * until after shared_preload_libraries are processed, but that's OK
4826 * because WAL for a custom resource manager can't be written before the
4827 * module is loaded anyway.
4828 */
4830}
4831
4832/*
4833 * InitializeWalConsistencyChecking: run after loading custom resource managers
4834 *
4835 * If any unknown resource managers were specified in the
4836 * wal_consistency_checking GUC, processing was deferred. Now that
4837 * shared_preload_libraries have been loaded, process wal_consistency_checking
4838 * again.
4839 */
4840void
4842{
4844
4846 {
4847 struct config_generic *guc;
4848
4849 guc = find_option("wal_consistency_checking", false, false, ERROR);
4850
4852
4853 set_config_option_ext("wal_consistency_checking",
4855 guc->scontext, guc->source, guc->srole,
4856 GUC_ACTION_SET, true, ERROR, false);
4857
4858 /* checking should not be deferred again */
4860 }
4861}
4862
4863/*
4864 * GUC show_hook for archive_command
4865 */
4866const char *
4868{
4869 if (XLogArchivingActive())
4870 return XLogArchiveCommand;
4871 else
4872 return "(disabled)";
4873}
4874
/*
 * GUC show_hook for in_hot_standby
 */
const char *
show_in_hot_standby(void)
{
	/*
	 * We display the actual state based on shared memory, so that this GUC
	 * reports up-to-date state if examined intra-query.  The underlying
	 * variable (in_hot_standby_guc) changes only when we transmit a new value
	 * to the client.
	 */
	return RecoveryInProgress() ? "on" : "off";
}
4889
4890/*
4891 * GUC show_hook for effective_wal_level
4892 */
4893const char *
4895{
4897 return "minimal";
4898
4899 /*
4900 * During recovery, effective_wal_level reflects the primary's
4901 * configuration rather than the local wal_level value.
4902 */
4903 if (RecoveryInProgress())
4904 return IsXLogLogicalInfoEnabled() ? "logical" : "replica";
4905
4906 return XLogLogicalInfoActive() ? "logical" : "replica";
4907}
4908
4909/*
4910 * Read the control file, set respective GUCs.
4911 *
4912 * This is to be called during startup, including a crash recovery cycle,
4913 * unless in bootstrap mode, where no control file yet exists. As there's no
4914 * usable shared memory yet (its sizing can depend on the contents of the
4915 * control file!), first store the contents in local memory. XLOGShmemInit()
4916 * will then copy it to shared memory later.
4917 *
4918 * reset just controls whether previous contents are to be expected (in the
4919 * reset case, there's a dangling pointer into old shared memory), or not.
4920 */
4921void
4928
4929/*
4930 * Get the wal_level from the control file. For a standby, this value should be
4931 * considered as its active wal_level, because it may be different from what
4932 * was originally configured on standby.
4933 */
4936{
4937 return ControlFile->wal_level;
4938}
4939
4940/*
4941 * Initialization of shared memory for XLOG
4942 */
4943Size
4945{
4946 Size size;
4947
4948 /*
4949 * If the value of wal_buffers is -1, use the preferred auto-tune value.
4950 * This isn't an amazingly clean place to do this, but we must wait till
4951 * NBuffers has received its final value, and must do it before using the
4952 * value of XLOGbuffers to do anything important.
4953 *
4954 * We prefer to report this value's source as PGC_S_DYNAMIC_DEFAULT.
4955 * However, if the DBA explicitly set wal_buffers = -1 in the config file,
4956 * then PGC_S_DYNAMIC_DEFAULT will fail to override that and we must force
4957 * the matter with PGC_S_OVERRIDE.
4958 */
4959 if (XLOGbuffers == -1)
4960 {
4961 char buf[32];
4962
4963 snprintf(buf, sizeof(buf), "%d", XLOGChooseNumBuffers());
4964 SetConfigOption("wal_buffers", buf, PGC_POSTMASTER,
4966 if (XLOGbuffers == -1) /* failed to apply it? */
4967 SetConfigOption("wal_buffers", buf, PGC_POSTMASTER,
4969 }
4970 Assert(XLOGbuffers > 0);
4971
4972 /* XLogCtl */
4973 size = sizeof(XLogCtlData);
4974
4975 /* WAL insertion locks, plus alignment */
4976 size = add_size(size, mul_size(sizeof(WALInsertLockPadded), NUM_XLOGINSERT_LOCKS + 1));
4977 /* xlblocks array */
4978 size = add_size(size, mul_size(sizeof(pg_atomic_uint64), XLOGbuffers));
4979 /* extra alignment padding for XLOG I/O buffers */
4980 size = add_size(size, Max(XLOG_BLCKSZ, PG_IO_ALIGN_SIZE));
4981 /* and the buffers themselves */
4982 size = add_size(size, mul_size(XLOG_BLCKSZ, XLOGbuffers));
4983
4984 /*
4985 * Note: we don't count ControlFileData, it comes out of the "slop factor"
4986 * added by CreateSharedMemoryAndSemaphores. This lets us use this
4987 * routine again below to compute the actual allocation size.
4988 */
4989
4990 return size;
4991}
4992
4993void
4995{
4996 bool foundCFile,
4997 foundXLog;
4998 char *allocptr;
4999 int i;
5001
5002#ifdef WAL_DEBUG
5003
5004 /*
5005 * Create a memory context for WAL debugging that's exempt from the normal
5006 * "no pallocs in critical section" rule. Yes, that can lead to a PANIC if
5007 * an allocation fails, but wal_debug is not for production use anyway.
5008 */
5009 if (walDebugCxt == NULL)
5010 {
5012 "WAL Debug",
5015 }
5016#endif
5017
5018
5019 XLogCtl = (XLogCtlData *)
5020 ShmemInitStruct("XLOG Ctl", XLOGShmemSize(), &foundXLog);
5021
5024 ShmemInitStruct("Control File", sizeof(ControlFileData), &foundCFile);
5025
5026 if (foundCFile || foundXLog)
5027 {
5028 /* both should be present or neither */
5030
5031 /* Initialize local copy of WALInsertLocks */
5033
5034 if (localControlFile)
5036 return;
5037 }
5038 memset(XLogCtl, 0, sizeof(XLogCtlData));
5039
5040 /*
5041 * Already have read control file locally, unless in bootstrap mode. Move
5042 * contents into shared memory.
5043 */
5044 if (localControlFile)
5045 {
5048 }
5049
5050 /*
5051 * Since XLogCtlData contains XLogRecPtr fields, its sizeof should be a
5052 * multiple of the alignment for same, so no extra alignment padding is
5053 * needed here.
5054 */
5055 allocptr = ((char *) XLogCtl) + sizeof(XLogCtlData);
5058
5059 for (i = 0; i < XLOGbuffers; i++)
5060 {
5062 }
5063
5064 /* WAL insertion locks. Ensure they're aligned to the full padded size */
5065 allocptr += sizeof(WALInsertLockPadded) -
5070
5071 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
5072 {
5076 }
5077
5078 /*
5079 * Align the start of the page buffers to a full xlog block size boundary.
5080 * This simplifies some calculations in XLOG insertion. It is also
5081 * required for O_DIRECT.
5082 */
5086
5087 /*
5088 * Do basic initialization of XLogCtl shared data. (StartupXLOG will fill
5089 * in additional info.)
5090 */
5094 XLogCtl->WalWriterSleeping = false;
5095
5102}
5103
5104/*
5105 * This func must be called ONCE on system install. It creates pg_control
5106 * and the initial XLOG segment.
5107 */
5108void
5109BootStrapXLOG(uint32 data_checksum_version)
5110{
5111 CheckPoint checkPoint;
5112 PGAlignedXLogBlock buffer;
5113 XLogPageHeader page;
5115 XLogRecord *record;
5116 char *recptr;
5117 uint64 sysidentifier;
5118 struct timeval tv;
5119 pg_crc32c crc;
5120
5121 /* allow ordinary WAL segment creation, like StartupXLOG() would */
5123
5124 /*
5125 * Select a hopefully-unique system identifier code for this installation.
5126 * We use the result of gettimeofday(), including the fractional seconds
5127 * field, as being about as unique as we can easily get. (Think not to
5128 * use random(), since it hasn't been seeded and there's no portable way
5129 * to seed it other than the system clock value...) The upper half of the
5130 * uint64 value is just the tv_sec part, while the lower half contains the
5131 * tv_usec part (which must fit in 20 bits), plus 12 bits from our current
5132 * PID for a little extra uniqueness. A person knowing this encoding can
5133 * determine the initialization time of the installation, which could
5134 * perhaps be useful sometimes.
5135 */
5136 gettimeofday(&tv, NULL);
5137 sysidentifier = ((uint64) tv.tv_sec) << 32;
5138 sysidentifier |= ((uint64) tv.tv_usec) << 12;
5139 sysidentifier |= getpid() & 0xFFF;
5140
5141 memset(&buffer, 0, sizeof buffer);
5142 page = (XLogPageHeader) &buffer;
5143
5144 /*
5145 * Set up information for the initial checkpoint record
5146 *
5147 * The initial checkpoint record is written to the beginning of the WAL
5148 * segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
5149 * used, so that we can use 0/0 to mean "before any valid WAL segment".
5150 */
5154 checkPoint.fullPageWrites = fullPageWrites;
5156 checkPoint.wal_level = wal_level;
5157 checkPoint.nextXid =
5159 checkPoint.nextOid = FirstGenbkiObjectId;
5160 checkPoint.nextMulti = FirstMultiXactId;
5161 checkPoint.nextMultiOffset = 1;
5163 checkPoint.oldestXidDB = Template1DbOid;
5164 checkPoint.oldestMulti = FirstMultiXactId;
5165 checkPoint.oldestMultiDB = Template1DbOid;
5168 checkPoint.time = (pg_time_t) time(NULL);
5170
5171 TransamVariables->nextXid = checkPoint.nextXid;
5172 TransamVariables->nextOid = checkPoint.nextOid;
5174 MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
5175 AdvanceOldestClogXid(checkPoint.oldestXid);
5176 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
5177 SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB);
5179
5180 /* Set up the XLOG page header */
5181 page->xlp_magic = XLOG_PAGE_MAGIC;
5182 page->xlp_info = XLP_LONG_HEADER;
5186 longpage->xlp_sysid = sysidentifier;
5187 longpage->xlp_seg_size = wal_segment_size;
5188 longpage->xlp_xlog_blcksz = XLOG_BLCKSZ;
5189
5190 /* Insert the initial checkpoint record */
5191 recptr = ((char *) page + SizeOfXLogLongPHD);
5192 record = (XLogRecord *) recptr;
5193 record->xl_prev = InvalidXLogRecPtr;
5194 record->xl_xid = InvalidTransactionId;
5195 record->xl_tot_len = SizeOfXLogRecord + SizeOfXLogRecordDataHeaderShort + sizeof(checkPoint);
5197 record->xl_rmid = RM_XLOG_ID;
5199 /* fill the XLogRecordDataHeaderShort struct */
5200 *(recptr++) = (char) XLR_BLOCK_ID_DATA_SHORT;
5201 *(recptr++) = sizeof(checkPoint);
5202 memcpy(recptr, &checkPoint, sizeof(checkPoint));
5203 recptr += sizeof(checkPoint);
5204 Assert(recptr - (char *) record == record->xl_tot_len);
5205
5207 COMP_CRC32C(crc, ((char *) record) + SizeOfXLogRecord, record->xl_tot_len - SizeOfXLogRecord);
5208 COMP_CRC32C(crc, (char *) record, offsetof(XLogRecord, xl_crc));
5209 FIN_CRC32C(crc);
5210 record->xl_crc = crc;
5211
5212 /* Create first XLOG segment file */
5215
5216 /*
5217 * We needn't bother with Reserve/ReleaseExternalFD here, since we'll
5218 * close the file again in a moment.
5219 */
5220
5221 /* Write the first page with the initial record */
5222 errno = 0;
5224 if (write(openLogFile, &buffer, XLOG_BLCKSZ) != XLOG_BLCKSZ)
5225 {
5226 /* if write didn't set errno, assume problem is no disk space */
5227 if (errno == 0)
5228 errno = ENOSPC;
5229 ereport(PANIC,
5231 errmsg("could not write bootstrap write-ahead log file: %m")));
5232 }
5234
5236 if (pg_fsync(openLogFile) != 0)
5237 ereport(PANIC,
5239 errmsg("could not fsync bootstrap write-ahead log file: %m")));
5241
5242 if (close(openLogFile) != 0)
5243 ereport(PANIC,
5245 errmsg("could not close bootstrap write-ahead log file: %m")));
5246
5247 openLogFile = -1;
5248
5249 /* Now create pg_control */
5250 InitControlFile(sysidentifier, data_checksum_version);
5251 ControlFile->time = checkPoint.time;
5252 ControlFile->checkPoint = checkPoint.redo;
5253 ControlFile->checkPointCopy = checkPoint;
5254
5255 /* some additional ControlFile fields are set in WriteControlFile() */
5257
5258 /* Bootstrap the commit log, too */
5259 BootStrapCLOG();
5263
5264 /*
5265 * Force control file to be read - in contrast to normal processing we'd
5266 * otherwise never run the checks and GUC related initializations therein.
5267 */
5269}
5270
5271static char *
5273{
5275 "%Y-%m-%d %H:%M:%S %Z",
5277
5278 return buf;
5279}
5280
5281/*
5282 * Initialize the first WAL segment on new timeline.
5283 */
5284static void
5286{
5287 char xlogfname[MAXFNAMELEN];
5290
5291 /* we always switch to a new timeline after archive recovery */
5292 Assert(endTLI != newTLI);
5293
5294 /*
5295 * Update min recovery point one last time.
5296 */
5298
5299 /*
5300 * Calculate the last segment on the old timeline, and the first segment
5301 * on the new timeline. If the switch happens in the middle of a segment,
5302 * they are the same, but if the switch happens exactly at a segment
5303 * boundary, startLogSegNo will be endLogSegNo + 1.
5304 */
5307
5308 /*
5309 * Initialize the starting WAL segment for the new timeline. If the switch
5310 * happens in the middle of a segment, copy data from the last WAL segment
5311 * of the old timeline up to the switch point, to the starting WAL segment
5312 * on the new timeline.
5313 */
5315 {
5316 /*
5317 * Make a copy of the file on the new timeline.
5318 *
5319 * Writing WAL isn't allowed yet, so there are no locking
5320 * considerations. But we should be just as tense as XLogFileInit to
5321 * avoid emplacing a bogus file.
5322 */
5325 }
5326 else
5327 {
5328 /*
5329 * The switch happened at a segment boundary, so just create the next
5330 * segment on the new timeline.
5331 */
5332 int fd;
5333
5335
5336 if (close(fd) != 0)
5337 {
5338 int save_errno = errno;
5339
5341 errno = save_errno;
5342 ereport(ERROR,
5344 errmsg("could not close file \"%s\": %m", xlogfname)));
5345 }
5346 }
5347
5348 /*
5349 * Let's just make real sure there are not .ready or .done flags posted
5350 * for the new segment.
5351 */
5354}
5355
5356/*
5357 * Perform cleanup actions at the conclusion of archive recovery.
5358 */
5359static void
5362{
5363 /*
5364 * Execute the recovery_end_command, if any.
5365 */
5368 "recovery_end_command",
5369 true,
5371
5372 /*
5373 * We switched to a new timeline. Clean up segments on the old timeline.
5374 *
5375 * If there are any higher-numbered segments on the old timeline, remove
5376 * them. They might contain valid WAL, but they might also be
5377 * pre-allocated files containing garbage. In any case, they are not part
5378 * of the new timeline's history so we don't need them.
5379 */
5381
5382 /*
5383 * If the switch happened in the middle of a segment, what to do with the
5384 * last, partial segment on the old timeline? If we don't archive it, and
5385 * the server that created the WAL never archives it either (e.g. because
5386 * it was hit by a meteor), it will never make it to the archive. That's
5387 * OK from our point of view, because the new segment that we created with
5388 * the new TLI contains all the WAL from the old timeline up to the switch
5389 * point. But if you later try to do PITR to the "missing" WAL on the old
5390 * timeline, recovery won't find it in the archive. It's physically
5391 * present in the new file with new TLI, but recovery won't look there
5392 * when it's recovering to the older timeline. On the other hand, if we
5393 * archive the partial segment, and the original server on that timeline
5394 * is still running and archives the completed version of the same segment
5395 * later, it will fail. (We used to do that in 9.4 and below, and it
5396 * caused such problems).
5397 *
5398 * As a compromise, we rename the last segment with the .partial suffix,
5399 * and archive it. Archive recovery will never try to read .partial
5400 * segments, so they will normally go unused. But in the odd PITR case,
5401 * the administrator can copy them manually to the pg_wal directory
5402 * (removing the suffix). They can be useful in debugging, too.
5403 *
5404 * If a .done or .ready file already exists for the old timeline, however,
5405 * we had already determined that the segment is complete, so we can let
5406 * it be archived normally. (In particular, if it was restored from the
5407 * archive to begin with, it's expected to have a .done file).
5408 */
5411 {
5412 char origfname[MAXFNAMELEN];
5414
5417
5419 {
5420 char origpath[MAXPGPATH];
5422 char partialpath[MAXPGPATH];
5423
5424 /*
5425 * If we're summarizing WAL, we can't rename the partial file
5426 * until the summarizer finishes with it, else it will fail.
5427 */
5428 if (summarize_wal)
5430
5432 snprintf(partialfname, MAXFNAMELEN, "%s.partial", origfname);
5433 snprintf(partialpath, MAXPGPATH, "%s.partial", origpath);
5434
5435 /*
5436 * Make sure there's no .done or .ready file for the .partial
5437 * file.
5438 */
5440
5443 }
5444 }
5445}
5446
5447/*
5448 * Check to see if required parameters are set high enough on this server
5449 * for various aspects of recovery operation.
5450 *
5451 * Note that all the parameters which this function tests need to be
5452 * listed in Administrator's Overview section in high-availability.sgml.
5453 * If you change them, don't forget to update the list.
5454 */
5455static void
5457{
5458 /*
5459 * For archive recovery, the WAL must be generated with at least 'replica'
5460 * wal_level.
5461 */
5463 {
5464 ereport(FATAL,
5466 errmsg("WAL was generated with \"wal_level=minimal\", cannot continue recovering"),
5467 errdetail("This happens if you temporarily set \"wal_level=minimal\" on the server."),
5468 errhint("Use a backup taken after setting \"wal_level\" to higher than \"minimal\".")));
5469 }
5470
5471 /*
5472 * For Hot Standby, the WAL must be generated with 'replica' mode, and we
5473 * must have at least as many backend slots as the primary.
5474 */
5476 {
5477 /* We ignore autovacuum_worker_slots when we make this test. */
5478 RecoveryRequiresIntParameter("max_connections",
5481 RecoveryRequiresIntParameter("max_worker_processes",
5484 RecoveryRequiresIntParameter("max_wal_senders",
5487 RecoveryRequiresIntParameter("max_prepared_transactions",
5490 RecoveryRequiresIntParameter("max_locks_per_transaction",
5493 }
5494}
5495
5496/*
5497 * This must be called ONCE during postmaster or standalone-backend startup
5498 */
5499void
5501{
5503 CheckPoint checkPoint;
5504 bool wasShutdown;
5505 bool didCrash;
5506 bool haveTblspcMap;
5507 bool haveBackupLabel;
5516 bool promoted = false;
5517 char timebuf[128];
5518
5519 /*
5520 * We should have an aux process resource owner to use, and we should not
5521 * be in a transaction that's installed some other resowner.
5522 */
5527
5528 /*
5529 * Check that contents look valid.
5530 */
5532 ereport(FATAL,
5534 errmsg("control file contains invalid checkpoint location")));
5535
5536 switch (ControlFile->state)
5537 {
5538 case DB_SHUTDOWNED:
5539
5540 /*
5541 * This is the expected case, so don't be chatty in standalone
5542 * mode
5543 */
5545 (errmsg("database system was shut down at %s",
5547 timebuf, sizeof(timebuf)))));
5548 break;
5549
5551 ereport(LOG,
5552 (errmsg("database system was shut down in recovery at %s",
5554 timebuf, sizeof(timebuf)))));
5555 break;
5556
5557 case DB_SHUTDOWNING:
5558 ereport(LOG,
5559 (errmsg("database system shutdown was interrupted; last known up at %s",
5561 timebuf, sizeof(timebuf)))));
5562 break;
5563
5565 ereport(LOG,
5566 (errmsg("database system was interrupted while in recovery at %s",
5568 timebuf, sizeof(timebuf))),
5569 errhint("This probably means that some data is corrupted and"
5570 " you will have to use the last backup for recovery.")));
5571 break;
5572
5574 ereport(LOG,
5575 (errmsg("database system was interrupted while in recovery at log time %s",
5577 timebuf, sizeof(timebuf))),
5578 errhint("If this has occurred more than once some data might be corrupted"
5579 " and you might need to choose an earlier recovery target.")));
5580 break;
5581
5582 case DB_IN_PRODUCTION:
5583 ereport(LOG,
5584 (errmsg("database system was interrupted; last known up at %s",
5586 timebuf, sizeof(timebuf)))));
5587 break;
5588
5589 default:
5590 ereport(FATAL,
5592 errmsg("control file contains invalid database cluster state")));
5593 }
5594
5595 /* This is just to allow attaching to startup process with a debugger */
5596#ifdef XLOG_REPLAY_DELAY
5598 pg_usleep(60000000L);
5599#endif
5600
5601 /*
5602 * Verify that pg_wal, pg_wal/archive_status, and pg_wal/summaries exist.
5603 * In cases where someone has performed a copy for PITR, these directories
5604 * may have been excluded and need to be re-created.
5605 */
5607
5608 /* Set up timeout handler needed to report startup progress. */
5612
5613 /*----------
5614 * If we previously crashed, perform a couple of actions:
5615 *
5616 * - The pg_wal directory may still include some temporary WAL segments
5617 * used when creating a new segment, so perform some clean up to not
5618 * bloat this path. This is done first as there is no point to sync
5619 * this temporary data.
5620 *
5621 * - There might be data which we had written, intending to fsync it, but
5622 * which we had not actually fsync'd yet. Therefore, a power failure in
5623 * the near future might cause earlier unflushed writes to be lost, even
5624 * though more recent data written to disk from here on would be
5625 * persisted. To avoid that, fsync the entire data directory.
5626 */
5629 {
5632 didCrash = true;
5633 }
5634 else
5635 didCrash = false;
5636
5637 /*
5638 * Prepare for WAL recovery if needed.
5639 *
5640 * InitWalRecovery analyzes the control file and the backup label file, if
5641 * any. It updates the in-memory ControlFile buffer according to the
5642 * starting checkpoint, and sets InRecovery and ArchiveRecoveryRequested.
5643 * It also applies the tablespace map file, if any.
5644 */
5647 checkPoint = ControlFile->checkPointCopy;
5648
5649 /* initialize shared memory variables from the checkpoint record */
5650 TransamVariables->nextXid = checkPoint.nextXid;
5651 TransamVariables->nextOid = checkPoint.nextOid;
5653 MultiXactSetNextMXact(checkPoint.nextMulti, checkPoint.nextMultiOffset);
5654 AdvanceOldestClogXid(checkPoint.oldestXid);
5655 SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
5656 SetMultiXactIdLimit(checkPoint.oldestMulti, checkPoint.oldestMultiDB);
5658 checkPoint.newestCommitTsXid);
5659
5660 /*
5661 * Clear out any old relcache cache files. This is *necessary* if we do
5662 * any WAL replay, since that would probably result in the cache files
5663 * being out of sync with database reality. In theory we could leave them
5664 * in place if the database had been cleanly shut down, but it seems
5665 * safest to just remove them always and let them be rebuilt during the
5666 * first backend startup. These files needs to be removed from all
5667 * directories including pg_tblspc, however the symlinks are created only
5668 * after reading tablespace_map file in case of archive recovery from
5669 * backup, so needs to clear old relcache files here after creating
5670 * symlinks.
5671 */
5673
5674 /*
5675 * Initialize replication slots, before there's a chance to remove
5676 * required resources.
5677 */
5679
5680 /*
5681 * Startup the logical decoding status with the last status stored in the
5682 * checkpoint record.
5683 */
5685
5686 /*
5687 * Startup logical state, needs to be setup now so we have proper data
5688 * during crash recovery.
5689 */
5691
5692 /*
5693 * Startup CLOG. This must be done after TransamVariables->nextXid has
5694 * been initialized and before we accept connections or begin WAL replay.
5695 */
5696 StartupCLOG();
5697
5698 /*
5699 * Startup MultiXact. We need to do this early to be able to replay
5700 * truncations.
5701 */
5703
5704 /*
5705 * Ditto for commit timestamps. Activate the facility if the setting is
5706 * enabled in the control file, as there should be no tracking of commit
5707 * timestamps done when the setting was disabled. This facility can be
5708 * started or stopped when replaying a XLOG_PARAMETER_CHANGE record.
5709 */
5712
5713 /*
5714 * Recover knowledge about replay progress of known replication partners.
5715 */
5717
5718 /*
5719 * Initialize unlogged LSN. On a clean shutdown, it's restored from the
5720 * control file. On recovery, all unlogged relations are blown away, so
5721 * the unlogged LSN counter can be reset too.
5722 */
5726 else
5729
5730 /*
5731 * Copy any missing timeline history files between 'now' and the recovery
5732 * target timeline from archive to pg_wal. While we don't need those files
5733 * ourselves - the history file of the recovery target timeline covers all
5734 * the previous timelines in the history too - a cascading standby server
5735 * might be interested in them. Or, if you archive the WAL from this
5736 * server to a different archive than the primary, it'd be good for all
5737 * the history files to get archived there after failover, so that you can
5738 * use one of the old timelines as a PITR target. Timeline history files
5739 * are small, so it's better to copy them unnecessarily than not copy them
5740 * and regret later.
5741 */
5743
5744 /*
5745 * Before running in recovery, scan pg_twophase and fill in its status to
5746 * be able to work on entries generated by redo. Doing a scan before
5747 * taking any recovery action has the merit to discard any 2PC files that
5748 * are newer than the first record to replay, saving from any conflicts at
5749 * replay. This avoids as well any subsequent scans when doing recovery
5750 * of the on-disk two-phase data.
5751 */
5753
5754 /*
5755 * When starting with crash recovery, reset pgstat data - it might not be
5756 * valid. Otherwise restore pgstat data. It's safe to do this here,
5757 * because postmaster will not yet have started any other processes.
5758 *
5759 * NB: Restoring replication slot stats relies on slot state to have
5760 * already been restored from disk.
5761 *
5762 * TODO: With a bit of extra work we could just start with a pgstat file
5763 * associated with the checkpoint redo location we're starting from.
5764 */
5765 if (didCrash)
5767 else
5769
5771
5774
5775 /* REDO */
5776 if (InRecovery)
5777 {
5778 /* Initialize state for RecoveryInProgress() */
5782 else
5785
5786 /*
5787 * Update pg_control to show that we are recovering and to show the
5788 * selected checkpoint as the place we are starting from. We also mark
5789 * pg_control with any minimum recovery stop point obtained from a
5790 * backup history file.
5791 *
5792 * No need to hold ControlFileLock yet, we aren't up far enough.
5793 */
5795
5796 /*
5797 * If there was a backup label file, it's done its job and the info
5798 * has now been propagated into pg_control. We must get rid of the
5799 * label file so that if we crash during recovery, we'll pick up at
5800 * the latest recovery restartpoint instead of going all the way back
5801 * to the backup start point. It seems prudent though to just rename
5802 * the file out of the way rather than delete it completely.
5803 */
5804 if (haveBackupLabel)
5805 {
5808 }
5809
5810 /*
5811 * If there was a tablespace_map file, it's done its job and the
5812 * symlinks have been created. We must get rid of the map file so
5813 * that if we crash during recovery, we don't create symlinks again.
5814 * It seems prudent though to just rename the file out of the way
5815 * rather than delete it completely.
5816 */
5817 if (haveTblspcMap)
5818 {
5821 }
5822
5823 /*
5824 * Initialize our local copy of minRecoveryPoint. When doing crash
5825 * recovery we want to replay up to the end of WAL. Particularly, in
5826 * the case of a promoted standby minRecoveryPoint value in the
5827 * control file is only updated after the first checkpoint. However,
5828 * if the instance crashes before the first post-recovery checkpoint
5829 * is completed then recovery will use a stale location causing the
5830 * startup process to think that there are still invalid page
5831 * references when checking for data consistency.
5832 */
5834 {
5837 }
5838 else
5839 {
5842 }
5843
5844 /* Check that the GUCs used to generate the WAL allow recovery */
5846
5847 /*
5848 * We're in recovery, so unlogged relations may be trashed and must be
5849 * reset. This should be done BEFORE allowing Hot Standby
5850 * connections, so that read-only backends don't try to read whatever
5851 * garbage is left over from before.
5852 */
5854
5855 /*
5856 * Likewise, delete any saved transaction snapshot files that got left
5857 * behind by crashed backends.
5858 */
5860
5861 /*
5862 * Initialize for Hot Standby, if enabled. We won't let backends in
5863 * yet, not until we've reached the min recovery point specified in
5864 * control file and we've established a recovery snapshot from a
5865 * running-xacts WAL record.
5866 */
5868 {
5869 TransactionId *xids;
5870 int nxids;
5871
5873 (errmsg_internal("initializing for hot standby")));
5874
5876
5877 if (wasShutdown)
5879 else
5880 oldestActiveXID = checkPoint.oldestActiveXid;
5882
5883 /* Tell procarray about the range of xids it has to deal with */
5885
5886 /*
5887 * Startup subtrans only. CLOG, MultiXact and commit timestamp
5888 * have already been started up and other SLRUs are not maintained
5889 * during recovery and need not be started yet.
5890 */
5892
5893 /*
5894 * If we're beginning at a shutdown checkpoint, we know that
5895 * nothing was running on the primary at this point. So fake-up an
5896 * empty running-xacts record and use that here and now. Recover
5897 * additional standby state for prepared transactions.
5898 */
5899 if (wasShutdown)
5900 {
5902 TransactionId latestCompletedXid;
5903
5904 /* Update pg_subtrans entries for any prepared transactions */
5906
5907 /*
5908 * Construct a RunningTransactions snapshot representing a
5909 * shut down server, with only prepared transactions still
5910 * alive. We're never overflowed at this point because all
5911 * subxids are listed with their parent prepared transactions.
5912 */
5913 running.xcnt = nxids;
5914 running.subxcnt = 0;
5916 running.nextXid = XidFromFullTransactionId(checkPoint.nextXid);
5918 latestCompletedXid = XidFromFullTransactionId(checkPoint.nextXid);
5919 TransactionIdRetreat(latestCompletedXid);
5920 Assert(TransactionIdIsNormal(latestCompletedXid));
5921 running.latestCompletedXid = latestCompletedXid;
5922 running.xids = xids;
5923
5925 }
5926 }
5927
5928 /*
5929 * We're all set for replaying the WAL now. Do it.
5930 */
5932 performedWalRecovery = true;
5933 }
5934 else
5935 performedWalRecovery = false;
5936
5937 /*
5938 * Finish WAL recovery.
5939 */
5941 EndOfLog = endOfRecoveryInfo->endOfLog;
5942 EndOfLogTLI = endOfRecoveryInfo->endOfLogTLI;
5943 abortedRecPtr = endOfRecoveryInfo->abortedRecPtr;
5944 missingContrecPtr = endOfRecoveryInfo->missingContrecPtr;
5945
5946 /*
5947 * Reset ps status display, so as no information related to recovery shows
5948 * up.
5949 */
5950 set_ps_display("");
5951
5952 /*
5953 * When recovering from a backup (we are in recovery, and archive recovery
5954 * was requested), complain if we did not roll forward far enough to reach
5955 * the point where the database is consistent. For regular online
5956 * backup-from-primary, that means reaching the end-of-backup WAL record
5957 * (at which point we reset backupStartPoint to be Invalid), for
5958 * backup-from-replica (which can't inject records into the WAL stream),
5959 * that point is when we reach the minRecoveryPoint in pg_control (which
5960 * we purposefully copy last when backing up from a replica). For
5961 * pg_rewind (which creates a backup_label with a method of "pg_rewind")
5962 * or snapshot-style backups (which don't), backupEndRequired will be set
5963 * to false.
5964 *
5965 * Note: it is indeed okay to look at the local variable
5966 * LocalMinRecoveryPoint here, even though ControlFile->minRecoveryPoint
5967 * might be further ahead --- ControlFile->minRecoveryPoint cannot have
5968 * been advanced beyond the WAL we processed.
5969 */
5970 if (InRecovery &&
5973 {
5974 /*
5975 * Ran off end of WAL before reaching end-of-backup WAL record, or
5976 * minRecoveryPoint. That's a bad sign, indicating that you tried to
5977 * recover from an online backup but never called pg_backup_stop(), or
5978 * you didn't archive all the WAL needed.
5979 */
5981 {
5983 ereport(FATAL,
5985 errmsg("WAL ends before end of online backup"),
5986 errhint("All WAL generated while online backup was taken must be available at recovery.")));
5987 else
5988 ereport(FATAL,
5990 errmsg("WAL ends before consistent recovery point")));
5991 }
5992 }
5993
5994 /*
5995 * Reset unlogged relations to the contents of their INIT fork. This is
5996 * done AFTER recovery is complete so as to include any unlogged relations
5997 * created during recovery, but BEFORE recovery is marked as having
5998 * completed successfully. Otherwise we'd not retry if any of the post
5999 * end-of-recovery steps fail.
6000 */
6001 if (InRecovery)
6003
6004 /*
6005 * Pre-scan prepared transactions to find out the range of XIDs present.
6006 * This information is not quite needed yet, but it is positioned here so
6007 * as potential problems are detected before any on-disk change is done.
6008 */
6010
6011 /*
6012 * Allow ordinary WAL segment creation before possibly switching to a new
6013 * timeline, which creates a new segment, and after the last ReadRecord().
6014 */
6016
6017 /*
6018 * Consider whether we need to assign a new timeline ID.
6019 *
6020 * If we did archive recovery, we always assign a new ID. This handles a
6021 * couple of issues. If we stopped short of the end of WAL during
6022 * recovery, then we are clearly generating a new timeline and must assign
6023 * it a unique new ID. Even if we ran to the end, modifying the current
6024 * last segment is problematic because it may result in trying to
6025 * overwrite an already-archived copy of that segment, and we encourage
6026 * DBAs to make their archive_commands reject that. We can dodge the
6027 * problem by making the new active segment have a new timeline ID.
6028 *
6029 * In a normal crash recovery, we can just extend the timeline we were in.
6030 */
6031 newTLI = endOfRecoveryInfo->lastRecTLI;
6033 {
6035 ereport(LOG,
6036 (errmsg("selected new timeline ID: %u", newTLI)));
6037
6038 /*
6039 * Make a writable copy of the last WAL segment. (Note that we also
6040 * have a copy of the last block of the old WAL in
6041 * endOfRecovery->lastPage; we will use that below.)
6042 */
6044
6045 /*
6046 * Remove the signal files out of the way, so that we don't
6047 * accidentally re-enter archive recovery mode in a subsequent crash.
6048 */
6049 if (endOfRecoveryInfo->standby_signal_file_found)
6051
6052 if (endOfRecoveryInfo->recovery_signal_file_found)
6054
6055 /*
6056 * Write the timeline history file, and have it archived. After this
6057 * point (or rather, as soon as the file is archived), the timeline
6058 * will appear as "taken" in the WAL archive and to any standby
6059 * servers. If we crash before actually switching to the new
6060 * timeline, standby servers will nevertheless think that we switched
6061 * to the new timeline, and will try to connect to the new timeline.
6062 * To minimize the window for that, try to do as little as possible
6063 * between here and writing the end-of-recovery record.
6064 */
6066 EndOfLog, endOfRecoveryInfo->recoveryStopReason);
6067
6068 ereport(LOG,
6069 (errmsg("archive recovery complete")));
6070 }
6071
6072 /* Save the selected TimeLineID in shared memory, too */
6077
6078 /*
6079 * Actually, if WAL ended in an incomplete record, skip the parts that
6080 * made it through and start writing after the portion that persisted.
6081 * (It's critical to first write an OVERWRITE_CONTRECORD message, which
6082 * we'll do as soon as we're open for writing new WAL.)
6083 */
6085 {
6086 /*
6087 * We should only have a missingContrecPtr if we're not switching to a
6088 * new timeline. When a timeline switch occurs, WAL is copied from the
6089 * old timeline to the new only up to the end of the last complete
6090 * record, so there can't be an incomplete WAL record that we need to
6091 * disregard.
6092 */
6093 Assert(newTLI == endOfRecoveryInfo->lastRecTLI);
6096 }
6097
6098 /*
6099 * Prepare to write WAL starting at EndOfLog location, and init xlog
6100 * buffer cache using the block containing the last record from the
6101 * previous incarnation.
6102 */
6103 Insert = &XLogCtl->Insert;
6105 Insert->CurrBytePos = XLogRecPtrToBytePos(EndOfLog);
6106
6107 /*
6108 * Tricky point here: lastPage contains the *last* block that the LastRec
6109 * record spans, not the one it starts in. The last block is indeed the
6110 * one we want to use.
6111 */
6112 if (EndOfLog % XLOG_BLCKSZ != 0)
6113 {
6114 char *page;
6115 int len;
6116 int firstIdx;
6117
6119 len = EndOfLog - endOfRecoveryInfo->lastPageBeginPtr;
6121
6122 /* Copy the valid part of the last block, and zero the rest */
6123 page = &XLogCtl->pages[firstIdx * XLOG_BLCKSZ];
6124 memcpy(page, endOfRecoveryInfo->lastPage, len);
6125 memset(page + len, 0, XLOG_BLCKSZ - len);
6126
6129 }
6130 else
6131 {
6132 /*
6133 * There is no partial block to copy. Just set InitializedUpTo, and
6134 * let the first attempt to insert a log record to initialize the next
6135 * buffer.
6136 */
6138 }
6139
6140 /*
6141 * Update local and shared status. This is OK to do without any locks
6142 * because no other process can be reading or writing WAL yet.
6143 */
6150
6151 /*
6152 * Preallocate additional log files, if wanted.
6153 */
6155
6156 /*
6157 * Okay, we're officially UP.
6158 */
6159 InRecovery = false;
6160
6161 /* start the archive_timeout timer and LSN running */
6164
6165 /* also initialize latestCompletedXid, to nextXid - 1 */
6170
6171 /*
6172 * Start up subtrans, if not already done for hot standby. (commit
6173 * timestamps are started below, if necessary.)
6174 */
6177
6178 /*
6179 * Perform end of recovery actions for any SLRUs that need it.
6180 */
6181 TrimCLOG();
6182 TrimMultiXact();
6183
6184 /*
6185 * Reload shared-memory state for prepared transactions. This needs to
6186 * happen before renaming the last partial segment of the old timeline as
6187 * it may be possible that we have to recover some transactions from it.
6188 */
6190
6191 /* Shut down xlogreader */
6193
6194 /* Enable WAL writes for this backend only. */
6196
6197 /* If necessary, write overwrite-contrecord before doing anything else */
6199 {
6202 }
6203
6204 /*
6205 * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
6206 * record before resource manager writes cleanup WAL records or checkpoint
6207 * record is written.
6208 */
6209 Insert->fullPageWrites = lastFullPageWrites;
6211
6212 /*
6213 * Emit checkpoint or end-of-recovery record in XLOG, if required.
6214 */
6217
6218 /*
6219 * If any of the critical GUCs have changed, log them before we allow
6220 * backends to write WAL.
6221 */
6223
6224 /* If this is archive recovery, perform post-recovery cleanup actions. */
6227
6228 /*
6229 * Local WAL inserts enabled, so it's time to finish initialization of
6230 * commit timestamp.
6231 */
6233
6234 /*
6235 * Update logical decoding status in shared memory and write an
6236 * XLOG_LOGICAL_DECODING_STATUS_CHANGE, if necessary.
6237 */
6239
6240 /* Clean up EndOfWalRecoveryInfo data to appease Valgrind leak checking */
6241 if (endOfRecoveryInfo->lastPage)
6242 pfree(endOfRecoveryInfo->lastPage);
6243 pfree(endOfRecoveryInfo->recoveryStopReason);
6245
6246 /*
6247 * All done with end-of-recovery actions.
6248 *
6249 * Now allow backends to write WAL and update the control file status in
6250 * consequence. SharedRecoveryState, that controls if backends can write
6251 * WAL, is updated while holding ControlFileLock to prevent other backends
6252 * to look at an inconsistent state of the control file in shared memory.
6253 * There is still a small window during which backends can write WAL and
6254 * the control file is still referring to a system not in DB_IN_PRODUCTION
6255 * state while looking at the on-disk control file.
6256 *
6257 * Also, we use info_lck to update SharedRecoveryState to ensure that
6258 * there are no race conditions concerning visibility of other recent
6259 * updates to shared memory.
6260 */
6263
6267
6270
6271 /*
6272 * Wake up the checkpointer process as there might be a request to disable
6273 * logical decoding by concurrent slot drop.
6274 */
6276
6277 /*
6278 * Wake up all waiters. They need to report an error that recovery was
6279 * ended before reaching the target LSN.
6280 */
6284
6285 /*
6286 * Shutdown the recovery environment. This must occur after
6287 * RecoverPreparedTransactions() (see notes in lock_twophase_recover())
6288 * and after switching SharedRecoveryState to RECOVERY_STATE_DONE so as
6289 * any session building a snapshot will not rely on KnownAssignedXids as
6290 * RecoveryInProgress() would return false at this stage. This is
6291 * particularly critical for prepared 2PC transactions, that would still
6292 * need to be included in snapshots once recovery has ended.
6293 */
6296
6297 /*
6298 * If there were cascading standby servers connected to us, nudge any wal
6299 * sender processes to notice that we've been promoted.
6300 */
6301 WalSndWakeup(true, true);
6302
6303 /*
6304 * If this was a promotion, request an (online) checkpoint now. This isn't
6305 * required for consistency, but the last restartpoint might be far back,
6306 * and in case of a crash, recovering from it might take a longer than is
6307 * appropriate now that we're not in standby mode anymore.
6308 */
6309 if (promoted)
6311}
6312
6313/*
6314 * Callback from PerformWalRecovery(), called when we switch from crash
6315 * recovery to archive recovery mode. Updates the control file accordingly.
6316 */
6317void
6319{
6320 /* initialize minRecoveryPoint to this record */
6323 if (ControlFile->minRecoveryPoint < EndRecPtr)
6324 {
6325 ControlFile->minRecoveryPoint = EndRecPtr;
6326 ControlFile->minRecoveryPointTLI = replayTLI;
6327 }
6328 /* update local copy */
6331
6332 /*
6333 * The startup process can update its local copy of minRecoveryPoint from
6334 * this point.
6335 */
6337
6339
6340 /*
6341 * We update SharedRecoveryState while holding the lock on ControlFileLock
6342 * so both states are consistent in shared memory.
6343 */
6347
6349}
6350
6351/*
6352 * Callback from PerformWalRecovery(), called when we reach the end of backup.
6353 * Updates the control file accordingly.
6354 */
6355void
6357{
6358 /*
6359 * We have reached the end of base backup, as indicated by pg_control. The
6360 * data on disk is now consistent (unless minRecoveryPoint is further
6361 * ahead, which can happen if we crashed during previous recovery). Reset
6362 * backupStartPoint and backupEndPoint, and update minRecoveryPoint to
6363 * make sure we don't allow starting up at an earlier point even if
6364 * recovery is stopped and restarted soon after this.
6365 */
6367
6368 if (ControlFile->minRecoveryPoint < EndRecPtr)
6369 {
6370 ControlFile->minRecoveryPoint = EndRecPtr;
6372 }
6373
6378
6380}
6381
6382/*
6383 * Perform whatever XLOG actions are necessary at end of REDO.
6384 *
6385 * The goal here is to make sure that we'll be able to recover properly if
6386 * we crash again. If we choose to write a checkpoint, we'll write a shutdown
6387 * checkpoint rather than an on-line one. This is not particularly critical,
6388 * but since we may be assigning a new TLI, using a shutdown checkpoint allows
6389 * us to have the rule that TLI only changes in shutdown checkpoints, which
6390 * allows some extra error checking in xlog_redo.
6391 */
6392static bool
6394{
6395 bool promoted = false;
6396
6397 /*
6398 * Perform a checkpoint to update all our recovery activity to disk.
6399 *
6400 * Note that we write a shutdown checkpoint rather than an on-line one.
6401 * This is not particularly critical, but since we may be assigning a new
6402 * TLI, using a shutdown checkpoint allows us to have the rule that TLI
6403 * only changes in shutdown checkpoints, which allows some extra error
6404 * checking in xlog_redo.
6405 *
6406 * In promotion, only create a lightweight end-of-recovery record instead
6407 * of a full checkpoint. A checkpoint is requested later, after we're
6408 * fully out of recovery mode and already accepting queries.
6409 */
6412 {
6413 promoted = true;
6414
6415 /*
6416 * Insert a special WAL record to mark the end of recovery, since we
6417 * aren't doing a checkpoint. That means that the checkpointer process
6418 * may likely be in the middle of a time-smoothed restartpoint and
6419 * could continue to be for minutes after this. That sounds strange,
6420 * but the effect is roughly the same and it would be stranger to try
6421 * to come out of the restartpoint and then checkpoint. We request a
6422 * checkpoint later anyway, just for safety.
6423 */
6425 }
6426 else
6427 {
6431 }
6432
6433 return promoted;
6434}
6435
6436/*
6437 * Is the system still in recovery?
6438 *
6439 * Unlike testing InRecovery, this works in any process that's connected to
6440 * shared memory.
6441 */
6442bool
6444{
6445 /*
6446 * We check shared state each time only until we leave recovery mode. We
6447 * can't re-enter recovery, so there's no need to keep checking after the
6448 * shared variable has once been seen false.
6449 */
6451 return false;
6452 else
6453 {
6454 /*
6455 * use volatile pointer to make sure we make a fresh read of the
6456 * shared variable.
6457 */
6458 volatile XLogCtlData *xlogctl = XLogCtl;
6459
6460 LocalRecoveryInProgress = (xlogctl->SharedRecoveryState != RECOVERY_STATE_DONE);
6461
6462 /*
6463 * Note: We don't need a memory barrier when we're still in recovery.
6464 * We might exit recovery immediately after return, so the caller
6465 * can't rely on 'true' meaning that we're still in recovery anyway.
6466 */
6467
6469 }
6470}
6471
6472/*
6473 * Returns current recovery state from shared memory.
6474 *
6475 * This returned state is kept consistent with the contents of the control
6476 * file. See details about the possible values of RecoveryState in xlog.h.
6477 */
6480{
6481 RecoveryState retval;
6482
6484 retval = XLogCtl->SharedRecoveryState;
6486
6487 return retval;
6488}
6489
6490/*
6491 * Is this process allowed to insert new WAL records?
6492 *
6493 * Ordinarily this is essentially equivalent to !RecoveryInProgress().
6494 * But we also have provisions for forcing the result "true" or "false"
6495 * within specific processes regardless of the global state.
6496 */
6497bool
6499{
6500 /*
6501 * If value is "unconditionally true" or "unconditionally false", just
6502 * return it. This provides the normal fast path once recovery is known
6503 * done.
6504 */
6505 if (LocalXLogInsertAllowed >= 0)
6506 return (bool) LocalXLogInsertAllowed;
6507
6508 /*
6509 * Else, must check to see if we're still in recovery.
6510 */
6511 if (RecoveryInProgress())
6512 return false;
6513
6514 /*
6515 * On exit from recovery, reset to "unconditionally true", since there is
6516 * no need to keep checking.
6517 */
6519 return true;
6520}
6521
6522/*
6523 * Make XLogInsertAllowed() return true in the current process only.
6524 *
6525 * Note: it is allowed to switch LocalXLogInsertAllowed back to -1 later,
6526 * and even call LocalSetXLogInsertAllowed() again after that.
6527 *
6528 * Returns the previous value of LocalXLogInsertAllowed.
6529 */
6530static int
6532{
6534
6536
6537 return oldXLogAllowed;
6538}
6539
6540/*
6541 * Return the current Redo pointer from shared memory.
6542 *
6543 * As a side-effect, the local RedoRecPtr copy is updated.
6544 */
6547{
6548 XLogRecPtr ptr;
6549
6550 /*
6551 * The possibly not up-to-date copy in XlogCtl is enough. Even if we
6552 * grabbed a WAL insertion lock to read the authoritative value in
6553 * Insert->RedoRecPtr, someone might update it just after we've released
6554 * the lock.
6555 */
6557 ptr = XLogCtl->RedoRecPtr;
6559
6560 if (RedoRecPtr < ptr)
6561 RedoRecPtr = ptr;
6562
6563 return RedoRecPtr;
6564}
6565
6566/*
6567 * Return information needed to decide whether a modified block needs a
6568 * full-page image to be included in the WAL record.
6569 *
6570 * The returned values are cached copies from backend-private memory, and
6571 * possibly out-of-date or, indeed, uninitialized, in which case they will
6572 * be InvalidXLogRecPtr and false, respectively. XLogInsertRecord will
6573 * re-check them against up-to-date values, while holding the WAL insert lock.
6574 */
6575void
6581
6582/*
6583 * GetInsertRecPtr -- Returns the current insert position.
6584 *
6585 * NOTE: The value *actually* returned is the position of the last full
6586 * xlog page. It lags behind the real insert position by at most 1 page.
6587 * For that, we don't need to scan through WAL insertion locks, and an
6588 * approximation is enough for the current usage of this function.
6589 */
6592{
6594
6598
6599 return recptr;
6600}
6601
6602/*
6603 * GetFlushRecPtr -- Returns the current flush position, ie, the last WAL
6604 * position known to be fsync'd to disk. This should only be used on a
6605 * system that is known not to be in recovery.
6606 */
6609{
6611
6613
6614 /*
6615 * If we're writing and flushing WAL, the time line can't be changing, so
6616 * no lock is required.
6617 */
6618 if (insertTLI)
6620
6621 return LogwrtResult.Flush;
6622}
6623
6624/*
6625 * GetWALInsertionTimeLine -- Returns the current timeline of a system that
6626 * is not in recovery.
6627 */
6630{
6632
6633 /* Since the value can't be changing, no lock is required. */
6634 return XLogCtl->InsertTimeLineID;
6635}
6636
6637/*
6638 * GetWALInsertionTimeLineIfSet -- If the system is not in recovery, returns
6639 * the WAL insertion timeline; else, returns 0. Wherever possible, use
6640 * GetWALInsertionTimeLine() instead, since it's cheaper. Note that this
6641 * function decides recovery has ended as soon as the insert TLI is set, which
6642 * happens before we set XLogCtl->SharedRecoveryState to RECOVERY_STATE_DONE.
6643 */
6655
6656/*
6657 * GetLastImportantRecPtr -- Returns the LSN of the last important record
6658 * inserted. All records not explicitly marked as unimportant are considered
6659 * important.
6660 *
6661 * The LSN is determined by computing the maximum of
6662 * WALInsertLocks[i].lastImportantAt.
6663 */
6666{
6668 int i;
6669
6670 for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++)
6671 {
6673
6674 /*
6675 * Need to take a lock to prevent torn reads of the LSN, which are
6676 * possible on some of the supported platforms. WAL insert locks only
6677 * support exclusive mode, so we have to use that.
6678 */
6681 LWLockRelease(&WALInsertLocks[i].l.lock);
6682
6683 if (res < last_important)
6684 res = last_important;
6685 }
6686
6687 return res;
6688}
6689
6690/*
6691 * Get the time and LSN of the last xlog segment switch
6692 */
6695{
6696 pg_time_t result;
6697
6698 /* Need WALWriteLock, but shared lock is sufficient */
6700 result = XLogCtl->lastSegSwitchTime;
6703
6704 return result;
6705}
6706
6707/*
6708 * This must be called ONCE during postmaster or standalone-backend shutdown
6709 */
6710void
6712{
6713 /*
6714 * We should have an aux process resource owner to use, and we should not
6715 * be in a transaction that's installed some other resowner.
6716 */
6721
6722 /* Don't be chatty in standalone mode */
6724 (errmsg("shutting down")));
6725
6726 /*
6727 * Signal walsenders to move to stopping state.
6728 */
6730
6731 /*
6732 * Wait for WAL senders to be in stopping state. This prevents commands
6733 * from writing new WAL.
6734 */
6736
6737 if (RecoveryInProgress())
6739 else
6740 {
6741 /*
6742 * If archiving is enabled, rotate the last XLOG file so that all the
6743 * remaining records are archived (postmaster wakes up the archiver
6744 * process one more time at the end of shutdown). The checkpoint
6745 * record will go to the next XLOG file and won't be archived (yet).
6746 */
6747 if (XLogArchivingActive())
6748 RequestXLogSwitch(false);
6749
6751 }
6752}
6753
6754/*
6755 * Format checkpoint request flags as a space-separated string for
6756 * log messages.
6757 */
6758static const char *
6760{
6761 static char buf[128];
6762
6763 snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s",
6764 (flags & CHECKPOINT_IS_SHUTDOWN) ? " shutdown" : "",
6765 (flags & CHECKPOINT_END_OF_RECOVERY) ? " end-of-recovery" : "",
6766 (flags & CHECKPOINT_FAST) ? " fast" : "",
6767 (flags & CHECKPOINT_FORCE) ? " force" : "",
6768 (flags & CHECKPOINT_WAIT) ? " wait" : "",
6769 (flags & CHECKPOINT_CAUSE_XLOG) ? " wal" : "",
6770 (flags & CHECKPOINT_CAUSE_TIME) ? " time" : "",
6771 (flags & CHECKPOINT_FLUSH_UNLOGGED) ? " flush-unlogged" : "");
6772
6773 return buf;
6774}
6775
6776/*
6777 * Log start of a checkpoint.
6778 */
6779static void
6781{
6782 if (restartpoint)
6783 ereport(LOG,
6784 /* translator: the placeholder shows checkpoint options */
6785 (errmsg("restartpoint starting:%s",
6786 CheckpointFlagsString(flags))));
6787 else
6788 ereport(LOG,
6789 /* translator: the placeholder shows checkpoint options */
6790 (errmsg("checkpoint starting:%s",
6791 CheckpointFlagsString(flags))));
6792}
6793
6794/*
6795 * Log end of a checkpoint.
6796 */
6797static void
6799{
6800 long write_msecs,
6801 sync_msecs,
6806
6808
6811
6814
6815 /* Accumulate checkpoint timing summary data, in milliseconds. */
6818
6819 /*
6820 * All of the published timing statistics are accounted for. Only
6821 * continue if a log message is to be written.
6822 */
6823 if (!log_checkpoints)
6824 return;
6825
6828
6829 /*
6830 * Timing values returned from CheckpointStats are in microseconds.
6831 * Convert to milliseconds for consistent printing.
6832 */
6834
6839 average_msecs = (long) ((average_sync_time + 999) / 1000);
6840
6841 /*
6842 * ControlFileLock is not required to see ControlFile->checkPoint and
6843 * ->checkPointCopy here as we are the only updator of those variables at
6844 * this moment.
6845 */
6846 if (restartpoint)
6847 ereport(LOG,
6848 (errmsg("restartpoint complete:%s: wrote %d buffers (%.1f%%), "
6849 "wrote %d SLRU buffers; %d WAL file(s) added, "
6850 "%d removed, %d recycled; write=%ld.%03d s, "
6851 "sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
6852 "longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
6853 "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
6854 CheckpointFlagsString(flags),
6861 write_msecs / 1000, (int) (write_msecs % 1000),
6862 sync_msecs / 1000, (int) (sync_msecs % 1000),
6863 total_msecs / 1000, (int) (total_msecs % 1000),
6865 longest_msecs / 1000, (int) (longest_msecs % 1000),
6866 average_msecs / 1000, (int) (average_msecs % 1000),
6867 (int) (PrevCheckPointDistance / 1024.0),
6868 (int) (CheckPointDistanceEstimate / 1024.0),
6871 else
6872 ereport(LOG,
6873 (errmsg("checkpoint complete:%s: wrote %d buffers (%.1f%%), "
6874 "wrote %d SLRU buffers; %d WAL file(s) added, "
6875 "%d removed, %d recycled; write=%ld.%03d s, "
6876 "sync=%ld.%03d s, total=%ld.%03d s; sync files=%d, "
6877 "longest=%ld.%03d s, average=%ld.%03d s; distance=%d kB, "
6878 "estimate=%d kB; lsn=%X/%08X, redo lsn=%X/%08X",
6879 CheckpointFlagsString(flags),
6886 write_msecs / 1000, (int) (write_msecs % 1000),
6887 sync_msecs / 1000, (int) (sync_msecs % 1000),
6888 total_msecs / 1000, (int) (total_msecs % 1000),
6890 longest_msecs / 1000, (int) (longest_msecs % 1000),
6891 average_msecs / 1000, (int) (average_msecs % 1000),
6892 (int) (PrevCheckPointDistance / 1024.0),
6893 (int) (CheckPointDistanceEstimate / 1024.0),
6896}
6897
6898/*
6899 * Update the estimate of distance between checkpoints.
6900 *
6901 * The estimate is used to calculate the number of WAL segments to keep
6902 * preallocated, see XLOGfileslop().
6903 */
6904static void
6906{
6907 /*
6908 * To estimate the number of segments consumed between checkpoints, keep a
6909 * moving average of the amount of WAL generated in previous checkpoint
6910 * cycles. However, if the load is bursty, with quiet periods and busy
6911 * periods, we want to cater for the peak load. So instead of a plain
6912 * moving average, let the average decline slowly if the previous cycle
6913 * used less WAL than estimated, but bump it up immediately if it used
6914 * more.
6915 *
6916 * When checkpoints are triggered by max_wal_size, this should converge to
6917 * CheckpointSegments * wal_segment_size,
6918 *
6919 * Note: This doesn't pay any attention to what caused the checkpoint.
6920 * Checkpoints triggered manually with CHECKPOINT command, or by e.g.
6921 * starting a base backup, are counted the same as those created
6922 * automatically. The slow-decline will largely mask them out, if they are
6923 * not frequent. If they are frequent, it seems reasonable to count them
6924 * in as any others; if you issue a manual checkpoint every 5 minutes and
6925 * never let a timed checkpoint happen, it makes sense to base the
6926 * preallocation on that 5 minute interval rather than whatever
6927 * checkpoint_timeout is set to.
6928 */
6929 PrevCheckPointDistance = nbytes;
6930 if (CheckPointDistanceEstimate < nbytes)
6932 else
6934 (0.90 * CheckPointDistanceEstimate + 0.10 * (double) nbytes);
6935}
6936
6937/*
6938 * Update the ps display for a process running a checkpoint. Note that
6939 * this routine should not do any allocations so as it can be called
6940 * from a critical section.
6941 */
6942static void
6944{
6945 /*
6946 * The status is reported only for end-of-recovery and shutdown
6947 * checkpoints or shutdown restartpoints. Updating the ps display is
6948 * useful in those situations as it may not be possible to rely on
6949 * pg_stat_activity to see the status of the checkpointer or the startup
6950 * process.
6951 */
6953 return;
6954
6955 if (reset)
6956 set_ps_display("");
6957 else
6958 {
6959 char activitymsg[128];
6960
6961 snprintf(activitymsg, sizeof(activitymsg), "performing %s%s%s",
6962 (flags & CHECKPOINT_END_OF_RECOVERY) ? "end-of-recovery " : "",
6963 (flags & CHECKPOINT_IS_SHUTDOWN) ? "shutdown " : "",
6964 restartpoint ? "restartpoint" : "checkpoint");
6966 }
6967}
6968
6969
6970/*
6971 * Perform a checkpoint --- either during shutdown, or on-the-fly
6972 *
6973 * flags is a bitwise OR of the following:
6974 * CHECKPOINT_IS_SHUTDOWN: checkpoint is for database shutdown.
6975 * CHECKPOINT_END_OF_RECOVERY: checkpoint is for end of WAL recovery.
6976 * CHECKPOINT_FAST: finish the checkpoint ASAP, ignoring
6977 * checkpoint_completion_target parameter.
6978 * CHECKPOINT_FORCE: force a checkpoint even if no XLOG activity has occurred
6979 * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
6980 * CHECKPOINT_END_OF_RECOVERY).
6981 * CHECKPOINT_FLUSH_UNLOGGED: also flush buffers of unlogged tables.
6982 *
6983 * Note: flags contains other bits, of interest here only for logging purposes.
6984 * In particular note that this routine is synchronous and does not pay
6985 * attention to CHECKPOINT_WAIT.
6986 *
6987 * If !shutdown then we are writing an online checkpoint. An XLOG_CHECKPOINT_REDO
6988 * record is inserted into WAL at the logical location of the checkpoint, before
6989 * flushing anything to disk, and when the checkpoint is eventually completed,
6990 * and it is from this point that WAL replay will begin in the case of a recovery
6991 * from this checkpoint. Once everything is written to disk, an
6992 * XLOG_CHECKPOINT_ONLINE record is written to complete the checkpoint, and
6993 * points back to the earlier XLOG_CHECKPOINT_REDO record. This mechanism allows
6994 * other write-ahead log records to be written while the checkpoint is in
6995 * progress, but we must be very careful about order of operations. This function
6996 * may take many minutes to execute on a busy system.
6997 *
6998 * On the other hand, when shutdown is true, concurrent insertion into the
6999 * write-ahead log is impossible, so there is no need for two separate records.
7000 * In this case, we only insert an XLOG_CHECKPOINT_SHUTDOWN record, and it's
7001 * both the record marking the completion of the checkpoint and the location
7002 * from which WAL replay would begin if needed.
7003 *
7004 * Returns true if a new checkpoint was performed, or false if it was skipped
7005 * because the system was idle.
7006 */
7007bool
7009{
7010 bool shutdown;
7011 CheckPoint checkPoint;
7015 uint32 freespace;
7019 int nvxids;
7020 int oldXLogAllowed = 0;
7021
7022 /*
7023 * An end-of-recovery checkpoint is really a shutdown checkpoint, just
7024 * issued at a different time.
7025 */
7027 shutdown = true;
7028 else
7029 shutdown = false;
7030
7031 /* sanity check */
7032 if (RecoveryInProgress() && (flags & CHECKPOINT_END_OF_RECOVERY) == 0)
7033 elog(ERROR, "can't create a checkpoint during recovery");
7034
7035 /*
7036 * Prepare to accumulate statistics.
7037 *
7038 * Note: because it is possible for log_checkpoints to change while a
7039 * checkpoint proceeds, we always accumulate stats, even if
7040 * log_checkpoints is currently off.
7041 */
7044
7045 /*
7046 * Let smgr prepare for checkpoint; this has to happen outside the
7047 * critical section and before we determine the REDO pointer. Note that
7048 * smgr must not do anything that'd have to be undone if we decide no
7049 * checkpoint is needed.
7050 */
7052
7053 /* Run these points outside the critical section. */
7054 INJECTION_POINT("create-checkpoint-initial", NULL);
7055 INJECTION_POINT_LOAD("create-checkpoint-run");
7056
7057 /*
7058 * Use a critical section to force system panic if we have trouble.
7059 */
7061
7062 if (shutdown)
7063 {
7068 }
7069
7070 /* Begin filling in the checkpoint WAL record */
7071 MemSet(&checkPoint, 0, sizeof(checkPoint));
7072 checkPoint.time = (pg_time_t) time(NULL);
7073
7074 /*
7075 * For Hot Standby, derive the oldestActiveXid before we fix the redo
7076 * pointer. This allows us to begin accumulating changes to assemble our
7077 * starting snapshot of locks and transactions.
7078 */
7080 checkPoint.oldestActiveXid = GetOldestActiveTransactionId(false, true);
7081 else
7083
7084 /*
7085 * Get location of last important record before acquiring insert locks (as
7086 * GetLastImportantRecPtr() also locks WAL locks).
7087 */
7089
7090 /*
7091 * If this isn't a shutdown or forced checkpoint, and if there has been no
7092 * WAL activity requiring a checkpoint, skip it. The idea here is to
7093 * avoid inserting duplicate checkpoints when the system is idle.
7094 */
7096 CHECKPOINT_FORCE)) == 0)
7097 {
7099 {
7102 (errmsg_internal("checkpoint skipped because system is idle")));
7103 return false;
7104 }
7105 }
7106
7107 /*
7108 * An end-of-recovery checkpoint is created before anyone is allowed to
7109 * write WAL. To allow us to write the checkpoint record, temporarily
7110 * enable XLogInsertAllowed.
7111 */
7112 if (flags & CHECKPOINT_END_OF_RECOVERY)
7114
7116 if (flags & CHECKPOINT_END_OF_RECOVERY)
7118 else
7119 checkPoint.PrevTimeLineID = checkPoint.ThisTimeLineID;
7120
7121 /*
7122 * We must block concurrent insertions while examining insert state.
7123 */
7125
7126 checkPoint.fullPageWrites = Insert->fullPageWrites;
7127 checkPoint.wal_level = wal_level;
7128
7129 if (shutdown)
7130 {
7132
7133 /*
7134 * Compute new REDO record ptr = location of next XLOG record.
7135 *
7136 * Since this is a shutdown checkpoint, there can't be any concurrent
7137 * WAL insertion.
7138 */
7139 freespace = INSERT_FREESPACE(curInsert);
7140 if (freespace == 0)
7141 {
7144 else
7146 }
7147 checkPoint.redo = curInsert;
7148
7149 /*
7150 * Here we update the shared RedoRecPtr for future XLogInsert calls;
7151 * this must be done while holding all the insertion locks.
7152 *
7153 * Note: if we fail to complete the checkpoint, RedoRecPtr will be
7154 * left pointing past where it really needs to point. This is okay;
7155 * the only consequence is that XLogInsert might back up whole buffers
7156 * that it didn't really need to. We can't postpone advancing
7157 * RedoRecPtr because XLogInserts that happen while we are dumping
7158 * buffers must assume that their buffer changes are not included in
7159 * the checkpoint.
7160 */
7161 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
7162 }
7163
7164 /*
7165 * Now we can release the WAL insertion locks, allowing other xacts to
7166 * proceed while we are flushing disk buffers.
7167 */
7169
7170 /*
7171 * If this is an online checkpoint, we have not yet determined the redo
7172 * point. We do so now by inserting the special XLOG_CHECKPOINT_REDO
7173 * record; the LSN at which it starts becomes the new redo pointer. We
7174 * don't do this for a shutdown checkpoint, because in that case no WAL
7175 * can be written between the redo point and the insertion of the
7176 * checkpoint record itself, so the checkpoint record itself serves to
7177 * mark the redo point.
7178 */
7179 if (!shutdown)
7180 {
7181 /* Include WAL level in record for WAL summarizer's benefit. */
7185
7186 /*
7187 * XLogInsertRecord will have updated XLogCtl->Insert.RedoRecPtr in
7188 * shared memory and RedoRecPtr in backend-local memory, but we need
7189 * to copy that into the record that will be inserted when the
7190 * checkpoint is complete.
7191 */
7192 checkPoint.redo = RedoRecPtr;
7193 }
7194
7195 /* Update the info_lck-protected copy of RedoRecPtr as well */
7197 XLogCtl->RedoRecPtr = checkPoint.redo;
7199
7200 /*
7201 * If enabled, log checkpoint start. We postpone this until now so as not
7202 * to log anything if we decided to skip the checkpoint.
7203 */
7204 if (log_checkpoints)
7205 LogCheckpointStart(flags, false);
7206
7207 INJECTION_POINT_CACHED("create-checkpoint-run", NULL);
7208
7209 /* Update the process title */
7210 update_checkpoint_display(flags, false, false);
7211
7213
7214 /*
7215 * Get the other info we need for the checkpoint record.
7216 *
7217 * We don't need to save oldestClogXid in the checkpoint, it only matters
7218 * for the short period in which clog is being truncated, and if we crash
7219 * during that we'll redo the clog truncation and fix up oldestClogXid
7220 * there.
7221 */
7223 checkPoint.nextXid = TransamVariables->nextXid;
7224 checkPoint.oldestXid = TransamVariables->oldestXid;
7227
7232
7234 checkPoint.nextOid = TransamVariables->nextOid;
7235 if (!shutdown)
7236 checkPoint.nextOid += TransamVariables->oidCount;
7238
7240
7242 &checkPoint.nextMulti,
7243 &checkPoint.nextMultiOffset,
7244 &checkPoint.oldestMulti,
7245 &checkPoint.oldestMultiDB);
7246
7247 /*
7248 * Having constructed the checkpoint record, ensure all shmem disk buffers
7249 * and commit-log buffers are flushed to disk.
7250 *
7251 * This I/O could fail for various reasons. If so, we will fail to
7252 * complete the checkpoint, but there is no reason to force a system
7253 * panic. Accordingly, exit critical section while doing it.
7254 */
7256
7257 /*
7258 * In some cases there are groups of actions that must all occur on one
7259 * side or the other of a checkpoint record. Before flushing the
7260 * checkpoint record we must explicitly wait for any backend currently
7261 * performing those groups of actions.
7262 *
7263 * One example is end of transaction, so we must wait for any transactions
7264 * that are currently in commit critical sections. If an xact inserted
7265 * its commit record into XLOG just before the REDO point, then a crash
7266 * restart from the REDO point would not replay that record, which means
7267 * that our flushing had better include the xact's update of pg_xact. So
7268 * we wait till he's out of his commit critical section before proceeding.
7269 * See notes in RecordTransactionCommit().
7270 *
7271 * Because we've already released the insertion locks, this test is a bit
7272 * fuzzy: it is possible that we will wait for xacts we didn't really need
7273 * to wait for. But the delay should be short and it seems better to make
7274 * checkpoint take a bit longer than to hold off insertions longer than
7275 * necessary. (In fact, the whole reason we have this issue is that xact.c
7276 * does commit record XLOG insertion and clog update as two separate steps
7277 * protected by different locks, but again that seems best on grounds of
7278 * minimizing lock contention.)
7279 *
7280 * A transaction that has not yet set delayChkptFlags when we look cannot
7281 * be at risk, since it has not inserted its commit record yet; and one
7282 * that's already cleared it is not at risk either, since it's done fixing
7283 * clog and we will correctly flush the update below. So we cannot miss
7284 * any xacts we need to wait for.
7285 */
7287 if (nvxids > 0)
7288 {
7289 do
7290 {
7291 /*
7292 * Keep absorbing fsync requests while we wait. There could even
7293 * be a deadlock if we don't, if the process that prevents the
7294 * checkpoint is trying to add a request to the queue.
7295 */
7297
7299 pg_usleep(10000L); /* wait for 10 msec */
7303 }
7304 pfree(vxids);
7305
7306 CheckPointGuts(checkPoint.redo, flags);
7307
7309 if (nvxids > 0)
7310 {
7311 do
7312 {
7314
7316 pg_usleep(10000L); /* wait for 10 msec */
7320 }
7321 pfree(vxids);
7322
7323 /*
7324 * Take a snapshot of running transactions and write this to WAL. This
7325 * allows us to reconstruct the state of running transactions during
7326 * archive recovery, if required. Skip, if this info disabled.
7327 *
7328 * If we are shutting down, or Startup process is completing crash
7329 * recovery we don't need to write running xact data.
7330 */
7333
7335
7336 /*
7337 * Now insert the checkpoint record into XLOG.
7338 */
7340 XLogRegisterData(&checkPoint, sizeof(checkPoint));
7344
7346
7347 /*
7348 * We mustn't write any new WAL after a shutdown checkpoint, or it will be
7349 * overwritten at next startup. No-one should even try, this just allows
7350 * sanity-checking. In the case of an end-of-recovery checkpoint, we want
7351 * to just temporarily disable writing until the system has exited
7352 * recovery.
7353 */
7354 if (shutdown)
7355 {
7356 if (flags & CHECKPOINT_END_OF_RECOVERY)
7358 else
7359 LocalXLogInsertAllowed = 0; /* never again write WAL */
7360 }
7361
7362 /*
7363 * We now have ProcLastRecPtr = start of actual checkpoint record, recptr
7364 * = end of actual checkpoint record.
7365 */
7366 if (shutdown && checkPoint.redo != ProcLastRecPtr)
7367 ereport(PANIC,
7368 (errmsg("concurrent write-ahead log activity while database system is shutting down")));
7369
7370 /*
7371 * Remember the prior checkpoint's redo ptr for
7372 * UpdateCheckPointDistanceEstimate()
7373 */
7375
7376 /*
7377 * Update the control file.
7378 */
7380 if (shutdown)
7383 ControlFile->checkPointCopy = checkPoint;
7384 /* crash recovery should always recover to the end of WAL */
7387
7388 /*
7389 * Persist unloggedLSN value. It's reset on crash recovery, so this goes
7390 * unused on non-shutdown checkpoints, but seems useful to store it always
7391 * for debugging purposes.
7392 */
7394
7397
7398 /*
7399 * We are now done with critical updates; no need for system panic if we
7400 * have trouble while fooling with old log segments.
7401 */
7403
7404 /*
7405 * WAL summaries end when the next XLOG_CHECKPOINT_REDO or
7406 * XLOG_CHECKPOINT_SHUTDOWN record is reached. This is the first point
7407 * where (a) we're not inside of a critical section and (b) we can be
7408 * certain that the relevant record has been flushed to disk, which must
7409 * happen before it can be summarized.
7410 *
7411 * If this is a shutdown checkpoint, then this happens reasonably
7412 * promptly: we've only just inserted and flushed the
7413 * XLOG_CHECKPOINT_SHUTDOWN record. If this is not a shutdown checkpoint,
7414 * then this might not be very prompt at all: the XLOG_CHECKPOINT_REDO
7415 * record was written before we began flushing data to disk, and that
7416 * could be many minutes ago at this point. However, we don't XLogFlush()
7417 * after inserting that record, so we're not guaranteed that it's on disk
7418 * until after the above call that flushes the XLOG_CHECKPOINT_ONLINE
7419 * record.
7420 */
7422
7423 /*
7424 * Let smgr do post-checkpoint cleanup (eg, deleting old files).
7425 */
7427
7428 /*
7429 * Update the average distance between checkpoints if the prior checkpoint
7430 * exists.
7431 */
7434
7435 INJECTION_POINT("checkpoint-before-old-wal-removal", NULL);
7436
7437 /*
7438 * Delete old log files, those no longer needed for last checkpoint to
7439 * prevent the disk holding the xlog from growing full.
7440 */
7446 {
7447 /*
7448 * Some slots have been invalidated; recalculate the old-segment
7449 * horizon, starting again from RedoRecPtr.
7450 */
7453 }
7454 _logSegNo--;
7456 checkPoint.ThisTimeLineID);
7457
7458 /*
7459 * Make more log segments if needed. (Do this after recycling old log
7460 * segments, since that may supply some of the needed files.)
7461 */
7462 if (!shutdown)
7464
7465 /*
7466 * Truncate pg_subtrans if possible. We can throw away all data before
7467 * the oldest XMIN of any running transaction. No future transaction will
7468 * attempt to reference any pg_subtrans entry older than that (see Asserts
7469 * in subtrans.c). During recovery, though, we mustn't do this because
7470 * StartupSUBTRANS hasn't been called yet.
7471 */
7472 if (!RecoveryInProgress())
7474
7475 /* Real work is done; log and update stats. */
7476 LogCheckpointEnd(false, flags);
7477
7478 /* Reset the process title */
7479 update_checkpoint_display(flags, false, true);
7480
7482 NBuffers,
7486
7487 return true;
7488}
7489
7490/*
7491 * Mark the end of recovery in WAL, but without running a full checkpoint.
7492 * We can expect that a restartpoint is likely to be in progress as we
7493 * do this, though we are unwilling to wait for it to complete.
7494 *
7495 * CreateRestartPoint() allows for the case where recovery may end before
7496 * the restartpoint completes, so there is no concern about concurrent behaviour.
7497 */
7498static void
7500{
7503
7504 /* sanity check */
7505 if (!RecoveryInProgress())
7506 elog(ERROR, "can only be used to end recovery");
7507
7508 xlrec.end_time = GetCurrentTimestamp();
7509 xlrec.wal_level = wal_level;
7510
7512 xlrec.ThisTimeLineID = XLogCtl->InsertTimeLineID;
7513 xlrec.PrevTimeLineID = XLogCtl->PrevTimeLineID;
7515
7517
7521
7523
7524 /*
7525 * Update the control file so that crash recovery can follow the timeline
7526 * changes to this point.
7527 */
7530 ControlFile->minRecoveryPointTLI = xlrec.ThisTimeLineID;
7533
7535}
7536
7537/*
7538 * Write an OVERWRITE_CONTRECORD message.
7539 *
7540 * When, during WAL replay, we expect a continuation record at the start of
7541 * a page that is not there, recovery ends and WAL writing resumes at that point.
7542 * But it's wrong to resume writing new WAL back at the start of the record
7543 * that was broken, because downstream consumers of that WAL (physical
7544 * replicas) are not prepared to "rewind". So the first action after
7545 * finishing replay of all valid WAL must be to write a record of this type
7546 * at the point where the contrecord was missing; to support xlogreader
7547 * detecting the special case, XLP_FIRST_IS_OVERWRITE_CONTRECORD is also added
7548 * to the page header where the record occurs. xlogreader has an ad-hoc
7549 * mechanism to report metadata about the broken record, which is what we
7550 * use here.
7551 *
7552 * At replay time, XLP_FIRST_IS_OVERWRITE_CONTRECORD instructs xlogreader to
7553 * skip the record it was reading, and pass back the LSN of the skipped
7554 * record, so that its caller can verify (on "replay" of that record) that the
7555 * XLOG_OVERWRITE_CONTRECORD matches what was effectively overwritten.
7556 *
7557 * 'aborted_lsn' is the beginning position of the record that was incomplete.
7558 * It is included in the WAL record. 'pagePtr' and 'newTLI' point to the
7559 * beginning of the XLOG page where the record is to be inserted. They must
7560 * match the current WAL insert position, they're passed here just so that we
7561 * can verify that.
7562 */
7563static XLogRecPtr
7566{
7571
7572 /* sanity checks */
7573 if (!RecoveryInProgress())
7574 elog(ERROR, "can only be used at end of recovery");
7575 if (pagePtr % XLOG_BLCKSZ != 0)
7576 elog(ERROR, "invalid position for missing continuation record %X/%08X",
7578
7579 /* The current WAL insert position should be right after the page header */
7580 startPos = pagePtr;
7583 else
7586 if (recptr != startPos)
7587 elog(ERROR, "invalid WAL insert position %X/%08X for OVERWRITE_CONTRECORD",
7589
7591
7592 /*
7593 * Initialize the XLOG page header (by GetXLogBuffer), and set the
7594 * XLP_FIRST_IS_OVERWRITE_CONTRECORD flag.
7595 *
7596 * No other backend is allowed to write WAL yet, so acquiring the WAL
7597 * insertion lock is just pro forma.
7598 */
7603
7604 /*
7605 * Insert the XLOG_OVERWRITE_CONTRECORD record as the first record on the
7606 * page. We know it becomes the first record, because no other backend is
7607 * allowed to write WAL yet.
7608 */
7610 xlrec.overwritten_lsn = aborted_lsn;
7611 xlrec.overwrite_time = GetCurrentTimestamp();
7614
7615 /* check that the record was inserted to the right place */
7616 if (ProcLastRecPtr != startPos)
7617 elog(ERROR, "OVERWRITE_CONTRECORD was inserted to unexpected position %X/%08X",
7619
7621
7623
7624 return recptr;
7625}
7626
7627/*
7628 * Flush all data in shared memory to disk, and fsync
7629 *
7630 * This is the common code shared between regular checkpoints and
7631 * recovery restartpoints.
7632 */
7633static void
7635{
7641
7642 /* Write out all dirty data in SLRUs and the main buffer pool */
7650 CheckPointBuffers(flags);
7651
7652 /* Perform all queued up fsyncs */
7658
7659 /* We deliberately delay 2PC checkpointing as long as possible */
7661}
7662
7663/*
7664 * Save a checkpoint for recovery restart if appropriate
7665 *
7666 * This function is called each time a checkpoint record is read from XLOG.
7667 * It must determine whether the checkpoint represents a safe restartpoint or
7668 * not. If so, the checkpoint record is stashed in shared memory so that
7669 * CreateRestartPoint can consult it. (Note that the latter function is
7670 * executed by the checkpointer, while this one will be executed by the
7671 * startup process.)
7672 */
7673static void
7675{
7676 /*
7677 * Also refrain from creating a restartpoint if we have seen any
7678 * references to non-existent pages. Restarting recovery from the
7679 * restartpoint would not see the references, so we would lose the
7680 * cross-check that the pages belonged to a relation that was dropped
7681 * later.
7682 */
7684 {
7685 elog(DEBUG2,
7686 "could not record restart point at %X/%08X because there are unresolved references to invalid pages",
7687 LSN_FORMAT_ARGS(checkPoint->redo));
7688 return;
7689 }
7690
7691 /*
7692 * Copy the checkpoint record to shared memory, so that checkpointer can
7693 * work out the next time it wants to perform a restartpoint.
7694 */
7698 XLogCtl->lastCheckPoint = *checkPoint;
7700}
7701
7702/*
7703 * Establish a restartpoint if possible.
7704 *
7705 * This is similar to CreateCheckPoint, but is used during WAL recovery
7706 * to establish a point from which recovery can roll forward without
7707 * replaying the entire recovery log.
7708 *
7709 * Returns true if a new restartpoint was established. We can only establish
7710 * a restartpoint if we have replayed a safe checkpoint record since last
7711 * restartpoint.
7712 */
7713bool
7715{
7716 XLogRecPtr lastCheckPointRecPtr;
7717 XLogRecPtr lastCheckPointEndPtr;
7718 CheckPoint lastCheckPoint;
7722 TimeLineID replayTLI;
7723 XLogRecPtr endptr;
7726
7727 /* Concurrent checkpoint/restartpoint cannot happen */
7729
7730 /* Get a local copy of the last safe checkpoint record. */
7732 lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
7733 lastCheckPointEndPtr = XLogCtl->lastCheckPointEndPtr;
7734 lastCheckPoint = XLogCtl->lastCheckPoint;
7736
7737 /*
7738 * Check that we're still in recovery mode. It's ok if we exit recovery
7739 * mode after this check, the restart point is valid anyway.
7740 */
7741 if (!RecoveryInProgress())
7742 {
7744 (errmsg_internal("skipping restartpoint, recovery has already ended")));
7745 return false;
7746 }
7747
7748 /*
7749 * If the last checkpoint record we've replayed is already our last
7750 * restartpoint, we can't perform a new restart point. We still update
7751 * minRecoveryPoint in that case, so that if this is a shutdown restart
7752 * point, we won't start up earlier than before. That's not strictly
7753 * necessary, but when hot standby is enabled, it would be rather weird if
7754 * the database opened up for read-only connections at a point-in-time
7755 * before the last shutdown. Such time travel is still possible in case of
7756 * immediate shutdown, though.
7757 *
7758 * We don't explicitly advance minRecoveryPoint when we do create a
7759 * restartpoint. It's assumed that flushing the buffers will do that as a
7760 * side-effect.
7761 */
7762 if (!XLogRecPtrIsValid(lastCheckPointRecPtr) ||
7763 lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
7764 {
7766 errmsg_internal("skipping restartpoint, already performed at %X/%08X",
7767 LSN_FORMAT_ARGS(lastCheckPoint.redo)));
7768
7770 if (flags & CHECKPOINT_IS_SHUTDOWN)
7771 {
7776 }
7777 return false;
7778 }
7779
7780 /*
7781 * Update the shared RedoRecPtr so that the startup process can calculate
7782 * the number of segments replayed since last restartpoint, and request a
7783 * restartpoint if it exceeds CheckPointSegments.
7784 *
7785 * Like in CreateCheckPoint(), hold off insertions to update it, although
7786 * during recovery this is just pro forma, because no WAL insertions are
7787 * happening.
7788 */
7790 RedoRecPtr = XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
7792
7793 /* Also update the info_lck-protected copy */
7795 XLogCtl->RedoRecPtr = lastCheckPoint.redo;
7797
7798 /*
7799 * Prepare to accumulate statistics.
7800 *
7801 * Note: because it is possible for log_checkpoints to change while a
7802 * checkpoint proceeds, we always accumulate stats, even if
7803 * log_checkpoints is currently off.
7804 */
7807
7808 if (log_checkpoints)
7809 LogCheckpointStart(flags, true);
7810
7811 /* Update the process title */
7812 update_checkpoint_display(flags, true, false);
7813
7814 CheckPointGuts(lastCheckPoint.redo, flags);
7815
7816 /*
7817 * This location needs to be after CheckPointGuts() to ensure that some
7818 * work has already happened during this checkpoint.
7819 */
7820 INJECTION_POINT("create-restart-point", NULL);
7821
7822 /*
7823 * Remember the prior checkpoint's redo ptr for
7824 * UpdateCheckPointDistanceEstimate()
7825 */
7827
7828 /*
7829 * Update pg_control, using current time. Check that it still shows an
7830 * older checkpoint, else do nothing; this is a quick hack to make sure
7831 * nothing really bad happens if somehow we get here after the
7832 * end-of-recovery checkpoint.
7833 */
7835 if (ControlFile->checkPointCopy.redo < lastCheckPoint.redo)
7836 {
7837 /*
7838 * Update the checkpoint information. We do this even if the cluster
7839 * does not show DB_IN_ARCHIVE_RECOVERY to match with the set of WAL
7840 * segments recycled below.
7841 */
7842 ControlFile->checkPoint = lastCheckPointRecPtr;
7843 ControlFile->checkPointCopy = lastCheckPoint;
7844
7845 /*
7846 * Ensure minRecoveryPoint is past the checkpoint record and update it
7847 * if the control file still shows DB_IN_ARCHIVE_RECOVERY. Normally,
7848 * this will have happened already while writing out dirty buffers,
7849 * but not necessarily - e.g. because no buffers were dirtied. We do
7850 * this because a backup performed in recovery uses minRecoveryPoint
7851 * to determine which WAL files must be included in the backup, and
7852 * the file (or files) containing the checkpoint record must be
7853 * included, at a minimum. Note that for an ordinary restart of
7854 * recovery there's no value in having the minimum recovery point any
7855 * earlier than this anyway, because redo will begin just after the
7856 * checkpoint record.
7857 */
7859 {
7860 if (ControlFile->minRecoveryPoint < lastCheckPointEndPtr)
7861 {
7862 ControlFile->minRecoveryPoint = lastCheckPointEndPtr;
7864
7865 /* update local copy */
7868 }
7869 if (flags & CHECKPOINT_IS_SHUTDOWN)
7871 }
7873 }
7875
7876 /*
7877 * Update the average distance between checkpoints/restartpoints if the
7878 * prior checkpoint exists.
7879 */
7882
7883 /*
7884 * Delete old log files, those no longer needed for last restartpoint to
7885 * prevent the disk holding the xlog from growing full.
7886 */
7888
7889 /*
7890 * Retreat _logSegNo using the current end of xlog replayed or received,
7891 * whichever is later.
7892 */
7894 replayPtr = GetXLogReplayRecPtr(&replayTLI);
7895 endptr = (receivePtr < replayPtr) ? replayPtr : receivePtr;
7896 KeepLogSeg(endptr, &_logSegNo);
7897
7898 INJECTION_POINT("restartpoint-before-slot-invalidation", NULL);
7899
7903 {
7904 /*
7905 * Some slots have been invalidated; recalculate the old-segment
7906 * horizon, starting again from RedoRecPtr.
7907 */
7909 KeepLogSeg(endptr, &_logSegNo);
7910 }
7911 _logSegNo--;
7912
7913 /*
7914 * Try to recycle segments on a useful timeline. If we've been promoted
7915 * since the beginning of this restartpoint, use the new timeline chosen
7916 * at end of recovery. If we're still in recovery, use the timeline we're
7917 * currently replaying.
7918 *
7919 * There is no guarantee that the WAL segments will be useful on the
7920 * current timeline; if recovery proceeds to a new timeline right after
7921 * this, the pre-allocated WAL segments on this timeline will not be used,
7922 * and will go wasted until recycled on the next restartpoint. We'll live
7923 * with that.
7924 */
7925 if (!RecoveryInProgress())
7926 replayTLI = XLogCtl->InsertTimeLineID;
7927
7928 RemoveOldXlogFiles(_logSegNo, RedoRecPtr, endptr, replayTLI);
7929
7930 /*
7931 * Make more log segments if needed. (Do this after recycling old log
7932 * segments, since that may supply some of the needed files.)
7933 */
7934 PreallocXlogFiles(endptr, replayTLI);
7935
7936 /*
7937 * Truncate pg_subtrans if possible. We can throw away all data before
7938 * the oldest XMIN of any running transaction. No future transaction will
7939 * attempt to reference any pg_subtrans entry older than that (see Asserts
7940 * in subtrans.c). When hot standby is disabled, though, we mustn't do
7941 * this because StartupSUBTRANS hasn't been called yet.
7942 */
7943 if (EnableHotStandby)
7945
7946 /* Real work is done; log and update stats. */
7947 LogCheckpointEnd(true, flags);
7948
7949 /* Reset the process title */
7950 update_checkpoint_display(flags, true, true);
7951
7954 errmsg("recovery restart point at %X/%08X",
7955 LSN_FORMAT_ARGS(lastCheckPoint.redo)),
7956 xtime ? errdetail("Last completed transaction was at log time %s.",
7958
7959 /*
7960 * Finally, execute archive_cleanup_command, if any.
7961 */
7964 "archive_cleanup_command",
7965 false,
7967
7968 return true;
7969}
7970
7971/*
7972 * Report availability of WAL for the given target LSN
7973 * (typically a slot's restart_lsn)
7974 *
7975 * Returns one of the following enum values:
7976 *
7977 * * WALAVAIL_RESERVED means targetLSN is available and it is in the range of
7978 * max_wal_size.
7979 *
7980 * * WALAVAIL_EXTENDED means it is still available by preserving extra
7981 * segments beyond max_wal_size. If max_slot_wal_keep_size is smaller
7982 * than max_wal_size, this state is not returned.
7983 *
7984 * * WALAVAIL_UNRESERVED means it is being lost and the next checkpoint will
7985 * remove reserved segments. The walsender using this slot may return to one
7986 * of the above states.
7987 *
7988 * * WALAVAIL_REMOVED means it has been removed. A replication stream on
7989 * a slot with this LSN cannot continue. (Any associated walsender
7990 * processes should have been terminated already.)
7991 *
7992 * * WALAVAIL_INVALID_LSN means the slot hasn't been set to reserve WAL.
7993 */
7996{
7997 XLogRecPtr currpos; /* current write LSN */
7998 XLogSegNo currSeg; /* segid of currpos */
7999 XLogSegNo targetSeg; /* segid of targetLSN */
8000 XLogSegNo oldestSeg; /* actual oldest segid */
8001 XLogSegNo oldestSegMaxWalSize; /* oldest segid kept by max_wal_size */
8002 XLogSegNo oldestSlotSeg; /* oldest segid kept by slot */
8004
8005 /*
8006 * slot does not reserve WAL. Either deactivated, or has never been active
8007 */
8009 return WALAVAIL_INVALID_LSN;
8010
8011 /*
8012 * Calculate the oldest segment currently reserved by all slots,
8013 * considering wal_keep_size and max_slot_wal_keep_size. Initialize
8014 * oldestSlotSeg to the current segment.
8015 */
8016 currpos = GetXLogWriteRecPtr();
8018 KeepLogSeg(currpos, &oldestSlotSeg);
8019
8020 /*
8021 * Find the oldest extant segment file. We get 1 until checkpoint removes
8022 * the first WAL segment file since startup, which causes the status being
8023 * wrong under certain abnormal conditions but that doesn't actually harm.
8024 */
8026
8027 /* calculate oldest segment by max_wal_size */
8030
8031 if (currSeg > keepSegs)
8033 else
8035
8036 /* the segment we care about */
8038
8039 /*
8040 * No point in returning reserved or extended status values if the
8041 * targetSeg is known to be lost.
8042 */
8043 if (targetSeg >= oldestSlotSeg)
8044 {
8045 /* show "reserved" when targetSeg is within max_wal_size */
8047 return WALAVAIL_RESERVED;
8048
8049 /* being retained by slots exceeding max_wal_size */
8050 return WALAVAIL_EXTENDED;
8051 }
8052
8053 /* WAL segments are no longer retained but haven't been removed yet */
8054 if (targetSeg >= oldestSeg)
8055 return WALAVAIL_UNRESERVED;
8056
8057 /* Definitely lost */
8058 return WALAVAIL_REMOVED;
8059}
8060
8061
8062/*
8063 * Retreat *logSegNo to the last segment that we need to retain because of
8064 * either wal_keep_size or replication slots.
8065 *
8066 * This is calculated by subtracting wal_keep_size from the given xlog
8067 * location, recptr and by making sure that that result is below the
8068 * requirement of replication slots. For the latter criterion we do consider
8069 * the effects of max_slot_wal_keep_size: reserve at most that much space back
8070 * from recptr.
8071 *
8072 * Note about replication slots: if this function calculates a value
8073 * that's further ahead than what slots need reserved, then affected
8074 * slots need to be invalidated and this function invoked again.
8075 * XXX it might be a good idea to rewrite this function so that
8076 * invalidation is optionally done here, instead.
8077 */
8078static void
8080{
8082 XLogSegNo segno;
8084
8086 segno = currSegNo;
8087
8088 /* Calculate how many segments are kept by slots. */
8091 {
8093
8094 /*
8095 * Account for max_slot_wal_keep_size to avoid keeping more than
8096 * configured. However, don't do that during a binary upgrade: if
8097 * slots were to be invalidated because of this, it would not be
8098 * possible to preserve logical ones during the upgrade.
8099 */
8101 {
8103
8106
8107 if (currSegNo - segno > slot_keep_segs)
8108 segno = currSegNo - slot_keep_segs;
8109 }
8110 }
8111
8112 /*
8113 * If WAL summarization is in use, don't remove WAL that has yet to be
8114 * summarized.
8115 */
8118 {
8120
8122 if (unsummarized_segno < segno)
8123 segno = unsummarized_segno;
8124 }
8125
8126 /* but, keep at least wal_keep_size if that's set */
8127 if (wal_keep_size_mb > 0)
8128 {
8130
8132 if (currSegNo - segno < keep_segs)
8133 {
8134 /* avoid underflow, don't go below 1 */
8135 if (currSegNo <= keep_segs)
8136 segno = 1;
8137 else
8138 segno = currSegNo - keep_segs;
8139 }
8140 }
8141
8142 /* don't delete WAL segments newer than the calculated segment */
8143 if (segno < *logSegNo)
8144 *logSegNo = segno;
8145}
8146
8147/*
8148 * Write a NEXTOID log record
8149 */
8150void
8152{
8154 XLogRegisterData(&nextOid, sizeof(Oid));
8156
8157 /*
8158 * We need not flush the NEXTOID record immediately, because any of the
8159 * just-allocated OIDs could only reach disk as part of a tuple insert or
8160 * update that would have its own XLOG record that must follow the NEXTOID
8161 * record. Therefore, the standard buffer LSN interlock applied to those
8162 * records will ensure no such OID reaches disk before the NEXTOID record
8163 * does.
8164 *
8165 * Note, however, that the above statement only covers state "within" the
8166 * database. When we use a generated OID as a file or directory name, we
8167 * are in a sense violating the basic WAL rule, because that filesystem
8168 * change may reach disk before the NEXTOID WAL record does. The impact
8169 * of this is that if a database crash occurs immediately afterward, we
8170 * might after restart re-generate the same OID and find that it conflicts
8171 * with the leftover file or directory. But since for safety's sake we
8172 * always loop until finding a nonconflicting filename, this poses no real
8173 * problem in practice. See pgsql-hackers discussion 27-Sep-2006.
8174 */
8175}
8176
8177/*
8178 * Write an XLOG SWITCH record.
8179 *
8180 * Here we just blindly issue an XLogInsert request for the record.
8181 * All the magic happens inside XLogInsert.
8182 *
8183 * The return value is either the end+1 address of the switch record,
8184 * or the end+1 address of the prior segment if we did not need to
8185 * write a switch record because we are already at segment start.
8186 */
8189{
8191
8192 /* XLOG SWITCH has no data */
8194
8195 if (mark_unimportant)
8198
8199 return RecPtr;
8200}
8201
8202/*
8203 * Write a RESTORE POINT record
8204 */
8207{
8210
8212 strlcpy(xlrec.rp_name, rpName, MAXFNAMELEN);
8213
8216
8218
8219 ereport(LOG,
8220 errmsg("restore point \"%s\" created at %X/%08X",
8222
8223 return RecPtr;
8224}
8225
8226/*
8227 * Check if any of the GUC parameters that are critical for hot standby
8228 * have changed, and update the value in pg_control file if necessary.
8229 */
8230static void
8232{
8241 {
8242 /*
8243 * The change in number of backend slots doesn't need to be WAL-logged
8244 * if archiving is not enabled, as you can't start archive recovery
8245 * with wal_level=minimal anyway. We don't really care about the
8246 * values in pg_control either if wal_level=minimal, but seems better
8247 * to keep them up-to-date to avoid confusion.
8248 */
8250 {
8253
8255 xlrec.max_worker_processes = max_worker_processes;
8256 xlrec.max_wal_senders = max_wal_senders;
8257 xlrec.max_prepared_xacts = max_prepared_xacts;
8258 xlrec.max_locks_per_xact = max_locks_per_xact;
8259 xlrec.wal_level = wal_level;
8260 xlrec.wal_log_hints = wal_log_hints;
8261 xlrec.track_commit_timestamp = track_commit_timestamp;
8262
8264 XLogRegisterData(&xlrec, sizeof(xlrec));
8265
8268 }
8269
8271
8281
8283 }
8284}
8285
8286/*
8287 * Update full_page_writes in shared memory, and write an
8288 * XLOG_FPW_CHANGE record if necessary.
8289 *
8290 * Note: this function assumes there is no other process running
8291 * concurrently that could update it.
8292 */
8293void
8295{
8297	bool		recoveryInProgress;
8298
8299	/*
8300	 * Do nothing if full_page_writes has not been changed.
8301	 *
8302	 * It's safe to check the shared full_page_writes without the lock,
8303	 * because we assume that there is no concurrently running process which
8304	 * can update it.
8305	 */
8306	if (fullPageWrites == Insert->fullPageWrites)
8307		return;
8308
8309	/*
8310	 * Perform this outside critical section so that the WAL insert
8311	 * initialization done by RecoveryInProgress() doesn't trigger an
8312	 * assertion failure.
8313	 */
8315
8317
	/*
	 * NOTE(review): the extraction dropped the function-name line, the
	 * recoveryInProgress assignment, the critical-section entry, and the
	 * WAL-insert-lock acquire/release around the shared-flag updates
	 * below. Ordering here is deliberate (see comment): enable the shared
	 * flag before logging, disable it after logging.
	 */
8318	/*
8319	 * It's always safe to take full page images, even when not strictly
8320	 * required, but not the other round. So if we're setting full_page_writes
8321	 * to true, first set it true and then write the WAL record. If we're
8322	 * setting it to false, first write the WAL record and then set the global
8323	 * flag.
8324	 */
8325	if (fullPageWrites)
8326	{
8328		Insert->fullPageWrites = true;
8330	}
8331
8332	/*
8333	 * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
8334	 * full_page_writes during archive recovery, if required.
8335	 */
8337	{
8339		XLogRegisterData(&fullPageWrites, sizeof(bool));
8340
8342	}
8343
8344	if (!fullPageWrites)
8345	{
8347		Insert->fullPageWrites = false;
8349	}
8351}
8352
8353/*
8354 * XLOG resource manager's routines
8355 *
8356 * Definitions of info values are in include/catalog/pg_control.h, though
8357 * not all record types are related to control file updates.
8358 *
8359 * NOTE: Some XLOG record types that are directly related to WAL recovery
8360 * are handled in xlogrecovery_redo().
8361 */
8362void
8364{
	/*
	 * NOTE(review): the function-name line was dropped in extraction, as
	 * were many statement lines throughout (lock acquire/release pairs,
	 * several assignments and calls). The code below is byte-identical to
	 * the extracted listing; verify gaps against upstream xlog.c.
	 */
8365	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8366	XLogRecPtr	lsn = record->EndRecPtr;
8367
8368	/*
8369	 * In XLOG rmgr, backup blocks are only used by XLOG_FPI and
8370	 * XLOG_FPI_FOR_HINT records.
8371	 */
8372	Assert(info == XLOG_FPI || info == XLOG_FPI_FOR_HINT ||
8373		   !XLogRecHasAnyBlockRefs(record));
8374
8375	if (info == XLOG_NEXTOID)
8376	{
8377		Oid			nextOid;
8378
8379		/*
8380		 * We used to try to take the maximum of TransamVariables->nextOid and
8381		 * the recorded nextOid, but that fails if the OID counter wraps
8382		 * around. Since no OID allocation should be happening during replay
8383		 * anyway, better to just believe the record exactly. We still take
8384		 * OidGenLock while setting the variable, just in case.
8385		 */
8386		memcpy(&nextOid, XLogRecGetData(record), sizeof(Oid));
8388		TransamVariables->nextOid = nextOid;
8391	}
8392	else if (info == XLOG_CHECKPOINT_SHUTDOWN)
8393	{
8394		CheckPoint	checkPoint;
8395		TimeLineID	replayTLI;
8396
8397		memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
8398		/* In a SHUTDOWN checkpoint, believe the counters exactly */
8400		TransamVariables->nextXid = checkPoint.nextXid;
8403		TransamVariables->nextOid = checkPoint.nextOid;
8407						   checkPoint.nextMultiOffset);
8408
8410							  checkPoint.oldestMultiDB);
8411
8412		/*
8413		 * No need to set oldestClogXid here as well; it'll be set when we
8414		 * redo an xl_clog_truncate if it changed since initialization.
8415		 */
8416		SetTransactionIdLimit(checkPoint.oldestXid, checkPoint.oldestXidDB);
8417
8418		/*
8419		 * If we see a shutdown checkpoint while waiting for an end-of-backup
8420		 * record, the backup was canceled and the end-of-backup record will
8421		 * never arrive.
8422		 */
8426			ereport(PANIC,
8427					(errmsg("online backup was canceled, recovery cannot continue")));
8428
8429		/*
8430		 * If we see a shutdown checkpoint, we know that nothing was running
8431		 * on the primary at this point. So fake-up an empty running-xacts
8432		 * record and use that here and now. Recover additional standby state
8433		 * for prepared transactions.
8434		 */
8436		{
8437			TransactionId *xids;
8438			int			nxids;
8440			TransactionId latestCompletedXid;
8442
8444
8445			/* Update pg_subtrans entries for any prepared transactions */
8447
8448			/*
8449			 * Construct a RunningTransactions snapshot representing a shut
8450			 * down server, with only prepared transactions still alive. We're
8451			 * never overflowed at this point because all subxids are listed
8452			 * with their parent prepared transactions.
8453			 */
8454			running.xcnt = nxids;
8455			running.subxcnt = 0;
8457			running.nextXid = XidFromFullTransactionId(checkPoint.nextXid);
8459			latestCompletedXid = XidFromFullTransactionId(checkPoint.nextXid);
8460			TransactionIdRetreat(latestCompletedXid);
8461			Assert(TransactionIdIsNormal(latestCompletedXid));
8462			running.latestCompletedXid = latestCompletedXid;
8463			running.xids = xids;
8464
8466		}
8467
8468		/* ControlFile->checkPointCopy always tracks the latest ckpt XID */
8472
8473		/*
8474		 * We should've already switched to the new TLI before replaying this
8475		 * record.
8476		 */
8477		(void) GetCurrentReplayRecPtr(&replayTLI);
8478		if (checkPoint.ThisTimeLineID != replayTLI)
8479			ereport(PANIC,
8480					(errmsg("unexpected timeline ID %u (should be %u) in shutdown checkpoint record",
8481							checkPoint.ThisTimeLineID, replayTLI)));
8482
8483		RecoveryRestartPoint(&checkPoint, record);
8484
8485		/*
8486		 * After replaying a checkpoint record, free all smgr objects.
8487		 * Otherwise we would never do so for dropped relations, as the
8488		 * startup does not process shared invalidation messages or call
8489		 * AtEOXact_SMgr().
8490		 */
8492	}
8493	else if (info == XLOG_CHECKPOINT_ONLINE)
8494	{
8495		CheckPoint	checkPoint;
8496		TimeLineID	replayTLI;
8497
8498		memcpy(&checkPoint, XLogRecGetData(record), sizeof(CheckPoint));
8499		/* In an ONLINE checkpoint, treat the XID counter as a minimum */
8502									  checkPoint.nextXid))
8503			TransamVariables->nextXid = checkPoint.nextXid;
8505
8506		/*
8507		 * We ignore the nextOid counter in an ONLINE checkpoint, preferring
8508		 * to track OID assignment through XLOG_NEXTOID records. The nextOid
8509		 * counter is from the start of the checkpoint and might well be stale
8510		 * compared to later XLOG_NEXTOID records. We could try to take the
8511		 * maximum of the nextOid counter and our latest value, but since
8512		 * there's no particular guarantee about the speed with which the OID
8513		 * counter wraps around, that's a risky thing to do. In any case,
8514		 * users of the nextOid counter are required to avoid assignment of
8515		 * duplicates, so that a somewhat out-of-date value should be safe.
8516		 */
8517
8518		/* Handle multixact */
8520						   checkPoint.nextMultiOffset);
8521
8522		/*
8523		 * NB: This may perform multixact truncation when replaying WAL
8524		 * generated by an older primary.
8525		 */
8527							  checkPoint.oldestMultiDB);
8529								  checkPoint.oldestXid))
8531								   checkPoint.oldestXidDB);
8532		/* ControlFile->checkPointCopy always tracks the latest ckpt XID */
8536
8537		/* TLI should not change in an on-line checkpoint */
8538		(void) GetCurrentReplayRecPtr(&replayTLI);
8539		if (checkPoint.ThisTimeLineID != replayTLI)
8540			ereport(PANIC,
8541					(errmsg("unexpected timeline ID %u (should be %u) in online checkpoint record",
8542							checkPoint.ThisTimeLineID, replayTLI)));
8543
8544		RecoveryRestartPoint(&checkPoint, record);
8545
8546		/*
8547		 * After replaying a checkpoint record, free all smgr objects.
8548		 * Otherwise we would never do so for dropped relations, as the
8549		 * startup does not process shared invalidation messages or call
8550		 * AtEOXact_SMgr().
8551		 */
8553	}
8554	else if (info == XLOG_OVERWRITE_CONTRECORD)
8555	{
8556		/* nothing to do here, handled in xlogrecovery_redo() */
8557	}
8558	else if (info == XLOG_END_OF_RECOVERY)
8559	{
8561		TimeLineID	replayTLI;
8562
8563		memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_end_of_recovery));
8564
8565		/*
8566		 * For Hot Standby, we could treat this like a Shutdown Checkpoint,
8567		 * but this case is rarer and harder to test, so the benefit doesn't
8568		 * outweigh the potential extra cost of maintenance.
8569		 */
8570
8571		/*
8572		 * We should've already switched to the new TLI before replaying this
8573		 * record.
8574		 */
8575		(void) GetCurrentReplayRecPtr(&replayTLI);
8576		if (xlrec.ThisTimeLineID != replayTLI)
8577			ereport(PANIC,
8578					(errmsg("unexpected timeline ID %u (should be %u) in end-of-recovery record",
8579							xlrec.ThisTimeLineID, replayTLI)));
8580	}
8581	else if (info == XLOG_NOOP)
8582	{
8583		/* nothing to do here */
8584	}
8585	else if (info == XLOG_SWITCH)
8586	{
8587		/* nothing to do here */
8588	}
8589	else if (info == XLOG_RESTORE_POINT)
8590	{
8591		/* nothing to do here, handled in xlogrecovery.c */
8592	}
8593	else if (info == XLOG_FPI || info == XLOG_FPI_FOR_HINT)
8594	{
8595		/*
8596		 * XLOG_FPI records contain nothing else but one or more block
8597		 * references. Every block reference must include a full-page image
8598		 * even if full_page_writes was disabled when the record was generated
8599		 * - otherwise there would be no point in this record.
8600		 *
8601		 * XLOG_FPI_FOR_HINT records are generated when a page needs to be
8602		 * WAL-logged because of a hint bit update. They are only generated
8603		 * when checksums and/or wal_log_hints are enabled. They may include
8604		 * no full-page images if full_page_writes was disabled when they were
8605		 * generated. In this case there is nothing to do here.
8606		 *
8607		 * No recovery conflicts are generated by these generic records - if a
8608		 * resource manager needs to generate conflicts, it has to define a
8609		 * separate WAL record type and redo routine.
8610		 */
8611		for (uint8 block_id = 0; block_id <= XLogRecMaxBlockId(record); block_id++)
8612		{
8613			Buffer		buffer;
8614
8615			if (!XLogRecHasBlockImage(record, block_id))
8616			{
8617				if (info == XLOG_FPI)
8618					elog(ERROR, "XLOG_FPI record did not contain a full-page image");
8619				continue;
8620			}
8621
8622			if (XLogReadBufferForRedo(record, block_id, &buffer) != BLK_RESTORED)
8623				elog(ERROR, "unexpected XLogReadBufferForRedo result when restoring backup block");
8624			UnlockReleaseBuffer(buffer);
8625		}
8626	}
8627	else if (info == XLOG_BACKUP_END)
8628	{
8629		/* nothing to do here, handled in xlogrecovery_redo() */
8630	}
8631	else if (info == XLOG_PARAMETER_CHANGE)
8632	{
8634
8635		/* Update our copy of the parameters in pg_control */
8636		memcpy(&xlrec, XLogRecGetData(record), sizeof(xl_parameter_change));
8637
8639		ControlFile->MaxConnections = xlrec.MaxConnections;
8640		ControlFile->max_worker_processes = xlrec.max_worker_processes;
8641		ControlFile->max_wal_senders = xlrec.max_wal_senders;
8642		ControlFile->max_prepared_xacts = xlrec.max_prepared_xacts;
8643		ControlFile->max_locks_per_xact = xlrec.max_locks_per_xact;
8644		ControlFile->wal_level = xlrec.wal_level;
8645		ControlFile->wal_log_hints = xlrec.wal_log_hints;
8646
8647		/*
8648		 * Update minRecoveryPoint to ensure that if recovery is aborted, we
8649		 * recover back up to this point before allowing hot standby again.
8650		 * This is important if the max_* settings are decreased, to ensure
8651		 * you don't run queries against the WAL preceding the change. The
8652		 * local copies cannot be updated as long as crash recovery is
8653		 * happening and we expect all the WAL to be replayed.
8654		 */
8656		{
8659		}
8661		{
8662			TimeLineID	replayTLI;
8663
8664			(void) GetCurrentReplayRecPtr(&replayTLI);
8666				ControlFile->minRecoveryPointTLI = replayTLI;
8667		}
8668
8669		CommitTsParameterChange(xlrec.track_commit_timestamp,
8671		ControlFile->track_commit_timestamp = xlrec.track_commit_timestamp;
8672
8675
8676		/* Check to see if any parameter change gives a problem on recovery */
8678	}
8679	else if (info == XLOG_FPW_CHANGE)
8680	{
8681		bool		fpw;
8682
8683		memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
8684
8685		/*
8686		 * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
8687		 * do_pg_backup_start() and do_pg_backup_stop() can check whether
8688		 * full_page_writes has been disabled during online backup.
8689		 */
8690		if (!fpw)
8691		{
8696		}
8697
8698		/* Keep track of full_page_writes */
8700	}
8701	else if (info == XLOG_CHECKPOINT_REDO)
8702	{
8703		/* nothing to do here, just for informational purposes */
8704	}
8705	else if (info == XLOG_LOGICAL_DECODING_STATUS_CHANGE)
8706	{
8707		bool		status;
8708
8709		memcpy(&status, XLogRecGetData(record), sizeof(bool));
8710
8711		/*
8712		 * We need to toggle the logical decoding status and update the
8713		 * XLogLogicalInfo cache of processes synchronously because
8714		 * XLogLogicalInfoActive() is used even during read-only queries
8715		 * (e.g., via RelationIsAccessibleInLogicalDecoding()). In the
8716		 * 'disable' case, it is safe to invalidate existing slots after
8717		 * disabling logical decoding because logical decoding cannot process
8718		 * subsequent WAL records, which may not contain logical information.
8719		 */
8720		if (status)
8722		else
8724
8725		elog(DEBUG1, "update logical decoding status to %d during recovery",
8726			 status);
8727
8728		if (InRecovery && InHotStandby)
8729		{
8730			if (!status)
8731			{
8732				/*
8733				 * Invalidate logical slots if we are in hot standby and the
8734				 * primary disabled logical decoding.
8735				 */
8737												   0, InvalidOid,
8739			}
8740			else if (sync_replication_slots)
8741			{
8742				/*
8743				 * Signal the postmaster to launch the slotsync worker.
8744				 *
8745				 * XXX: For simplicity, we keep the slotsync worker running
8746				 * even after logical decoding is disabled. A future
8747				 * improvement can consider starting and stopping the worker
8748				 * based on logical decoding status change.
8749				 */
8751			}
8752		}
8753	}
8754}
8755
8756/*
8757 * Return the extra open flags used for opening a file, depending on the
8758 * value of the GUCs wal_sync_method, fsync and debug_io_direct.
8759 */
8760static int
8761get_sync_bit(int method)
8762{
8763	int			o_direct_flag = 0;
8764
8765	/*
8766	 * Use O_DIRECT if requested, except in walreceiver process. The WAL
8767	 * written by walreceiver is normally read by the startup process soon
8768	 * after it's written. Also, walreceiver performs unaligned writes, which
8769	 * don't work with O_DIRECT, so it is required for correctness too.
8770	 */
8773
8774	/* If fsync is disabled, never open in sync mode */
8775	if (!enableFsync)
8776		return o_direct_flag;
8777
8778	switch (method)
8779	{
8780			/*
8781			 * enum values for all sync options are defined even if they are
8782			 * not supported on the current platform. But if not, they are
8783			 * not included in the enum option array, and therefore will never
8784			 * be seen here.
8785			 */
			/*
			 * NOTE(review): the case labels (presumably the fsync,
			 * fdatasync, open_sync and open_datasync members of the
			 * wal_sync_method enum) and the conditional assignment of
			 * o_direct_flag above were dropped in extraction.
			 */
8789			return o_direct_flag;
8790#ifdef O_SYNC
8792			return O_SYNC | o_direct_flag;
8793#endif
8794#ifdef O_DSYNC
8796			return O_DSYNC | o_direct_flag;
8797#endif
8798		default:
8799			/* can't happen (unless we are out of sync with option array) */
8800			elog(ERROR, "unrecognized \"wal_sync_method\": %d", method);
8801			return 0;			/* silence warning */
8802	}
8803}
8804
8805/*
8806 * GUC support
8807 */
8808void
8810{
	/*
	 * NOTE(review): the function-name line and the condition at the top of
	 * this block were lost in extraction — presumably this is the GUC
	 * assign hook for wal_sync_method, and the dropped condition compares
	 * the new value against the current one. Confirm against upstream.
	 */
8812	{
8813		/*
8814		 * To ensure that no blocks escape unsynced, force an fsync on the
8815		 * currently open log segment (if any). Also, if the open flag is
8816		 * changing, close the log file so it will be reopened (with new flag
8817		 * bit) at next use.
8818		 */
8819		if (openLogFile >= 0)
8820		{
8822			if (pg_fsync(openLogFile) != 0)
8823			{
8824				char		xlogfname[MAXFNAMELEN];
8825				int			save_errno;
8826
				/* Preserve errno across the filename formatting call. */
8827				save_errno = errno;
8830				errno = save_errno;
8831				ereport(PANIC,
8833						 errmsg("could not fsync file \"%s\": %m", xlogfname)));
8834			}
8835
8838				XLogFileClose();
8839		}
8840	}
8841}
8842
8843
8844/*
8845 * Issue appropriate kind of fsync (if any) for an XLOG output file.
8846 *
8847 * 'fd' is a file descriptor for the XLOG file to be fsync'd.
8848 * 'segno' is for error reporting purposes.
8849 */
8850void
8852{
	/*
	 * NOTE(review): the function-name/signature line, the declaration of
	 * the I/O-timing 'start' variable, and several case labels in the
	 * switch below were dropped in extraction. Verify against upstream.
	 */
8853	char	   *msg = NULL;
8855
8856	Assert(tli != 0);
8857
8858	/*
8859	 * Quick exit if fsync is disabled or write() has already synced the WAL
8860	 * file.
8861	 */
8862	if (!enableFsync ||
8865		return;
8866
8867	/*
8868	 * Measure I/O timing to sync the WAL file for pg_stat_io.
8869	 */
8871
8873	switch (wal_sync_method)
8874	{
8876			if (pg_fsync_no_writethrough(fd) != 0)
8877				msg = _("could not fsync file \"%s\": %m");
8878			break;
8879#ifdef HAVE_FSYNC_WRITETHROUGH
8881			if (pg_fsync_writethrough(fd) != 0)
8882				msg = _("could not fsync write-through file \"%s\": %m");
8883			break;
8884#endif
8886			if (pg_fdatasync(fd) != 0)
8887				msg = _("could not fdatasync file \"%s\": %m");
8888			break;
8891			/* not reachable */
8892			Assert(false);
8893			break;
8894		default:
8895			ereport(PANIC,
8897					errmsg_internal("unrecognized \"wal_sync_method\": %d", wal_sync_method));
8898			break;
8899	}
8900
8901	/* PANIC if failed to fsync */
8902	if (msg)
8903	{
8904		char		xlogfname[MAXFNAMELEN];
8905		int			save_errno = errno;
8906
8908		errno = save_errno;
8909		ereport(PANIC,
8911				 errmsg(msg, xlogfname)));
8912	}
8913
8915
8917											start, 1, 0);
8918}
8919
8920/*
8921 * do_pg_backup_start is the workhorse of the user-visible pg_backup_start()
8922 * function. It creates the necessary starting checkpoint and constructs the
8923 * backup state and tablespace map.
8924 *
8925 * Input parameters are "state" (the backup state), "fast" (if true, we do
8926 * the checkpoint in fast mode), and "tablespaces" (if non-NULL, indicates a
8927 * list of tablespaceinfo structs describing the cluster's tablespaces.).
8928 *
8929 * The tablespace map contents are appended to passed-in parameter
8930 * tablespace_map and the caller is responsible for including it in the backup
8931 * archive as 'tablespace_map'. The tablespace_map file is required mainly for
8932 * tar format in windows as native windows utilities are not able to create
8933 * symlinks while extracting files from tar. However for consistency and
8934 * platform-independence, we do it the same way everywhere.
8935 *
8936 * It fills in "state" with the information required for the backup, such
8937 * as the minimum WAL location that must be present to restore from this
8938 * backup (startpoint) and the corresponding timeline ID (starttli).
8939 *
8940 * Every successfully started backup must be stopped by calling
8941 * do_pg_backup_stop() or do_pg_abort_backup(). There can be many
8942 * backups active at the same time.
8943 *
8944 * It is the responsibility of the caller of this function to verify the
8945 * permissions of the calling user!
8946 */
8947void
8948do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces,
8950{
	/*
	 * NOTE(review): this listing lost many lines in extraction (trailing
	 * signature parameters, local declarations, lock calls, the relpath
	 * assignment, and the escaping/appendStringInfo calls). The remaining
	 * code is kept as-is except for the d_name guard fix noted below.
	 */
8952
8953	Assert(state != NULL);
8955
8956	/*
8957	 * During recovery, we don't need to check WAL level. Because, if WAL
8958	 * level is not sufficient, it's impossible to get here during recovery.
8959	 */
8961		ereport(ERROR,
8963				 errmsg("WAL level not sufficient for making an online backup"),
8964				 errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
8965
8967		ereport(ERROR,
8969				 errmsg("backup label too long (max %d bytes)",
8970						MAXPGPATH)));
8971
8972	strlcpy(state->name, backupidstr, sizeof(state->name));
8973
8974	/*
8975	 * Mark backup active in shared memory. We must do full-page WAL writes
8976	 * during an on-line backup even if not doing so at other times, because
8977	 * it's quite possible for the backup dump to obtain a "torn" (partially
8978	 * written) copy of a database page if it reads the page concurrently with
8979	 * our write to the same page. This can be fixed as long as the first
8980	 * write to the page in the WAL sequence is a full-page write. Hence, we
8981	 * increment runningBackups then force a CHECKPOINT, to ensure there are
8982	 * no dirty pages in shared memory that might get dumped while the backup
8983	 * is in progress without having a corresponding WAL record. (Once the
8984	 * backup is complete, we need not force full-page writes anymore, since
8985	 * we expect that any pages not modified during the backup interval must
8986	 * have been correctly captured by the backup.)
8987	 *
8988	 * Note that forcing full-page writes has no effect during an online
8989	 * backup from the standby.
8990	 *
8991	 * We must hold all the insertion locks to change the value of
8992	 * runningBackups, to ensure adequate interlocking against
8993	 * XLogInsertRecord().
8994	 */
8998
8999	/*
9000	 * Ensure we decrement runningBackups if we fail below. NB -- for this to
9001	 * work correctly, it is critical that sessionBackupState is only updated
9002	 * after this block is over.
9003	 */
9005	{
9006		bool		gotUniqueStartpoint = false;
9007		DIR		   *tblspcdir;
9008		struct dirent *de;
9010		int			datadirpathlen;
9011
9012		/*
9013		 * Force an XLOG file switch before the checkpoint, to ensure that the
9014		 * WAL segment the checkpoint is written to doesn't contain pages with
9015		 * old timeline IDs. That would otherwise happen if you called
9016		 * pg_backup_start() right after restoring from a PITR archive: the
9017		 * first WAL segment containing the startup checkpoint has pages in
9018		 * the beginning with the old timeline ID. That can cause trouble at
9019		 * recovery: we won't have a history file covering the old timeline if
9020		 * pg_wal directory was not included in the base backup and the WAL
9021		 * archive was cleared too before starting the backup.
9022		 *
9023		 * During recovery, we skip forcing XLOG file switch, which means that
9024		 * the backup taken during recovery is not available for the special
9025		 * recovery case described above.
9026		 */
9028			RequestXLogSwitch(false);
9029
9030		do
9031		{
9032			bool		checkpointfpw;
9033
9034			/*
9035			 * Force a CHECKPOINT. Aside from being necessary to prevent torn
9036			 * page problems, this guarantees that two successive backup runs
9037			 * will have different checkpoint positions and hence different
9038			 * history file names, even if nothing happened in between.
9039			 *
9040			 * During recovery, establish a restartpoint if possible. We use
9041			 * the last restartpoint as the backup starting checkpoint. This
9042			 * means that two successive backup runs can have same checkpoint
9043			 * positions.
9044			 *
9045			 * Since the fact that we are executing do_pg_backup_start()
9046			 * during recovery means that checkpointer is running, we can use
9047			 * RequestCheckpoint() to establish a restartpoint.
9048			 *
9049			 * We use CHECKPOINT_FAST only if requested by user (via passing
9050			 * fast = true). Otherwise this can take awhile.
9051			 */
9053							  (fast ? CHECKPOINT_FAST : 0));
9054
9055			/*
9056			 * Now we need to fetch the checkpoint record location, and also
9057			 * its REDO pointer. The oldest point in WAL that would be needed
9058			 * to restore starting from the checkpoint is precisely the REDO
9059			 * pointer.
9060			 */
9062			state->checkpointloc = ControlFile->checkPoint;
9063			state->startpoint = ControlFile->checkPointCopy.redo;
9067
9069			{
9071
9072				/*
9073				 * Check to see if all WAL replayed during online backup
9074				 * (i.e., since last restartpoint used as backup starting
9075				 * checkpoint) contain full-page writes.
9076				 */
9080
9081				if (!checkpointfpw || state->startpoint <= recptr)
9082					ereport(ERROR,
9084							 errmsg("WAL generated with \"full_page_writes=off\" was replayed "
9085									"since last restartpoint"),
9086							 errhint("This means that the backup being taken on the standby "
9087									 "is corrupt and should not be used. "
9088									 "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
9089									 "and then try an online backup again.")));
9090
9091				/*
9092				 * During recovery, since we don't use the end-of-backup WAL
9093				 * record and don't write the backup history file, the
9094				 * starting WAL location doesn't need to be unique. This means
9095				 * that two base backups started at the same time might use
9096				 * the same checkpoint as starting locations.
9097				 */
9098				gotUniqueStartpoint = true;
9099			}
9100
9101			/*
9102			 * If two base backups are started at the same time (in WAL sender
9103			 * processes), we need to make sure that they use different
9104			 * checkpoints as starting locations, because we use the starting
9105			 * WAL location as a unique identifier for the base backup in the
9106			 * end-of-backup WAL record and when we write the backup history
9107			 * file. Perhaps it would be better generate a separate unique ID
9108			 * for each backup instead of forcing another checkpoint, but
9109			 * taking a checkpoint right after another is not that expensive
9110			 * either because only few buffers have been dirtied yet.
9111			 */
9113			if (XLogCtl->Insert.lastBackupStart < state->startpoint)
9114			{
9115				XLogCtl->Insert.lastBackupStart = state->startpoint;
9116				gotUniqueStartpoint = true;
9117			}
9119		} while (!gotUniqueStartpoint);
9120
9121		/*
9122		 * Construct tablespace_map file.
9123		 */
9125
9126		/* Collect information about all tablespaces */
9128		while ((de = ReadDir(tblspcdir, PG_TBLSPC_DIR)) != NULL)
9129		{
9130			char		fullpath[MAXPGPATH + sizeof(PG_TBLSPC_DIR)];
9131			char		linkpath[MAXPGPATH];
9132			char	   *relpath = NULL;
9133			char	   *s;
9135			char	   *badp;
9136			Oid			tsoid;
9137
9138			/*
9139			 * Try to parse the directory name as an unsigned integer.
9140			 *
9141			 * Tablespace directories should be positive integers that can be
9142			 * represented in 32 bits, with no leading zeroes or trailing
9143			 * garbage. If we come across a name that doesn't meet those
9144			 * criteria, skip it.
9145			 */
			/*
			 * Fixed: both tests must examine the FIRST character — the
			 * listing had "de->d_name[1] > '9'", which checks the second
			 * character and so fails to enforce "first char in 1..9" as
			 * the comment above requires (strtoul below would still
			 * backstop trailing garbage, but not leading zeroes).
			 */
9146			if (de->d_name[0] < '1' || de->d_name[0] > '9')
9147				continue;
9148			errno = 0;
9149			tsoid = strtoul(de->d_name, &badp, 10);
9150			if (*badp != '\0' || errno == EINVAL || errno == ERANGE)
9151				continue;
9152
9153			snprintf(fullpath, sizeof(fullpath), "%s/%s", PG_TBLSPC_DIR, de->d_name);
9154
9155			de_type = get_dirent_type(fullpath, de, false, ERROR);
9156
9157			if (de_type == PGFILETYPE_LNK)
9158			{
9160				int			rllen;
9161
9162				rllen = readlink(fullpath, linkpath, sizeof(linkpath));
9163				if (rllen < 0)
9164				{
9166							(errmsg("could not read symbolic link \"%s\": %m",
9167									fullpath)));
9168					continue;
9169				}
9170				else if (rllen >= sizeof(linkpath))
9171				{
9173							(errmsg("symbolic link \"%s\" target is too long",
9174									fullpath)));
9175					continue;
9176				}
9177				linkpath[rllen] = '\0';
9178
9179				/*
9180				 * Relpath holds the relative path of the tablespace directory
9181				 * when it's located within PGDATA, or NULL if it's located
9182				 * elsewhere.
9183				 */
9184				if (rllen > datadirpathlen &&
9188
9189				/*
9190				 * Add a backslash-escaped version of the link path to the
9191				 * tablespace map file.
9192				 */
9194				for (s = linkpath; *s; s++)
9195				{
9196					if (*s == '\n' || *s == '\r' || *s == '\\')
9199				}
9201								 de->d_name, escapedpath.data);
9202				pfree(escapedpath.data);
9203			}
9204			else if (de_type == PGFILETYPE_DIR)
9205			{
9206				/*
9207				 * It's possible to use allow_in_place_tablespaces to create
9208				 * directories directly under pg_tblspc, for testing purposes
9209				 * only.
9210				 *
9211				 * In this case, we store a relative path rather than an
9212				 * absolute path into the tablespaceinfo.
9213				 */
9214				snprintf(linkpath, sizeof(linkpath), "%s/%s",
9215						 PG_TBLSPC_DIR, de->d_name);
9217			}
9218			else
9219			{
9220				/* Skip any other file type that appears here. */
9221				continue;
9222			}
9223
9225			ti->oid = tsoid;
9226			ti->path = pstrdup(linkpath);
9227			ti->rpath = relpath;
9228			ti->size = -1;
9229
9230			if (tablespaces)
9231				*tablespaces = lappend(*tablespaces, ti);
9232		}
9234
9235		state->starttime = (pg_time_t) time(NULL);
9236	}
9238
9239	state->started_in_recovery = backup_started_in_recovery;
9240
9241	/*
9242	 * Mark that the start phase has correctly finished for the backup.
9243	 */
9245}
9246
9247/*
9248 * Utility routine to fetch the session-level status of a backup running.
9249 */
9252{
	/*
	 * NOTE(review): the return-type/name line was dropped in extraction;
	 * this simply returns the process-local sessionBackupState flag.
	 */
9253	return sessionBackupState;
9254}
9255
9256/*
9257 * do_pg_backup_stop
9258 *
9259 * Utility function called at the end of an online backup. It creates history
9260 * file (if required), resets sessionBackupState and so on. It can optionally
9261 * wait for WAL segments to be archived.
9262 *
9263 * "state" is filled with the information necessary to restore from this
9264 * backup with its stop LSN (stoppoint), its timeline ID (stoptli), etc.
9265 *
9266 * It is the responsibility of the caller of this function to verify the
9267 * permissions of the calling user!
9268 */
9269void
9271{
9272 bool backup_stopped_in_recovery = false;
9273 char histfilepath[MAXPGPATH];
9277 FILE *fp;
9279 int waits = 0;
9280 bool reported_waiting = false;
9281
9282 Assert(state != NULL);
9283
9285
9286 /*
9287 * During recovery, we don't need to check WAL level. Because, if WAL
9288 * level is not sufficient, it's impossible to get here during recovery.
9289 */
9291 ereport(ERROR,
9293 errmsg("WAL level not sufficient for making an online backup"),
9294 errhint("\"wal_level\" must be set to \"replica\" or \"logical\" at server start.")));
9295
9296 /*
9297 * OK to update backup counter and session-level lock.
9298 *
9299 * Note that CHECK_FOR_INTERRUPTS() must not occur while updating them,
9300 * otherwise they can be updated inconsistently, which might cause
9301 * do_pg_abort_backup() to fail.
9302 */
9304
9305 /*
9306 * It is expected that each do_pg_backup_start() call is matched by
9307 * exactly one do_pg_backup_stop() call.
9308 */
9311
9312 /*
9313 * Clean up session-level lock.
9314 *
9315 * You might think that WALInsertLockRelease() can be called before
9316 * cleaning up session-level lock because session-level lock doesn't need
9317 * to be protected with WAL insertion lock. But since
9318 * CHECK_FOR_INTERRUPTS() can occur in it, session-level lock must be
9319 * cleaned up before it.
9320 */
9322
9324
9325 /*
9326 * If we are taking an online backup from the standby, we confirm that the
9327 * standby has not been promoted during the backup.
9328 */
9329 if (state->started_in_recovery && !backup_stopped_in_recovery)
9330 ereport(ERROR,
9332 errmsg("the standby was promoted during online backup"),
9333 errhint("This means that the backup being taken is corrupt "
9334 "and should not be used. "
9335 "Try taking another online backup.")));
9336
9337 /*
9338 * During recovery, we don't write an end-of-backup record. We assume that
9339 * pg_control was backed up last and its minimum recovery point can be
9340 * available as the backup end location. Since we don't have an
9341 * end-of-backup record, we use the pg_control value to check whether
9342 * we've reached the end of backup when starting recovery from this
9343 * backup. We have no way of checking if pg_control wasn't backed up last
9344 * however.
9345 *
9346 * We don't force a switch to new WAL file but it is still possible to
9347 * wait for all the required files to be archived if waitforarchive is
9348 * true. This is okay if we use the backup to start a standby and fetch
9349 * the missing WAL using streaming replication. But in the case of an
9350 * archive recovery, a user should set waitforarchive to true and wait for
9351 * them to be archived to ensure that all the required files are
9352 * available.
9353 *
9354 * We return the current minimum recovery point as the backup end
9355 * location. Note that it can be greater than the exact backup end
9356 * location if the minimum recovery point is updated after the backup of
9357 * pg_control. This is harmless for current uses.
9358 *
9359 * XXX currently a backup history file is for informational and debug
9360 * purposes only. It's not essential for an online backup. Furthermore,
9361 * even if it's created, it will not be archived during recovery because
9362 * an archiver is not invoked. So it doesn't seem worthwhile to write a
9363 * backup history file during recovery.
9364 */
9366 {
9368
9369 /*
9370 * Check to see if all WAL replayed during online backup contain
9371 * full-page writes.
9372 */
9376
9377 if (state->startpoint <= recptr)
9378 ereport(ERROR,
9380 errmsg("WAL generated with \"full_page_writes=off\" was replayed "
9381 "during online backup"),
9382 errhint("This means that the backup being taken on the standby "
9383 "is corrupt and should not be used. "
9384 "Enable \"full_page_writes\" and run CHECKPOINT on the primary, "
9385 "and then try an online backup again.")));
9386
9387
9389 state->stoppoint = ControlFile->minRecoveryPoint;
9392 }
9393 else
9394 {
9395 char *history_file;
9396
9397 /*
9398 * Write the backup-end xlog record
9399 */
9401 XLogRegisterData(&state->startpoint,
9402 sizeof(state->startpoint));
9404
9405 /*
9406 * Given that we're not in recovery, InsertTimeLineID is set and can't
9407 * change, so we can read it without a lock.
9408 */
9409 state->stoptli = XLogCtl->InsertTimeLineID;
9410
9411 /*
9412 * Force a switch to a new xlog segment file, so that the backup is
9413 * valid as soon as archiver moves out the current segment file.
9414 */
9415 RequestXLogSwitch(false);
9416
9417 state->stoptime = (pg_time_t) time(NULL);
9418
9419 /*
9420 * Write the backup history file
9421 */
9424 state->startpoint, wal_segment_size);
9425 fp = AllocateFile(histfilepath, "w");
9426 if (!fp)
9427 ereport(ERROR,
9429 errmsg("could not create file \"%s\": %m",
9430 histfilepath)));
9431
9432 /* Build and save the contents of the backup history file */
9434 fprintf(fp, "%s", history_file);
9436
9437 if (fflush(fp) || ferror(fp) || FreeFile(fp))
9438 ereport(ERROR,
9440 errmsg("could not write file \"%s\": %m",
9441 histfilepath)));
9442
9443 /*
9444 * Clean out any no-longer-needed history files. As a side effect,
9445 * this will post a .ready file for the newly created history file,
9446 * notifying the archiver that history file may be archived
9447 * immediately.
9448 */
9450 }
9451
9452 /*
9453 * If archiving is enabled, wait for all the required WAL files to be
9454 * archived before returning. If archiving isn't enabled, the required WAL
9455 * needs to be transported via streaming replication (hopefully with
9456 * wal_keep_size set high enough), or some more exotic mechanism like
9457 * polling and copying files from pg_wal with script. We have no knowledge
9458 * of those mechanisms, so it's up to the user to ensure that he gets all
9459 * the required WAL.
9460 *
9461 * We wait until both the last WAL file filled during backup and the
9462 * history file have been archived, and assume that the alphabetic sorting
9463 * property of the WAL files ensures any earlier WAL files are safely
9464 * archived as well.
9465 *
9466 * We wait forever, since archive_command is supposed to work and we
9467 * assume the admin wanted his backup to work completely. If you don't
9468 * wish to wait, then either waitforarchive should be passed in as false,
9469 * or you can set statement_timeout. Also, some notices are issued to
9470 * clue in anyone who might be doing this interactively.
9471 */
9472
9473 if (waitforarchive &&
9476 {
9480
9483 state->startpoint, wal_segment_size);
9484
9486 waits = 0;
9487
9490 {
9492
9493 if (!reported_waiting && waits > 5)
9494 {
9496 (errmsg("base backup done, waiting for required WAL segments to be archived")));
9497 reported_waiting = true;
9498 }
9499
9502 1000L,
9505
9507 {
9508 seconds_before_warning *= 2; /* This wraps in >10 years... */
9510 (errmsg("still waiting for all required WAL segments to be archived (%d seconds elapsed)",
9511 waits),
9512 errhint("Check that your \"archive_command\" is executing properly. "
9513 "You can safely cancel this backup, "
9514 "but the database backup will not be usable without all the WAL segments.")));
9515 }
9516 }
9517
9519 (errmsg("all required WAL segments have been archived")));
9520 }
9521 else if (waitforarchive)
9523 (errmsg("WAL archiving is not enabled; you must ensure that all required WAL segments are copied through other means to complete the backup")));
9524}
9525
9526
9527/*
9528 * do_pg_abort_backup: abort a running backup
9529 *
9530 * This does just the most basic steps of do_pg_backup_stop(), by taking the
9531 * system out of backup mode, thus making it a lot more safe to call from
9532 * an error handler.
9533 *
9534 * 'arg' indicates that it's being called during backup setup; so
9535 * sessionBackupState has not been modified yet, but runningBackups has
9536 * already been incremented. When it's false, then it's invoked as a
9537 * before_shmem_exit handler, and therefore we must not change state
9538 * unless sessionBackupState indicates that a backup is actually running.
9539 *
9540 * NB: This gets used as a PG_ENSURE_ERROR_CLEANUP callback and
9541 * before_shmem_exit handler, hence the odd-looking signature.
9542 */
9543void
9545{
9547
9548 /* If called during backup start, there shouldn't be one already running */
9550
9552 {
9556
9559
9562 errmsg("aborting backup due to backend exiting before pg_backup_stop was called"));
9563 }
9564}
9565
9566/*
9567 * Register a handler that will warn about unterminated backups at end of
9568 * session, unless this has already been done.
9569 */
9570void
9572{
9573 static bool already_done = false;
9574
9575 if (already_done)
9576 return;
9578 already_done = true;
9579}
9580
9581/*
9582 * Get latest WAL insert pointer
9583 */
9586{
9589
9590 SpinLockAcquire(&Insert->insertpos_lck);
9591 current_bytepos = Insert->CurrBytePos;
9592 SpinLockRelease(&Insert->insertpos_lck);
9593
9595}
9596
9597/*
9598 * Get latest WAL write pointer
9599 */
9607
9608/*
9609 * Returns the redo pointer of the last checkpoint or restartpoint. This is
9610 * the oldest point in WAL that we still need, if we have to restart recovery.
9611 */
9612void
9620
9621/* Thin wrapper around ShutdownWalRcv(). */
9622void
9630
9631/* Enable WAL file recycling and preallocation. */
9632void
9639
9640/* Disable WAL file recycling and preallocation. */
9641void
9648
9649bool
9651{
9652 bool result;
9653
9657
9658 return result;
9659}
9660
9661/*
9662 * Update the WalWriterSleeping flag.
9663 */
9664void
Datum idx(PG_FUNCTION_ARGS)
Definition _int_op.c:262
static void pg_atomic_write_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:485
#define pg_memory_barrier()
Definition atomics.h:141
#define pg_read_barrier()
Definition atomics.h:154
static uint64 pg_atomic_read_membarrier_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:476
#define pg_write_barrier()
Definition atomics.h:155
static uint64 pg_atomic_monotonic_advance_u64(volatile pg_atomic_uint64 *ptr, uint64 target)
Definition atomics.h:595
static uint64 pg_atomic_fetch_add_u64(volatile pg_atomic_uint64 *ptr, int64 add_)
Definition atomics.h:532
static void pg_atomic_init_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:453
static void pg_atomic_write_membarrier_u64(volatile pg_atomic_uint64 *ptr, uint64 val)
Definition atomics.h:504
static uint64 pg_atomic_read_u64(volatile pg_atomic_uint64 *ptr)
Definition atomics.h:467
TimeLineID findNewestTimeLine(TimeLineID startTLI)
Definition timeline.c:264
void restoreTimeLineHistoryFiles(TimeLineID begin, TimeLineID end)
Definition timeline.c:50
void writeTimeLineHistory(TimeLineID newTLI, TimeLineID parentTLI, XLogRecPtr switchpoint, char *reason)
Definition timeline.c:304
void startup_progress_timeout_handler(void)
Definition startup.c:302
long TimestampDifferenceMilliseconds(TimestampTz start_time, TimestampTz stop_time)
Definition timestamp.c:1757
bool TimestampDifferenceExceeds(TimestampTz start_time, TimestampTz stop_time, int msec)
Definition timestamp.c:1781
TimestampTz GetCurrentTimestamp(void)
Definition timestamp.c:1645
const char * timestamptz_to_str(TimestampTz t)
Definition timestamp.c:1862
Datum now(PG_FUNCTION_ARGS)
Definition timestamp.c:1609
static bool backup_started_in_recovery
Definition basebackup.c:128
int Buffer
Definition buf.h:23
void CheckPointBuffers(int flags)
Definition bufmgr.c:4343
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5519
#define Min(x, y)
Definition c.h:1019
#define pg_attribute_unused()
Definition c.h:132
#define likely(x)
Definition c.h:423
#define MAXALIGN(LEN)
Definition c.h:838
#define TYPEALIGN(ALIGNVAL, LEN)
Definition c.h:831
uint8_t uint8
Definition c.h:556
#define Max(x, y)
Definition c.h:1013
#define Assert(condition)
Definition c.h:885
#define PG_BINARY
Definition c.h:1309
#define pg_attribute_always_inline
Definition c.h:291
uint64_t uint64
Definition c.h:559
#define unlikely(x)
Definition c.h:424
uint32_t uint32
Definition c.h:558
#define MAXALIGN64(LEN)
Definition c.h:863
#define PG_UINT64_MAX
Definition c.h:619
#define MemSet(start, val, len)
Definition c.h:1035
uint32 TransactionId
Definition c.h:678
size_t Size
Definition c.h:631
#define CATALOG_VERSION_NO
Definition catversion.h:60
void WakeupCheckpointer(void)
void AbsorbSyncRequests(void)
double CheckPointCompletionTarget
void RequestCheckpoint(int flags)
void BootStrapCLOG(void)
Definition clog.c:832
void StartupCLOG(void)
Definition clog.c:843
void CheckPointCLOG(void)
Definition clog.c:903
void TrimCLOG(void)
Definition clog.c:858
void StartupCommitTs(void)
Definition commit_ts.c:608
void CommitTsParameterChange(bool newvalue, bool oldvalue)
Definition commit_ts.c:640
bool track_commit_timestamp
Definition commit_ts.c:109
void CompleteCommitTsInitialization(void)
Definition commit_ts.c:618
void BootStrapCommitTs(void)
Definition commit_ts.c:594
void SetCommitTsLimit(TransactionId oldestXact, TransactionId newestXact)
Definition commit_ts.c:887
void CheckPointCommitTs(void)
Definition commit_ts.c:794
void update_controlfile(const char *DataDir, ControlFileData *ControlFile, bool do_sync)
#define fprintf(file, fmt, msg)
Definition cubescan.l:21
int64 TimestampTz
Definition timestamp.h:39
Datum arg
Definition elog.c:1322
int errcode_for_file_access(void)
Definition elog.c:897
int errcode(int sqlerrcode)
Definition elog.c:874
int errmsg(const char *fmt,...)
Definition elog.c:1093
#define _(x)
Definition elog.c:95
#define LOG
Definition elog.h:31
int errhint(const char *fmt,...) pg_attribute_printf(1
int errdetail(const char *fmt,...) pg_attribute_printf(1
#define FATAL
Definition elog.h:41
int int errmsg_internal(const char *fmt,...) pg_attribute_printf(1
#define WARNING
Definition elog.h:36
int int int errmsg_plural(const char *fmt_singular, const char *fmt_plural, unsigned long n,...) pg_attribute_printf(1
#define DEBUG2
Definition elog.h:29
#define PANIC
Definition elog.h:42
#define DEBUG1
Definition elog.h:30
#define ERROR
Definition elog.h:39
#define elog(elevel,...)
Definition elog.h:226
#define NOTICE
Definition elog.h:35
#define ereport(elevel,...)
Definition elog.h:150
int MakePGDirectory(const char *directoryName)
Definition fd.c:3962
int FreeDir(DIR *dir)
Definition fd.c:3008
int pg_fsync_no_writethrough(int fd)
Definition fd.c:441
int io_direct_flags
Definition fd.c:171
int durable_rename(const char *oldfile, const char *newfile, int elevel)
Definition fd.c:782
int pg_fdatasync(int fd)
Definition fd.c:480
int CloseTransientFile(int fd)
Definition fd.c:2854
int BasicOpenFile(const char *fileName, int fileFlags)
Definition fd.c:1089
int FreeFile(FILE *file)
Definition fd.c:2826
int pg_fsync_writethrough(int fd)
Definition fd.c:461
void ReleaseExternalFD(void)
Definition fd.c:1224
int data_sync_elevel(int elevel)
Definition fd.c:3985
static void Insert(File file)
Definition fd.c:1300
DIR * AllocateDir(const char *dirname)
Definition fd.c:2890
int durable_unlink(const char *fname, int elevel)
Definition fd.c:872
void ReserveExternalFD(void)
Definition fd.c:1206
struct dirent * ReadDir(DIR *dir, const char *dirname)
Definition fd.c:2956
int pg_fsync(int fd)
Definition fd.c:389
FILE * AllocateFile(const char *name, const char *mode)
Definition fd.c:2627
int OpenTransientFile(const char *fileName, int fileFlags)
Definition fd.c:2677
void SyncDataDirectory(void)
Definition fd.c:3593
#define IO_DIRECT_WAL
Definition fd.h:55
#define IO_DIRECT_WAL_INIT
Definition fd.h:56
#define PG_O_DIRECT
Definition fd.h:123
#define palloc_object(type)
Definition fe_memutils.h:74
ssize_t pg_pwrite_zeros(int fd, size_t size, pgoff_t offset)
Definition file_utils.c:709
PGFileType get_dirent_type(const char *path, const struct dirent *de, bool look_through_symlinks, int elevel)
Definition file_utils.c:547
PGFileType
Definition file_utils.h:19
@ PGFILETYPE_LNK
Definition file_utils.h:24
@ PGFILETYPE_DIR
Definition file_utils.h:23
@ PGFILETYPE_REG
Definition file_utils.h:22
bool IsBinaryUpgrade
Definition globals.c:121
int NBuffers
Definition globals.c:142
pid_t PostmasterPid
Definition globals.c:106
bool enableFsync
Definition globals.c:129
ProcNumber MyProcNumber
Definition globals.c:90
bool IsUnderPostmaster
Definition globals.c:120
int MaxConnections
Definition globals.c:143
volatile uint32 CritSectionCount
Definition globals.c:45
char * DataDir
Definition globals.c:71
bool IsPostmasterEnvironment
Definition globals.c:119
struct Latch * MyLatch
Definition globals.c:63
int max_worker_processes
Definition globals.c:144
int set_config_option_ext(const char *name, const char *value, GucContext context, GucSource source, Oid srole, GucAction action, bool changeVal, int elevel, bool is_reload)
Definition guc.c:3256
void SetConfigOption(const char *name, const char *value, GucContext context, GucSource source)
Definition guc.c:4196
void * guc_malloc(int elevel, size_t size)
Definition guc.c:636
#define newval
struct config_generic * find_option(const char *name, bool create_placeholders, bool skip_errors, int elevel)
Definition guc.c:1113
@ GUC_ACTION_SET
Definition guc.h:203
#define GUC_check_errdetail
Definition guc.h:506
GucSource
Definition guc.h:112
@ PGC_S_DYNAMIC_DEFAULT
Definition guc.h:114
@ PGC_S_OVERRIDE
Definition guc.h:123
@ PGC_INTERNAL
Definition guc.h:73
@ PGC_POSTMASTER
Definition guc.h:74
return str start
#define TOAST_MAX_CHUNK_SIZE
Definition heaptoast.h:84
#define bufsize
#define INJECTION_POINT(name, arg)
#define INJECTION_POINT_CACHED(name, arg)
#define INJECTION_POINT_LOAD(name)
WalUsage pgWalUsage
Definition instrument.c:22
#define close(a)
Definition win32.h:12
#define write(a, b, c)
Definition win32.h:14
#define read(a, b, c)
Definition win32.h:13
void before_shmem_exit(pg_on_exit_callback function, Datum arg)
Definition ipc.c:344
#define PG_ENSURE_ERROR_CLEANUP(cleanup_function, arg)
Definition ipc.h:47
#define PG_END_ENSURE_ERROR_CLEANUP(cleanup_function, arg)
Definition ipc.h:52
int i
Definition isn.c:77
#define LOBLKSIZE
void SetLatch(Latch *latch)
Definition latch.c:290
void ResetLatch(Latch *latch)
Definition latch.c:374
int WaitLatch(Latch *latch, int wakeEvents, long timeout, uint32 wait_event_info)
Definition latch.c:172
List * lappend(List *list, void *datum)
Definition list.c:339
void list_free(List *list)
Definition list.c:1546
int max_locks_per_xact
Definition lock.c:53
void UpdateLogicalDecodingStatusEndOfRecovery(void)
Definition logicalctl.c:554
bool IsLogicalDecodingEnabled(void)
Definition logicalctl.c:205
bool IsXLogLogicalInfoEnabled(void)
Definition logicalctl.c:221
void StartupLogicalDecodingStatus(bool last_status)
Definition logicalctl.c:147
void DisableLogicalDecoding(void)
Definition logicalctl.c:492
void EnableLogicalDecoding(void)
Definition logicalctl.c:341
void LWLockUpdateVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1728
void LWLockReleaseClearVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 val)
Definition lwlock.c:1866
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1176
bool LWLockWaitForVar(LWLock *lock, pg_atomic_uint64 *valptr, uint64 oldval, uint64 *newval)
Definition lwlock.c:1592
void LWLockRelease(LWLock *lock)
Definition lwlock.c:1793
void LWLockInitialize(LWLock *lock, int tranche_id)
Definition lwlock.c:698
bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1347
bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode)
Definition lwlock.c:1404
@ LW_SHARED
Definition lwlock.h:113
@ LW_EXCLUSIVE
Definition lwlock.h:112
char * pstrdup(const char *in)
Definition mcxt.c:1781
void pfree(void *pointer)
Definition mcxt.c:1616
MemoryContext TopMemoryContext
Definition mcxt.c:166
void * palloc(Size size)
Definition mcxt.c:1387
void MemoryContextAllowInCriticalSection(MemoryContext context, bool allow)
Definition mcxt.c:743
#define AllocSetContextCreate
Definition memutils.h:129
#define ALLOCSET_DEFAULT_SIZES
Definition memutils.h:160
#define AmStartupProcess()
Definition miscadmin.h:390
#define IsBootstrapProcessingMode()
Definition miscadmin.h:477
#define START_CRIT_SECTION()
Definition miscadmin.h:150
#define CHECK_FOR_INTERRUPTS()
Definition miscadmin.h:123
@ B_CHECKPOINTER
Definition miscadmin.h:363
#define END_CRIT_SECTION()
Definition miscadmin.h:152
#define AmWalReceiverProcess()
Definition miscadmin.h:391
bool process_shared_preload_libraries_done
Definition miscinit.c:1787
BackendType MyBackendType
Definition miscinit.c:64
void MultiXactSetNextMXact(MultiXactId nextMulti, MultiXactOffset nextMultiOffset)
Definition multixact.c:1992
void MultiXactAdvanceOldest(MultiXactId oldestMulti, Oid oldestMultiDB)
Definition multixact.c:2191
void MultiXactGetCheckptMulti(bool is_shutdown, MultiXactId *nextMulti, MultiXactOffset *nextMultiOffset, MultiXactId *oldestMulti, Oid *oldestMultiDB)
Definition multixact.c:1946
void CheckPointMultiXact(void)
Definition multixact.c:1968
void TrimMultiXact(void)
Definition multixact.c:1834
void MultiXactAdvanceNextMXact(MultiXactId minMulti, MultiXactOffset minMultiOffset)
Definition multixact.c:2164
void BootStrapMultiXact(void)
Definition multixact.c:1793
void StartupMultiXact(void)
Definition multixact.c:1809
void SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
Definition multixact.c:2014
#define FirstMultiXactId
Definition multixact.h:26
void StartupReplicationOrigin(void)
Definition origin.c:730
void CheckPointReplicationOrigin(void)
Definition origin.c:604
static MemoryContext MemoryContextSwitchTo(MemoryContext context)
Definition palloc.h:124
#define ERRCODE_DATA_CORRUPTED
#define INDEX_MAX_KEYS
#define NAMEDATALEN
#define MAXPGPATH
#define DEFAULT_XLOG_SEG_SIZE
#define SLRU_PAGES_PER_SEGMENT
#define PG_IO_ALIGN_SIZE
#define PG_CACHE_LINE_SIZE
#define FLOATFORMAT_VALUE
Definition pg_control.h:203
#define XLOG_RESTORE_POINT
Definition pg_control.h:76
#define XLOG_FPW_CHANGE
Definition pg_control.h:77
#define XLOG_CHECKPOINT_REDO
Definition pg_control.h:83
#define PG_CONTROL_VERSION
Definition pg_control.h:25
#define XLOG_OVERWRITE_CONTRECORD
Definition pg_control.h:82
#define XLOG_FPI
Definition pg_control.h:80
#define XLOG_FPI_FOR_HINT
Definition pg_control.h:79
#define MOCK_AUTH_NONCE_LEN
Definition pg_control.h:28
#define XLOG_NEXTOID
Definition pg_control.h:72
@ DB_IN_PRODUCTION
Definition pg_control.h:99
@ DB_SHUTDOWNING
Definition pg_control.h:96
@ DB_IN_ARCHIVE_RECOVERY
Definition pg_control.h:98
@ DB_SHUTDOWNED_IN_RECOVERY
Definition pg_control.h:95
@ DB_SHUTDOWNED
Definition pg_control.h:94
@ DB_IN_CRASH_RECOVERY
Definition pg_control.h:97
#define XLOG_NOOP
Definition pg_control.h:71
#define XLOG_CHECKPOINT_SHUTDOWN
Definition pg_control.h:69
#define PG_CONTROL_FILE_SIZE
Definition pg_control.h:260
#define XLOG_SWITCH
Definition pg_control.h:73
#define XLOG_BACKUP_END
Definition pg_control.h:74
#define XLOG_PARAMETER_CHANGE
Definition pg_control.h:75
#define XLOG_LOGICAL_DECODING_STATUS_CHANGE
Definition pg_control.h:84
#define XLOG_CHECKPOINT_ONLINE
Definition pg_control.h:70
#define XLOG_END_OF_RECOVERY
Definition pg_control.h:78
uint32 pg_crc32c
Definition pg_crc32c.h:38
#define COMP_CRC32C(crc, data, len)
Definition pg_crc32c.h:153
#define EQ_CRC32C(c1, c2)
Definition pg_crc32c.h:42
#define INIT_CRC32C(crc)
Definition pg_crc32c.h:41
#define FIN_CRC32C(crc)
Definition pg_crc32c.h:158
const void size_t len
return crc
static char * filename
Definition pg_dumpall.c:132
#define lfirst(lc)
Definition pg_list.h:172
static rewind_source * source
Definition pg_rewind.c:89
static char buf[DEFAULT_XLOG_SEG_SIZE]
bool pgstat_report_fixed
Definition pgstat.c:218
void pgstat_restore_stats(void)
Definition pgstat.c:507
void pgstat_discard_stats(void)
Definition pgstat.c:519
@ IOOBJECT_WAL
Definition pgstat.h:283
@ IOCONTEXT_INIT
Definition pgstat.h:292
@ IOCONTEXT_NORMAL
Definition pgstat.h:293
@ IOOP_FSYNC
Definition pgstat.h:312
@ IOOP_WRITE
Definition pgstat.h:320
PgStat_CheckpointerStats PendingCheckpointerStats
instr_time pgstat_prepare_io_time(bool track_io_guc)
Definition pgstat_io.c:91
void pgstat_count_io_op_time(IOObject io_object, IOContext io_context, IOOp io_op, instr_time start_time, uint32 cnt, uint64 bytes)
Definition pgstat_io.c:122
int64 pg_time_t
Definition pgtime.h:23
size_t pg_strftime(char *s, size_t maxsize, const char *format, const struct pg_tm *t)
Definition strftime.c:128
struct pg_tm * pg_localtime(const pg_time_t *timep, const pg_tz *tz)
Definition localtime.c:1345
PGDLLIMPORT pg_tz * log_timezone
Definition pgtz.c:31
bool pg_strong_random(void *buf, size_t len)
int pg_strcasecmp(const char *s1, const char *s2)
#define pg_pwrite
Definition port.h:248
#define snprintf
Definition port.h:260
#define IS_DIR_SEP(ch)
Definition port.h:103
size_t strlcpy(char *dst, const char *src, size_t siz)
Definition strlcpy.c:45
static bool DatumGetBool(Datum X)
Definition postgres.h:100
static Datum BoolGetDatum(bool X)
Definition postgres.h:112
uint64_t Datum
Definition postgres.h:70
#define InvalidOid
unsigned int Oid
void CheckPointPredicate(void)
Definition predicate.c:1041
static int fd(const char *x, int i)
static int fb(int x)
short access
#define GetPGProcByNumber(n)
Definition proc.h:504
#define DELAY_CHKPT_START
Definition proc.h:136
#define DELAY_CHKPT_COMPLETE
Definition proc.h:137
bool MinimumActiveBackends(int min)
Definition procarray.c:3580
TransactionId GetOldestTransactionIdConsideredRunning(void)
Definition procarray.c:1985
bool HaveVirtualXIDsDelayingChkpt(VirtualTransactionId *vxids, int nvxids, int type)
Definition procarray.c:3053
void ProcArrayApplyRecoveryInfo(RunningTransactions running)
Definition procarray.c:1057
TransactionId GetOldestActiveTransactionId(bool inCommitOnly, bool allDbs)
Definition procarray.c:2836
void ProcArrayInitRecovery(TransactionId initializedUptoXID)
Definition procarray.c:1026
VirtualTransactionId * GetVirtualXIDsDelayingChkpt(int *nvxids, int type)
Definition procarray.c:3008
#define INVALID_PROC_NUMBER
Definition procnumber.h:26
int ProcNumber
Definition procnumber.h:24
static void set_ps_display(const char *activity)
Definition ps_status.h:40
void ResetUnloggedRelations(int op)
Definition reinit.c:47
#define UNLOGGED_RELATION_INIT
Definition reinit.h:28
#define UNLOGGED_RELATION_CLEANUP
Definition reinit.h:27
void RelationCacheInitFileRemove(void)
Definition relcache.c:6895
void CheckPointRelationMap(void)
Definition relmapper.c:611
#define relpath(rlocator, forknum)
Definition relpath.h:150
#define PG_TBLSPC_DIR
Definition relpath.h:41
void StartupReorderBuffer(void)
ResourceOwner CurrentResourceOwner
Definition resowner.c:173
ResourceOwner AuxProcessResourceOwner
Definition resowner.c:176
void CheckPointLogicalRewriteHeap(void)
#define RM_MAX_ID
Definition rmgr.h:33
Size add_size(Size s1, Size s2)
Definition shmem.c:482
Size mul_size(Size s1, Size s2)
Definition shmem.c:497
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition shmem.c:378
void pg_usleep(long microsec)
Definition signal.c:53
void CheckPointReplicationSlots(bool is_shutdown)
Definition slot.c:2309
void StartupReplicationSlots(void)
Definition slot.c:2387
bool InvalidateObsoleteReplicationSlots(uint32 possible_causes, XLogSegNo oldestSegno, Oid dboid, TransactionId snapshotConflictHorizon)
Definition slot.c:2205
@ RS_INVAL_WAL_REMOVED
Definition slot.h:62
@ RS_INVAL_IDLE_TIMEOUT
Definition slot.h:68
@ RS_INVAL_WAL_LEVEL
Definition slot.h:66
bool sync_replication_slots
Definition slotsync.c:117
void smgrdestroyall(void)
Definition smgr.c:386
void CheckPointSnapBuild(void)
Definition snapbuild.c:1969
void DeleteAllExportedSnapshotFiles(void)
Definition snapmgr.c:1587
static void SpinLockRelease(volatile slock_t *lock)
Definition spin.h:62
static void SpinLockAcquire(volatile slock_t *lock)
Definition spin.h:56
static void SpinLockInit(volatile slock_t *lock)
Definition spin.h:50
void reset(void)
PGPROC * MyProc
Definition proc.c:67
PROC_HDR * ProcGlobal
Definition proc.c:70
XLogRecPtr LogStandbySnapshot(void)
Definition standby.c:1282
void InitRecoveryTransactionEnvironment(void)
Definition standby.c:95
void ShutdownRecoveryTransactionEnvironment(void)
Definition standby.c:161
@ SUBXIDS_IN_SUBTRANS
Definition standby.h:120
void appendStringInfo(StringInfo str, const char *fmt,...)
Definition stringinfo.c:145
void appendBinaryStringInfo(StringInfo str, const void *data, int datalen)
Definition stringinfo.c:281
void appendStringInfoString(StringInfo str, const char *s)
Definition stringinfo.c:230
void appendStringInfoChar(StringInfo str, char ch)
Definition stringinfo.c:242
void initStringInfo(StringInfo str)
Definition stringinfo.c:97
Oid oldestMultiDB
Definition pg_control.h:52
MultiXactId oldestMulti
Definition pg_control.h:51
MultiXactOffset nextMultiOffset
Definition pg_control.h:48
TransactionId newestCommitTsXid
Definition pg_control.h:56
TransactionId oldestXid
Definition pg_control.h:49
TimeLineID PrevTimeLineID
Definition pg_control.h:40
TimeLineID ThisTimeLineID
Definition pg_control.h:39
TransactionId oldestActiveXid
Definition pg_control.h:65
bool fullPageWrites
Definition pg_control.h:42
MultiXactId nextMulti
Definition pg_control.h:47
FullTransactionId nextXid
Definition pg_control.h:45
TransactionId oldestCommitTsXid
Definition pg_control.h:54
pg_time_t time
Definition pg_control.h:53
int wal_level
Definition pg_control.h:43
bool logicalDecodingEnabled
Definition pg_control.h:44
XLogRecPtr redo
Definition pg_control.h:37
Oid oldestXidDB
Definition pg_control.h:50
uint64 ckpt_agg_sync_time
Definition xlog.h:187
uint64 ckpt_longest_sync
Definition xlog.h:186
TimestampTz ckpt_start_t
Definition xlog.h:172
TimestampTz ckpt_end_t
Definition xlog.h:176
int ckpt_segs_removed
Definition xlog.h:182
TimestampTz ckpt_write_t
Definition xlog.h:173
TimestampTz ckpt_sync_end_t
Definition xlog.h:175
TimestampTz ckpt_sync_t
Definition xlog.h:174
int ckpt_bufs_written
Definition xlog.h:178
int ckpt_segs_recycled
Definition xlog.h:183
int ckpt_slru_written
Definition xlog.h:179
char mock_authentication_nonce[MOCK_AUTH_NONCE_LEN]
Definition pg_control.h:239
uint32 pg_control_version
Definition pg_control.h:127
uint32 xlog_seg_size
Definition pg_control.h:215
XLogRecPtr backupStartPoint
Definition pg_control.h:172
bool track_commit_timestamp
Definition pg_control.h:187
CheckPoint checkPointCopy
Definition pg_control.h:137
uint32 slru_pages_per_segment
Definition pg_control.h:212
XLogRecPtr backupEndPoint
Definition pg_control.h:173
XLogRecPtr minRecoveryPoint
Definition pg_control.h:170
uint32 data_checksum_version
Definition pg_control.h:226
XLogRecPtr unloggedLSN
Definition pg_control.h:139
uint32 indexMaxKeys
Definition pg_control.h:218
pg_time_t time
Definition pg_control.h:134
bool default_char_signedness
Definition pg_control.h:232
XLogRecPtr checkPoint
Definition pg_control.h:135
uint64 system_identifier
Definition pg_control.h:112
uint32 catalog_version_no
Definition pg_control.h:128
TimeLineID minRecoveryPointTLI
Definition pg_control.h:171
pg_crc32c crc
Definition pg_control.h:242
uint32 toast_max_chunk_size
Definition pg_control.h:220
Definition dirent.c:26
Definition pg_list.h:54
char data[XLOG_BLCKSZ]
Definition c.h:1159
ProcNumber walwriterProc
Definition proc.h:488
PgStat_Counter sync_time
Definition pgstat.h:269
PgStat_Counter write_time
Definition pgstat.h:268
void(* rm_mask)(char *pagedata, BlockNumber blkno)
TransactionId oldestRunningXid
Definition standby.h:130
TransactionId nextXid
Definition standby.h:129
TransactionId latestCompletedXid
Definition standby.h:133
subxids_array_status subxid_status
Definition standby.h:128
TransactionId * xids
Definition standby.h:135
TransactionId oldestCommitTsXid
Definition transam.h:232
TransactionId newestCommitTsXid
Definition transam.h:233
FullTransactionId latestCompletedXid
Definition transam.h:238
FullTransactionId nextXid
Definition transam.h:220
TransactionId oldestXid
Definition transam.h:222
pg_atomic_uint64 insertingAt
Definition xlog.c:373
XLogRecPtr lastImportantAt
Definition xlog.c:374
LWLock lock
Definition xlog.c:372
pg_atomic_uint64 minWaitedLSN[WAIT_LSN_TYPE_COUNT]
Definition xlogwait.h:85
int64 wal_buffers_full
Definition instrument.h:57
uint64 wal_bytes
Definition instrument.h:55
int64 wal_fpi
Definition instrument.h:54
uint64 wal_fpi_bytes
Definition instrument.h:56
int64 wal_records
Definition instrument.h:53
CheckPoint lastCheckPoint
Definition xlog.c:547
XLogwrtRqst LogwrtRqst
Definition xlog.c:458
slock_t info_lck
Definition xlog.c:555
XLogRecPtr InitializedUpTo
Definition xlog.c:487
char * pages
Definition xlog.c:494
pg_time_t lastSegSwitchTime
Definition xlog.c:469
XLogRecPtr replicationSlotMinLSN
Definition xlog.c:461
RecoveryState SharedRecoveryState
Definition xlog.c:518
TimeLineID InsertTimeLineID
Definition xlog.c:511
XLogRecPtr lastSegSwitchLSN
Definition xlog.c:470
XLogSegNo lastRemovedSegNo
Definition xlog.c:463
pg_atomic_uint64 * xlblocks
Definition xlog.c:495
pg_atomic_uint64 logWriteResult
Definition xlog.c:474
int XLogCacheBlck
Definition xlog.c:496
XLogRecPtr RedoRecPtr
Definition xlog.c:459
XLogRecPtr lastCheckPointRecPtr
Definition xlog.c:545
XLogRecPtr lastFpwDisableRecPtr
Definition xlog.c:553
XLogCtlInsert Insert
Definition xlog.c:455
bool InstallXLogFileSegmentActive
Definition xlog.c:528
bool WalWriterSleeping
Definition xlog.c:535
XLogRecPtr asyncXactLSN
Definition xlog.c:460
XLogRecPtr lastCheckPointEndPtr
Definition xlog.c:546
pg_atomic_uint64 logFlushResult
Definition xlog.c:475
pg_atomic_uint64 logInsertResult
Definition xlog.c:473
TimeLineID PrevTimeLineID
Definition xlog.c:512
pg_atomic_uint64 unloggedLSN
Definition xlog.c:466
WALInsertLockPadded * WALInsertLocks
Definition xlog.c:447
XLogRecPtr RedoRecPtr
Definition xlog.c:433
uint64 PrevBytePos
Definition xlog.c:411
char pad[PG_CACHE_LINE_SIZE]
Definition xlog.c:420
int runningBackups
Definition xlog.c:441
slock_t insertpos_lck
Definition xlog.c:401
uint64 CurrBytePos
Definition xlog.c:410
bool fullPageWrites
Definition xlog.c:434
XLogRecPtr lastBackupStart
Definition xlog.c:442
XLogRecPtr xlp_pageaddr
XLogRecPtr EndRecPtr
Definition xlogreader.h:206
XLogRecPtr ReadRecPtr
Definition xlogreader.h:205
XLogRecPtr xl_prev
Definition xlogrecord.h:45
pg_crc32c xl_crc
Definition xlogrecord.h:49
uint8 xl_info
Definition xlogrecord.h:46
uint32 xl_tot_len
Definition xlogrecord.h:43
TransactionId xl_xid
Definition xlogrecord.h:44
RmgrId xl_rmid
Definition xlogrecord.h:47
XLogRecPtr Flush
Definition xlog.c:331
XLogRecPtr Write
Definition xlog.c:330
XLogRecPtr Flush
Definition xlog.c:325
XLogRecPtr Write
Definition xlog.c:324
Definition guc.h:174
TimestampTz rp_time
void StartupSUBTRANS(TransactionId oldestActiveXID)
Definition subtrans.c:283
void CheckPointSUBTRANS(void)
Definition subtrans.c:329
void BootStrapSUBTRANS(void)
Definition subtrans.c:269
void TruncateSUBTRANS(TransactionId oldestXact)
Definition subtrans.c:385
void ProcessSyncRequests(void)
Definition sync.c:286
void SyncPreCheckpoint(void)
Definition sync.c:177
void SyncPostCheckpoint(void)
Definition sync.c:202
TimeoutId RegisterTimeout(TimeoutId id, timeout_handler_proc handler)
Definition timeout.c:505
@ STARTUP_PROGRESS_TIMEOUT
Definition timeout.h:38
#define TransactionIdRetreat(dest)
Definition transam.h:141
#define InvalidTransactionId
Definition transam.h:31
static void FullTransactionIdRetreat(FullTransactionId *dest)
Definition transam.h:103
#define XidFromFullTransactionId(x)
Definition transam.h:48
#define FirstGenbkiObjectId
Definition transam.h:195
#define FirstNormalTransactionId
Definition transam.h:34
#define TransactionIdIsValid(xid)
Definition transam.h:41
static FullTransactionId FullTransactionIdFromEpochAndXid(uint32 epoch, TransactionId xid)
Definition transam.h:71
#define TransactionIdIsNormal(xid)
Definition transam.h:42
#define FullTransactionIdPrecedes(a, b)
Definition transam.h:51
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition transam.h:263
void RecoverPreparedTransactions(void)
Definition twophase.c:2083
void restoreTwoPhaseData(void)
Definition twophase.c:1904
int max_prepared_xacts
Definition twophase.c:116
TransactionId PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
Definition twophase.c:1966
void StandbyRecoverPreparedTransactions(void)
Definition twophase.c:2045
void CheckPointTwoPhase(XLogRecPtr redo_horizon)
Definition twophase.c:1822
WALInsertLock l
Definition xlog.c:386
char pad[PG_CACHE_LINE_SIZE]
Definition xlog.c:387
bool SplitIdentifierString(char *rawstring, char separator, List **namelist)
Definition varlena.c:2775
void SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
Definition varsup.c:372
void AdvanceOldestClogXid(TransactionId oldest_datfrozenxid)
Definition varsup.c:355
TransamVariablesData * TransamVariables
Definition varsup.c:34
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition wait_event.h:69
static void pgstat_report_wait_end(void)
Definition wait_event.h:85
#define WL_TIMEOUT
#define WL_EXIT_ON_PM_DEATH
#define WL_LATCH_SET
static TimestampTz wakeup[NUM_WALRCV_WAKEUPS]
XLogRecPtr Flush
XLogRecPtr Write
XLogRecPtr GetWalRcvFlushRecPtr(XLogRecPtr *latestChunkStart, TimeLineID *receiveTLI)
void ShutdownWalRcv(void)
void WalSndWakeup(bool physical, bool logical)
Definition walsender.c:3810
int max_wal_senders
Definition walsender.c:129
void WalSndInitStopping(void)
Definition walsender.c:3889
void WalSndWaitStopping(void)
Definition walsender.c:3915
static void WalSndWakeupProcessRequests(bool physical, bool logical)
Definition walsender.h:65
#define WalSndWakeupRequest()
Definition walsender.h:58
bool summarize_wal
void WaitForWalSummarization(XLogRecPtr lsn)
void WakeupWalSummarizer(void)
XLogRecPtr GetOldestUnsummarizedLSN(TimeLineID *tli, bool *lsn_is_exact)
int WalWriterFlushAfter
Definition walwriter.c:71
int WalWriterDelay
Definition walwriter.c:70
#define stat
Definition win32_port.h:74
#define EINTR
Definition win32_port.h:361
#define S_ISDIR(m)
Definition win32_port.h:315
#define kill(pid, sig)
Definition win32_port.h:490
#define SIGUSR1
Definition win32_port.h:170
#define readlink(path, buf, size)
Definition win32_port.h:226
#define O_CLOEXEC
Definition win32_port.h:344
#define O_DSYNC
Definition win32_port.h:346
int gettimeofday(struct timeval *tp, void *tzp)
void MarkSubxactTopXidLogged(void)
Definition xact.c:592
void MarkCurrentTransactionIdLoggedIfAny(void)
Definition xact.c:542
int XLogFileInit(XLogSegNo logsegno, TimeLineID logtli)
Definition xlog.c:3400
void assign_wal_sync_method(int new_wal_sync_method, void *extra)
Definition xlog.c:8809
static const char * CheckpointFlagsString(int flags)
Definition xlog.c:6759
static void CreateEndOfRecoveryRecord(void)
Definition xlog.c:7499
uint64 GetSystemIdentifier(void)
Definition xlog.c:4610
int wal_decode_buffer_size
Definition xlog.c:139
XLogRecPtr ProcLastRecPtr
Definition xlog.c:256
static XLogCtlData * XLogCtl
Definition xlog.c:568
bool fullPageWrites
Definition xlog.c:125
void UpdateFullPageWrites(void)
Definition xlog.c:8294
bool RecoveryInProgress(void)
Definition xlog.c:6443
static void CleanupBackupHistory(void)
Definition xlog.c:4181
void GetFullPageWriteInfo(XLogRecPtr *RedoRecPtr_p, bool *doPageWrites_p)
Definition xlog.c:6576
TimeLineID GetWALInsertionTimeLine(void)
Definition xlog.c:6629
XLogRecPtr RequestXLogSwitch(bool mark_unimportant)
Definition xlog.c:8188
void do_pg_abort_backup(int code, Datum arg)
Definition xlog.c:9544
XLogSegNo XLogGetLastRemovedSegno(void)
Definition xlog.c:3778
XLogRecPtr XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn, uint8 flags, int num_fpi, uint64 fpi_bytes, bool topxid_included)
Definition xlog.c:749
char * XLogArchiveCommand
Definition xlog.c:123
int wal_keep_size_mb
Definition xlog.c:119
Size WALReadFromBuffers(char *dstbuf, XLogRecPtr startptr, Size count, TimeLineID tli)
Definition xlog.c:1754
static XLogRecPtr WaitXLogInsertionsToFinish(XLogRecPtr upto)
Definition xlog.c:1510
static void WALInsertLockRelease(void)
Definition xlog.c:1451
static XLogRecPtr XLogBytePosToRecPtr(uint64 bytepos)
Definition xlog.c:1864
bool EnableHotStandby
Definition xlog.c:124
static void WALInsertLockUpdateInsertingAt(XLogRecPtr insertingAt)
Definition xlog.c:1477
XLogRecPtr GetRedoRecPtr(void)
Definition xlog.c:6546
void assign_wal_consistency_checking(const char *newval, void *extra)
Definition xlog.c:4814
static void InitControlFile(uint64 sysidentifier, uint32 data_checksum_version)
Definition xlog.c:4224
void SetInstallXLogFileSegmentActive(void)
Definition xlog.c:9633
static void AdvanceXLInsertBuffer(XLogRecPtr upto, TimeLineID tli, bool opportunistic)
Definition xlog.c:1991
static void WALInsertLockAcquireExclusive(void)
Definition xlog.c:1422
static void UpdateControlFile(void)
Definition xlog.c:4601
void StartupXLOG(void)
Definition xlog.c:5500
bool IsInstallXLogFileSegmentActive(void)
Definition xlog.c:9650
static int openLogFile
Definition xlog.c:637
void BootStrapXLOG(uint32 data_checksum_version)
Definition xlog.c:5109
XLogRecPtr XactLastRecEnd
Definition xlog.c:257
bool CreateRestartPoint(int flags)
Definition xlog.c:7714
static void ValidateXLOGDirectoryStructure(void)
Definition xlog.c:4119
int CommitDelay
Definition xlog.c:135
static void RemoveOldXlogFiles(XLogSegNo segno, XLogRecPtr lastredoptr, XLogRecPtr endptr, TimeLineID insertTLI)
Definition xlog.c:3885
static XLogRecPtr CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn, XLogRecPtr pagePtr, TimeLineID newTLI)
Definition xlog.c:7564
XLogRecPtr GetInsertRecPtr(void)
Definition xlog.c:6591
bool wal_init_zero
Definition xlog.c:130
static void CalculateCheckpointSegments(void)
Definition xlog.c:2157
XLogRecPtr XLogGetReplicationSlotMinimumLSN(void)
Definition xlog.c:2665
int XLogArchiveMode
Definition xlog.c:122
SessionBackupState get_backup_status(void)
Definition xlog.c:9251
static void XLogReportParameters(void)
Definition xlog.c:8231
#define RefreshXLogWriteResult(_target)
Definition xlog.c:622
void CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
Definition xlog.c:3747
int wal_level
Definition xlog.c:134
static void LogCheckpointStart(int flags, bool restartpoint)
Definition xlog.c:6780
static XLogRecPtr RedoRecPtr
Definition xlog.c:276
void assign_checkpoint_completion_target(double newval, void *extra)
Definition xlog.c:2193
static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, bool find_free, XLogSegNo max_segno, TimeLineID tli)
Definition xlog.c:3583
static void WriteControlFile(void)
Definition xlog.c:4259
int wal_segment_size
Definition xlog.c:146
WALAvailability GetWALAvailability(XLogRecPtr targetLSN)
Definition xlog.c:7995
const char * show_archive_command(void)
Definition xlog.c:4867
#define UsableBytesInPage
Definition xlog.c:599
int max_wal_size_mb
Definition xlog.c:117
void XLOGShmemInit(void)
Definition xlog.c:4994
void ShutdownXLOG(int code, Datum arg)
Definition xlog.c:6711
bool DataChecksumsEnabled(void)
Definition xlog.c:4630
static bool PerformRecoveryXLogAction(void)
Definition xlog.c:6393
RecoveryState GetRecoveryState(void)
Definition xlog.c:6479
int XLogArchiveTimeout
Definition xlog.c:121
static void CleanupAfterArchiveRecovery(TimeLineID EndOfLogTLI, XLogRecPtr EndOfLog, TimeLineID newTLI)
Definition xlog.c:5360
#define ConvertToXSegs(x, segsize)
Definition xlog.c:605
bool wal_recycle
Definition xlog.c:131
static void RemoveXlogFile(const struct dirent *segment_de, XLogSegNo recycleSegNo, XLogSegNo *endlogSegNo, TimeLineID insertTLI)
Definition xlog.c:4029
pg_time_t GetLastSegSwitchData(XLogRecPtr *lastSwitchLSN)
Definition xlog.c:6694
const char * show_effective_wal_level(void)
Definition xlog.c:4894
static int XLOGChooseNumBuffers(void)
Definition xlog.c:4676
static XLogRecPtr XLogBytePosToEndRecPtr(uint64 bytepos)
Definition xlog.c:1904
static void LogCheckpointEnd(bool restartpoint, int flags)
Definition xlog.c:6798
static int get_sync_bit(int method)
Definition xlog.c:8761
static XLogwrtResult LogwrtResult
Definition xlog.c:614
void XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
Definition xlog.c:2652
void SwitchIntoArchiveRecovery(XLogRecPtr EndRecPtr, TimeLineID replayTLI)
Definition xlog.c:6318
static bool lastFullPageWrites
Definition xlog.c:220
char * wal_consistency_checking_string
Definition xlog.c:128
static void WALInsertLockAcquire(void)
Definition xlog.c:1377
int CommitSiblings
Definition xlog.c:136
static void CopyXLogRecordToWAL(int write_len, bool isLogSwitch, XLogRecData *rdata, XLogRecPtr StartPos, XLogRecPtr EndPos, TimeLineID tli)
Definition xlog.c:1231
bool GetDefaultCharSignedness(void)
Definition xlog.c:4644
static double CheckPointDistanceEstimate
Definition xlog.c:162
static uint64 XLogRecPtrToBytePos(XLogRecPtr ptr)
Definition xlog.c:1947
const char * show_in_hot_standby(void)
Definition xlog.c:4879
XLogRecPtr GetXLogInsertRecPtr(void)
Definition xlog.c:9585
Size XLOGShmemSize(void)
Definition xlog.c:4944
void SetWalWriterSleeping(bool sleeping)
Definition xlog.c:9665
bool wal_log_hints
Definition xlog.c:126
static void XLogInitNewTimeline(TimeLineID endTLI, XLogRecPtr endOfLog, TimeLineID newTLI)
Definition xlog.c:5285
static void CheckRequiredParameterValues(void)
Definition xlog.c:5456
#define XLogRecPtrToBufIdx(recptr)
Definition xlog.c:593
int wal_sync_method
Definition xlog.c:133
int XLogFileOpen(XLogSegNo segno, TimeLineID tli)
Definition xlog.c:3638
int max_slot_wal_keep_size_mb
Definition xlog.c:138
XLogRecPtr GetFlushRecPtr(TimeLineID *insertTLI)
Definition xlog.c:6608
static void PreallocXlogFiles(XLogRecPtr endptr, TimeLineID tli)
Definition xlog.c:3710
static bool doPageWrites
Definition xlog.c:289
static bool holdingAllLocks
Definition xlog.c:654
static TimeLineID openLogTLI
Definition xlog.c:639
XLogRecPtr XactLastCommitEnd
Definition xlog.c:258
WalLevel GetActiveWalLevelOnStandby(void)
Definition xlog.c:4935
bool log_checkpoints
Definition xlog.c:132
static void KeepLogSeg(XLogRecPtr recptr, XLogSegNo *logSegNo)
Definition xlog.c:8079
static void XLogWrite(XLogwrtRqst WriteRqst, TimeLineID tli, bool flexible)
Definition xlog.c:2290
void InitializeWalConsistencyChecking(void)
Definition xlog.c:4841
static void UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
Definition xlog.c:2686
static int LocalSetXLogInsertAllowed(void)
Definition xlog.c:6531
void assign_max_wal_size(int newval, void *extra)
Definition xlog.c:2186
void RemoveNonParentXlogFiles(XLogRecPtr switchpoint, TimeLineID newTLI)
Definition xlog.c:3960
XLogRecPtr GetLastImportantRecPtr(void)
Definition xlog.c:6665
void xlog_redo(XLogReaderState *record)
Definition xlog.c:8363
static int MyLockNo
Definition xlog.c:653
static void RecoveryRestartPoint(const CheckPoint *checkPoint, XLogReaderState *record)
Definition xlog.c:7674
bool XLogNeedsFlush(XLogRecPtr record)
Definition xlog.c:3128
void register_persistent_abort_backup_handler(void)
Definition xlog.c:9571
static double PrevCheckPointDistance
Definition xlog.c:163
void ReachedEndOfBackup(XLogRecPtr EndRecPtr, TimeLineID tli)
Definition xlog.c:6356
void LocalProcessControlFile(bool reset)
Definition xlog.c:4922
static void XLogFileClose(void)
Definition xlog.c:3659
int wal_compression
Definition xlog.c:127
static void UpdateCheckPointDistanceEstimate(uint64 nbytes)
Definition xlog.c:6905
static bool LocalRecoveryInProgress
Definition xlog.c:227
XLogSegNo XLogGetOldestSegno(TimeLineID tli)
Definition xlog.c:3794
XLogRecPtr GetXLogWriteRecPtr(void)
Definition xlog.c:9601
void ResetInstallXLogFileSegmentActive(void)
Definition xlog.c:9642
static WALInsertLockPadded * WALInsertLocks
Definition xlog.c:571
static XLogSegNo openLogSegNo
Definition xlog.c:638
#define INSERT_FREESPACE(endptr)
Definition xlog.c:582
int wal_retrieve_retry_interval
Definition xlog.c:137
int XLOGbuffers
Definition xlog.c:120
bool XLogBackgroundFlush(void)
Definition xlog.c:2971
const struct config_enum_entry archive_mode_options[]
Definition xlog.c:194
void GetOldestRestartPoint(XLogRecPtr *oldrecptr, TimeLineID *oldtli)
Definition xlog.c:9613
char * GetMockAuthenticationNonce(void)
Definition xlog.c:4620
bool track_wal_io_timing
Definition xlog.c:140
static XLogSegNo XLOGfileslop(XLogRecPtr lastredoptr)
Definition xlog.c:2216
static int UsableBytesInSegment
Definition xlog.c:608
static char * GetXLogBuffer(XLogRecPtr ptr, TimeLineID tli)
Definition xlog.c:1638
WalInsertClass
Definition xlog.c:562
@ WALINSERT_SPECIAL_SWITCH
Definition xlog.c:564
@ WALINSERT_NORMAL
Definition xlog.c:563
@ WALINSERT_SPECIAL_CHECKPOINT
Definition xlog.c:565
bool XLogInsertAllowed(void)
Definition xlog.c:6498
void do_pg_backup_start(const char *backupidstr, bool fast, List **tablespaces, BackupState *state, StringInfo tblspcmapfile)
Definition xlog.c:8948
static ControlFileData * ControlFile
Definition xlog.c:576
bool check_wal_segment_size(int *newval, void **extra, GucSource source)
Definition xlog.c:2200
static void XLogFileCopy(TimeLineID destTLI, XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, int upto)
Definition xlog.c:3438
static int LocalXLogInsertAllowed
Definition xlog.c:239
static void RemoveTempXlogFiles(void)
Definition xlog.c:3852
XLogRecPtr XLogRestorePoint(const char *rpName)
Definition xlog.c:8206
static XLogRecPtr LocalMinRecoveryPoint
Definition xlog.c:648
#define NUM_XLOGINSERT_LOCKS
Definition xlog.c:153
TimeLineID GetWALInsertionTimeLineIfSet(void)
Definition xlog.c:6645
void do_pg_backup_stop(BackupState *state, bool waitforarchive)
Definition xlog.c:9270
bool check_wal_consistency_checking(char **newval, void **extra, GucSource source)
Definition xlog.c:4727
const struct config_enum_entry wal_sync_method_options[]
Definition xlog.c:174
int min_wal_size_mb
Definition xlog.c:118
bool CreateCheckPoint(int flags)
Definition xlog.c:7008
#define BootstrapTimeLineID
Definition xlog.c:114
CheckpointStatsData CheckpointStats
Definition xlog.c:212
bool check_wal_buffers(int *newval, void **extra, GucSource source)
Definition xlog.c:4692
XLogRecPtr GetFakeLSNForUnloggedRel(void)
Definition xlog.c:4659
static char * str_time(pg_time_t tnow, char *buf, size_t bufsize)
Definition xlog.c:5272
void XLogPutNextOid(Oid nextOid)
Definition xlog.c:8151
void XLogFlush(XLogRecPtr record)
Definition xlog.c:2766
static void ReadControlFile(void)
Definition xlog.c:4369
static SessionBackupState sessionBackupState
Definition xlog.c:394
static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
Definition xlog.c:7634
static bool updateMinRecoveryPoint
Definition xlog.c:650
int CheckPointSegments
Definition xlog.c:159
static bool check_wal_consistency_checking_deferred
Definition xlog.c:169
static void ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
Definition xlog.c:1114
void XLogShutdownWalRcv(void)
Definition xlog.c:9623
#define NextBufIdx(idx)
Definition xlog.c:586
static void UpdateLastRemovedPtr(char *filename)
Definition xlog.c:3832
static TimeLineID LocalMinRecoveryPointTLI
Definition xlog.c:649
void issue_xlog_fsync(int fd, XLogSegNo segno, TimeLineID tli)
Definition xlog.c:8851
static bool ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
Definition xlog.c:1170
void XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
Definition xlog.c:2595
bool XLogCheckpointNeeded(XLogSegNo new_segno)
Definition xlog.c:2266
bool * wal_consistency_checking
Definition xlog.c:129
static int XLogFileInitInternal(XLogSegNo logsegno, TimeLineID logtli, bool *added, char *path)
Definition xlog.c:3212
static void update_checkpoint_display(int flags, bool restartpoint, bool reset)
Definition xlog.c:6943
#define XLogArchivingActive()
Definition xlog.h:101
#define TABLESPACE_MAP_OLD
Definition xlog.h:323
#define XLOG_MARK_UNIMPORTANT
Definition xlog.h:166
#define TABLESPACE_MAP
Definition xlog.h:322
@ ARCHIVE_MODE_ALWAYS
Definition xlog.h:68
@ ARCHIVE_MODE_OFF
Definition xlog.h:66
@ ARCHIVE_MODE_ON
Definition xlog.h:67
#define CHECKPOINT_FLUSH_UNLOGGED
Definition xlog.h:154
#define XLogLogicalInfoActive()
Definition xlog.h:136
#define STANDBY_SIGNAL_FILE
Definition xlog.h:318
#define CHECKPOINT_CAUSE_XLOG
Definition xlog.h:159
WALAvailability
Definition xlog.h:199
@ WALAVAIL_REMOVED
Definition xlog.h:205
@ WALAVAIL_RESERVED
Definition xlog.h:201
@ WALAVAIL_UNRESERVED
Definition xlog.h:204
@ WALAVAIL_EXTENDED
Definition xlog.h:202
@ WALAVAIL_INVALID_LSN
Definition xlog.h:200
#define BACKUP_LABEL_OLD
Definition xlog.h:320
#define CHECKPOINT_END_OF_RECOVERY
Definition xlog.h:151
@ WAL_COMPRESSION_NONE
Definition xlog.h:83
#define BACKUP_LABEL_FILE
Definition xlog.h:319
#define CHECKPOINT_CAUSE_TIME
Definition xlog.h:160
#define CHECKPOINT_FORCE
Definition xlog.h:153
SessionBackupState
Definition xlog.h:303
@ SESSION_BACKUP_RUNNING
Definition xlog.h:305
@ SESSION_BACKUP_NONE
Definition xlog.h:304
#define CHECKPOINT_WAIT
Definition xlog.h:156
#define CHECKPOINT_FAST
Definition xlog.h:152
#define RECOVERY_SIGNAL_FILE
Definition xlog.h:317
#define CHECKPOINT_IS_SHUTDOWN
Definition xlog.h:150
#define XLogArchivingAlways()
Definition xlog.h:104
WalLevel
Definition xlog.h:74
@ WAL_LEVEL_REPLICA
Definition xlog.h:76
@ WAL_LEVEL_LOGICAL
Definition xlog.h:77
@ WAL_LEVEL_MINIMAL
Definition xlog.h:75
RecoveryState
Definition xlog.h:91
@ RECOVERY_STATE_CRASH
Definition xlog.h:92
@ RECOVERY_STATE_DONE
Definition xlog.h:94
@ RECOVERY_STATE_ARCHIVE
Definition xlog.h:93
#define XLogIsNeeded()
Definition xlog.h:111
@ WAL_SYNC_METHOD_OPEN
Definition xlog.h:27
@ WAL_SYNC_METHOD_FDATASYNC
Definition xlog.h:26
@ WAL_SYNC_METHOD_FSYNC_WRITETHROUGH
Definition xlog.h:28
@ WAL_SYNC_METHOD_OPEN_DSYNC
Definition xlog.h:29
@ WAL_SYNC_METHOD_FSYNC
Definition xlog.h:25
#define XLogStandbyInfoActive()
Definition xlog.h:125
#define XLP_FIRST_IS_CONTRECORD
static RmgrData GetRmgr(RmgrId rmid)
#define IsValidWalSegSize(size)
XLogLongPageHeaderData * XLogLongPageHeader
#define XLP_FIRST_IS_OVERWRITE_CONTRECORD
#define XLOG_CONTROL_FILE
#define XLogSegmentOffset(xlogptr, wal_segsz_bytes)
static bool IsXLogFileName(const char *fname)
static void XLogFromFileName(const char *fname, TimeLineID *tli, XLogSegNo *logSegNo, int wal_segsz_bytes)
#define XLByteToPrevSeg(xlrp, logSegNo, wal_segsz_bytes)
#define XLogSegNoOffsetToRecPtr(segno, offset, wal_segsz_bytes, dest)
#define MAXFNAMELEN
XLogPageHeaderData * XLogPageHeader
#define XLOGDIR
#define XLP_LONG_HEADER
static bool IsBackupHistoryFileName(const char *fname)
#define XLOG_PAGE_MAGIC
#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)
static void BackupHistoryFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
static void XLogFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
#define XRecOffIsValid(xlrp)
#define SizeOfXLogShortPHD
#define SizeOfXLogLongPHD
static void XLogFileName(char *fname, TimeLineID tli, XLogSegNo logSegNo, int wal_segsz_bytes)
static void BackupHistoryFilePath(char *path, TimeLineID tli, XLogSegNo logSegNo, XLogRecPtr startpoint, int wal_segsz_bytes)
static bool RmgrIdExists(RmgrId rmid)
#define XLByteInPrevSeg(xlrp, logSegNo, wal_segsz_bytes)
static bool IsPartialXLogFileName(const char *fname)
bool XLogArchiveIsReadyOrDone(const char *xlog)
bool XLogArchiveIsBusy(const char *xlog)
bool XLogArchiveIsReady(const char *xlog)
void XLogArchiveNotifySeg(XLogSegNo segno, TimeLineID tli)
void ExecuteRecoveryCommand(const char *command, const char *commandName, bool failOnSignal, uint32 wait_event_info)
bool XLogArchiveCheckDone(const char *xlog)
void XLogArchiveNotify(const char *xlog)
void XLogArchiveCleanup(const char *xlog)
char * build_backup_content(BackupState *state, bool ishistoryfile)
Definition xlogbackup.c:29
#define XLogRecPtrIsValid(r)
Definition xlogdefs.h:29
#define LSN_FORMAT_ARGS(lsn)
Definition xlogdefs.h:47
#define FirstNormalUnloggedLSN
Definition xlogdefs.h:37
uint64 XLogRecPtr
Definition xlogdefs.h:21
#define InvalidXLogRecPtr
Definition xlogdefs.h:28
uint32 TimeLineID
Definition xlogdefs.h:63
#define DEFAULT_WAL_SYNC_METHOD
Definition xlogdefs.h:83
uint64 XLogSegNo
Definition xlogdefs.h:52
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition xloginsert.c:478
void XLogRegisterData(const void *data, uint32 len)
Definition xloginsert.c:368
void XLogSetRecordFlags(uint8 flags)
Definition xloginsert.c:460
void XLogBeginInsert(void)
Definition xloginsert.c:152
XLogReaderState * XLogReaderAllocate(int wal_segment_size, const char *waldir, XLogReaderRoutine *routine, void *private_data)
Definition xlogreader.c:107
bool DecodeXLogRecord(XLogReaderState *state, DecodedXLogRecord *decoded, XLogRecord *record, XLogRecPtr lsn, char **errormsg)
size_t DecodeXLogRecordRequiredSpace(size_t xl_tot_len)
#define XLogRecGetInfo(decoder)
Definition xlogreader.h:409
#define XLogRecGetData(decoder)
Definition xlogreader.h:414
#define XL_ROUTINE(...)
Definition xlogreader.h:117
#define XLogRecMaxBlockId(decoder)
Definition xlogreader.h:417
#define XLogRecHasBlockImage(decoder, block_id)
Definition xlogreader.h:422
#define XLogRecHasAnyBlockRefs(decoder)
Definition xlogreader.h:416
#define SizeOfXLogRecordDataHeaderShort
Definition xlogrecord.h:217
#define XLR_BLOCK_ID_DATA_SHORT
Definition xlogrecord.h:241
#define SizeOfXLogRecord
Definition xlogrecord.h:55
void ShutdownWalRecovery(void)
bool ArchiveRecoveryRequested
bool InArchiveRecovery
void RecoveryRequiresIntParameter(const char *param_name, int currValue, int minValue)
void PerformWalRecovery(void)
char * archiveCleanupCommand
XLogRecPtr GetCurrentReplayRecPtr(TimeLineID *replayEndTLI)
void xlog_outdesc(StringInfo buf, XLogReaderState *record)
bool PromoteIsTriggered(void)
static XLogRecPtr missingContrecPtr
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
static XLogRecPtr abortedRecPtr
EndOfWalRecoveryInfo * FinishWalRecovery(void)
void InitWalRecovery(ControlFileData *ControlFile, bool *wasShutdown_ptr, bool *haveBackupLabel_ptr, bool *haveTblspcMap_ptr)
char * recoveryEndCommand
TimeLineID recoveryTargetTLI
TimestampTz GetLatestXTime(void)
bool XLogHaveInvalidPages(void)
Definition xlogutils.c:224
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition xlogutils.c:303
HotStandbyState standbyState
Definition xlogutils.c:53
bool InRecovery
Definition xlogutils.c:50
@ STANDBY_DISABLED
Definition xlogutils.h:52
@ STANDBY_INITIALIZED
Definition xlogutils.h:53
#define InHotStandby
Definition xlogutils.h:60
@ BLK_RESTORED
Definition xlogutils.h:76
struct WaitLSNState * waitLSNState
Definition xlogwait.c:68
void WaitLSNWakeup(WaitLSNType lsnType, XLogRecPtr currentLSN)
Definition xlogwait.c:317
@ WAIT_LSN_TYPE_PRIMARY_FLUSH
Definition xlogwait.h:44
@ WAIT_LSN_TYPE_STANDBY_REPLAY
Definition xlogwait.h:39
@ WAIT_LSN_TYPE_STANDBY_FLUSH
Definition xlogwait.h:41
@ WAIT_LSN_TYPE_STANDBY_WRITE
Definition xlogwait.h:40