PostgreSQL Source Code  git master
xlogutils.c File Reference
#include "postgres.h"
#include <unistd.h>
#include "access/timeline.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"
Include dependency graph for xlogutils.c:

Go to the source code of this file.

Data Structures

struct  xl_invalid_page_key
 
struct  xl_invalid_page
 
struct  FakeRelCacheEntryData
 

Typedefs

typedef struct xl_invalid_page_key xl_invalid_page_key
 
typedef struct xl_invalid_page xl_invalid_page
 
typedef FakeRelCacheEntryData * FakeRelCacheEntry
 

Functions

static void report_invalid_page (int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void log_invalid_page (RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void forget_invalid_pages (RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
 
static void forget_invalid_pages_db (Oid dbid)
 
bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 
static int wal_segment_open (XLogSegNo nextSegNo, WALSegmentContext *segcxt, TimeLineID *tli_p)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
 
void WALReadRaiseError (WALReadError *errinfo)
 

Variables

bool ignore_invalid_pages = false
 
static HTAB * invalid_page_tab = NULL
 

Typedef Documentation

◆ FakeRelCacheEntry

Definition at line 540 of file xlogutils.c.

◆ xl_invalid_page

◆ xl_invalid_page_key

Function Documentation

◆ CreateFakeRelcacheEntry()

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 555 of file xlogutils.c.

References Assert, LockRelId::dbId, RelFileNode::dbNode, InRecovery, InvalidBackendId, LockInfoData::lockRelId, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and sprintf.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

556 {
557  FakeRelCacheEntry fakeentry;
558  Relation rel;
559 
560  Assert(InRecovery);
561 
562  /* Allocate the Relation struct and all related space in one block. */
563  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
564  rel = (Relation) fakeentry;
565 
566  rel->rd_rel = &fakeentry->pgc;
567  rel->rd_node = rnode;
568  /* We will never be working with temp rels during recovery */
569  rel->rd_backend = InvalidBackendId;
570 
571  /* It must be a permanent table if we're in recovery. */
572  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
573 
574  /* We don't know the name of the relation; use relfilenode instead */
575  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
576 
577  /*
578  * We set up the lockRelId in case anything tries to lock the dummy
579  * relation. Note that this is fairly bogus since relNode may be
580  * different from the relation's OID. It shouldn't really matter though,
581  * since we are presumably running by ourselves and can't have any lock
582  * conflicts ...
583  */
584  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
585  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
586 
587  rel->rd_smgr = NULL;
588 
589  return rel;
590 }
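
A fake relcache entry is normally released with FreeFakeRelcacheEntry() before the redo routine returns. A minimal sketch of the pattern, loosely modeled on the callers listed above (the function name and the record-decoding details are hypothetical):

    static void
    example_xlog_visible(RelFileNode rnode, BlockNumber heapBlk)
    {
        /* Redo has no relcache, so fabricate a Relation for the VM call */
        Relation    reln = CreateFakeRelcacheEntry(rnode);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, heapBlk, &vmbuffer);

        /* ... set the visibility bit, mark buffers dirty, etc. ... */

        if (BufferIsValid(vmbuffer))
            ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }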

◆ forget_invalid_pages()

static void forget_invalid_pages ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  minblkno 
)
static

Definition at line 146 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), RelFileNodeEquals, relpathperm, and status().

Referenced by XLogDropRelation(), and XLogTruncateRelation().

147 {
148  HASH_SEQ_STATUS status;
149  xl_invalid_page *hentry;
150 
151  if (invalid_page_tab == NULL)
152  return; /* nothing to do */
153 
154  hash_seq_init(&status, invalid_page_tab);
155 
156  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
157  {
158  if (RelFileNodeEquals(hentry->key.node, node) &&
159  hentry->key.forkno == forkno &&
160  hentry->key.blkno >= minblkno)
161  {
162  if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
163  {
164  char *path = relpathperm(hentry->key.node, forkno);
165 
166  elog(DEBUG2, "page %u of relation %s has been dropped",
167  hentry->key.blkno, path);
168  pfree(path);
169  }
170 
171  if (hash_search(invalid_page_tab,
172  (void *) &hentry->key,
173  HASH_REMOVE, NULL) == NULL)
174  elog(ERROR, "hash table corrupted");
175  }
176  }
177 }

◆ forget_invalid_pages_db()

static void forget_invalid_pages_db ( Oid  dbid)
static

Definition at line 181 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, RelFileNode::dbNode, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), relpathperm, and status().

Referenced by XLogDropDatabase().

182 {
183  HASH_SEQ_STATUS status;
184  xl_invalid_page *hentry;
185 
186  if (invalid_page_tab == NULL)
187  return; /* nothing to do */
188 
189  hash_seq_init(&status, invalid_page_tab);
190 
191  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
192  {
193  if (hentry->key.node.dbNode == dbid)
194  {
195  if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
196  {
197  char *path = relpathperm(hentry->key.node, hentry->key.forkno);
198 
199  elog(DEBUG2, "page %u of relation %s has been dropped",
200  hentry->key.blkno, path);
201  pfree(path);
202  }
203 
204  if (hash_search(invalid_page_tab,
205  (void *) &hentry->key,
206  HASH_REMOVE, NULL) == NULL)
207  elog(ERROR, "hash table corrupted");
208  }
209  }
210 }

◆ FreeFakeRelcacheEntry()

void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 596 of file xlogutils.c.

References pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

597 {
598  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
599  if (fakerel->rd_smgr != NULL)
600  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
601  pfree(fakerel);
602 }

◆ log_invalid_page()

static void log_invalid_page ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 81 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG1, elog, HASHCTL::entrysize, xl_invalid_page_key::forkno, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, hash_search(), ignore_invalid_pages, sort-test::key, HASHCTL::keysize, log_min_messages, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, reachedConsistency, report_invalid_page(), and WARNING.

Referenced by XLogReadBufferExtended().

83 {
84  xl_invalid_page_key key;
85  xl_invalid_page *hentry;
86  bool found;
87 
88  /*
89  * Once recovery has reached a consistent state, the invalid-page table
90  * should be empty and remain so. If a reference to an invalid page is
91  * found after consistency is reached, PANIC immediately. This might seem
92  * aggressive, but it's better than letting the invalid reference linger
93  * in the hash table until the end of recovery and PANIC there, which
94  * might come only much later if this is a standby server.
95  */
96  if (reachedConsistency)
97  {
98  report_invalid_page(WARNING, node, forkno, blkno, present);
99  elog(ignore_invalid_pages ? WARNING : PANIC,
100  "WAL contains references to invalid pages");
101  }
102 
103  /*
104  * Log references to invalid pages at DEBUG1 level. This allows some
105  * tracing of the cause (note the elog context mechanism will tell us
106  * something about the XLOG record that generated the reference).
107  */
108  if (log_min_messages <= DEBUG1 || client_min_messages <= DEBUG1)
109  report_invalid_page(DEBUG1, node, forkno, blkno, present);
110 
111  if (invalid_page_tab == NULL)
112  {
113  /* create hash table when first needed */
114  HASHCTL ctl;
115 
116  memset(&ctl, 0, sizeof(ctl));
117  ctl.keysize = sizeof(xl_invalid_page_key);
118  ctl.entrysize = sizeof(xl_invalid_page);
119 
120  invalid_page_tab = hash_create("XLOG invalid-page table",
121  100,
122  &ctl,
123  HASH_ELEM | HASH_BLOBS);
124  }
125 
126  /* we currently assume xl_invalid_page_key contains no padding */
127  key.node = node;
128  key.forkno = forkno;
129  key.blkno = blkno;
130  hentry = (xl_invalid_page *)
131  hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);
132 
133  if (!found)
134  {
135  /* hash_search already filled in the key */
136  hentry->present = present;
137  }
138  else
139  {
140  /* repeat reference ... leave "present" as it was */
141  }
142 }

◆ read_local_xlog_page()

int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page 
)

Definition at line 822 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), XLogReaderState::seg, XLogReaderState::segcxt, ThisTimeLineID, wal_segment_open(), WALRead(), WALReadRaiseError(), and XLogReadDetermineTimeline().

Referenced by create_logical_replication_slot(), pg_logical_replication_slot_advance(), pg_logical_slot_get_changes_guts(), and XlogReadTwoPhaseData().

824 {
825  XLogRecPtr read_upto,
826  loc;
827  TimeLineID tli;
828  int count;
829  WALReadError errinfo;
830 
831  loc = targetPagePtr + reqLen;
832 
833  /* Loop waiting for xlog to be available if necessary */
834  while (1)
835  {
836  /*
837  * Determine the limit of xlog we can currently read to, and what the
838  * most recent timeline is.
839  *
840  * RecoveryInProgress() will update ThisTimeLineID when it first
841  * notices recovery finishes, so we only have to maintain it for the
842  * local process until recovery ends.
843  */
844  if (!RecoveryInProgress())
845  read_upto = GetFlushRecPtr();
846  else
847  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
848  tli = ThisTimeLineID;
849 
850  /*
851  * Check which timeline to get the record from.
852  *
853  * We have to do it each time through the loop because if we're in
854  * recovery as a cascading standby, the current timeline might've
855  * become historical. We can't rely on RecoveryInProgress() because in
856  * a standby configuration like
857  *
858  * A => B => C
859  *
860  * if we're a logical decoding session on C, and B gets promoted, our
861  * timeline will change while we remain in recovery.
862  *
863  * We can't just keep reading from the old timeline as the last WAL
864  * archive in the timeline will get renamed to .partial by
865  * StartupXLOG().
866  *
867  * If that happens after our caller updated ThisTimeLineID but before
868  * we actually read the xlog page, we might still try to read from the
869  * old (now renamed) segment and fail. There's not much we can do
870  * about this, but it can only happen when we're a leaf of a cascading
871  * standby whose master gets promoted while we're decoding, so a
872  * one-off ERROR isn't too bad.
873  */
874  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
875 
876  if (state->currTLI == ThisTimeLineID)
877  {
878 
879  if (loc <= read_upto)
880  break;
881 
882  CHECK_FOR_INTERRUPTS();
883  pg_usleep(1000L);
884  }
885  else
886  {
887  /*
888  * We're on a historical timeline, so limit reading to the switch
889  * point where we moved to the next timeline.
890  *
891  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
892  * about the new timeline, so we must've received past the end of
893  * it.
894  */
895  read_upto = state->currTLIValidUntil;
896 
897  /*
898  * Setting tli to our wanted record's TLI is slightly wrong; the
899  * page might begin on an older timeline if it contains a timeline
900  * switch, since its xlog segment will have been copied from the
901  * prior timeline. This is pretty harmless though, as nothing
902  * cares so long as the timeline doesn't go backwards. We should
903  * read the page header instead; FIXME someday.
904  */
905  tli = state->currTLI;
906 
907  /* No need to wait on a historical timeline */
908  break;
909  }
910  }
911 
912  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
913  {
914  /*
915  * more than one block available; read only that block, have caller
916  * come back if they need more.
917  */
918  count = XLOG_BLCKSZ;
919  }
920  else if (targetPagePtr + reqLen > read_upto)
921  {
922  /* not enough data there */
923  return -1;
924  }
925  else
926  {
927  /* enough bytes available to satisfy the request */
928  count = read_upto - targetPagePtr;
929  }
930 
931  /*
932  * Even though we just determined how much of the page can be validly read
933  * as 'count', read the whole page anyway. It's guaranteed to be
934  * zero-padded up to the page boundary if it's incomplete.
935  */
936  if (!WALRead(cur_page, targetPagePtr, XLOG_BLCKSZ, tli, &state->seg,
937  &state->segcxt, wal_segment_open, &errinfo))
938  WALReadRaiseError(&errinfo);
939 
940  /* number of valid bytes in the buffer */
941  return count;
942 }
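
In the backend, read_local_xlog_page() is installed as the page-read callback of an xlogreader rather than being called directly. A hedged sketch of that pattern, following XlogReadTwoPhaseData() from the "Referenced by" list above; the xlogreader calls match this development snapshot and their signatures have changed across releases, and lsn is a hypothetical starting record pointer:

    XLogReaderState *xlogreader;
    XLogRecord *record;
    char       *errormsg;

    xlogreader = XLogReaderAllocate(wal_segment_size, NULL,
                                    &read_local_xlog_page, NULL);
    if (xlogreader == NULL)
        elog(ERROR, "out of memory");

    XLogBeginRead(xlogreader, lsn);
    record = XLogReadRecord(xlogreader, &errormsg);
    if (record == NULL)
        elog(ERROR, "could not read WAL record at %X/%X: %s",
             (uint32) (lsn >> 32), (uint32) lsn,
             errormsg ? errormsg : "");

    /* ... work with the decoded record via the XLogRec* macros ... */

    XLogReaderFree(xlogreader);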

◆ report_invalid_page()

static void report_invalid_page ( int  elevel,
RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 65 of file xlogutils.c.

References elog, pfree(), and relpathperm.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

67 {
68  char *path = relpathperm(node, forkno);
69 
70  if (present)
71  elog(elevel, "page %u of relation %s is uninitialized",
72  blkno, path);
73  else
74  elog(elevel, "page %u of relation %s does not exist",
75  blkno, path);
76  pfree(path);
77 }

◆ wal_segment_open()

static int wal_segment_open ( XLogSegNo  nextSegNo,
WALSegmentContext segcxt,
TimeLineID tli_p 
)
static

Definition at line 784 of file xlogutils.c.

References BasicOpenFile(), ereport, errcode_for_file_access(), errmsg(), ERROR, fd(), MAXPGPATH, PG_BINARY, WALSegmentContext::ws_segsize, and XLogFilePath.

Referenced by read_local_xlog_page().

786 {
787  TimeLineID tli = *tli_p;
788  char path[MAXPGPATH];
789  int fd;
790 
791  XLogFilePath(path, tli, nextSegNo, segcxt->ws_segsize);
792  fd = BasicOpenFile(path, O_RDONLY | PG_BINARY);
793  if (fd >= 0)
794  return fd;
795 
796  if (errno == ENOENT)
797  ereport(ERROR,
798  (errcode_for_file_access(),
799  errmsg("requested WAL segment %s has already been removed",
800  path)));
801  else
802  ereport(ERROR,
803  (errcode_for_file_access(),
804  errmsg("could not open file \"%s\": %m",
805  path)));
806 
807  return -1; /* keep compiler quiet */
808 }

◆ WALReadRaiseError()

void WALReadRaiseError ( WALReadError errinfo)

Definition at line 949 of file xlogutils.c.

References ereport, errcode(), ERRCODE_DATA_CORRUPTED, errcode_for_file_access(), errmsg(), ERROR, MAXFNAMELEN, wal_segment_size, WALReadError::wre_errno, WALReadError::wre_off, WALReadError::wre_read, WALReadError::wre_req, WALReadError::wre_seg, WALOpenSegment::ws_segno, WALOpenSegment::ws_tli, and XLogFileName.

Referenced by logical_read_xlog_page(), read_local_xlog_page(), and XLogSendPhysical().

950 {
951  WALOpenSegment *seg = &errinfo->wre_seg;
952  char fname[MAXFNAMELEN];
953 
954  XLogFileName(fname, seg->ws_tli, seg->ws_segno, wal_segment_size);
955 
956  if (errinfo->wre_read < 0)
957  {
958  errno = errinfo->wre_errno;
959  ereport(ERROR,
960  (errcode_for_file_access(),
961  errmsg("could not read from log segment %s, offset %u: %m",
962  fname, errinfo->wre_off)));
963  }
964  else if (errinfo->wre_read == 0)
965  {
966  ereport(ERROR,
967  (errcode(ERRCODE_DATA_CORRUPTED),
968  errmsg("could not read from log segment %s, offset %u: read %d of %zu",
969  fname, errinfo->wre_off, errinfo->wre_read,
970  (Size) errinfo->wre_req)));
971  }
972 }
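
The expected usage is to pass the WALReadError populated by a failed WALRead() call straight to this function, as read_local_xlog_page() above does:

    WALReadError errinfo;

    if (!WALRead(cur_page, targetPagePtr, XLOG_BLCKSZ, tli, &state->seg,
                 &state->segcxt, wal_segment_open, &errinfo))
        WALReadRaiseError(&errinfo);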

◆ XLogCheckInvalidPages()

void XLogCheckInvalidPages ( void  )

Definition at line 224 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), ignore_invalid_pages, xl_invalid_page::key, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

225 {
226  HASH_SEQ_STATUS status;
227  xl_invalid_page *hentry;
228  bool foundone = false;
229 
230  if (invalid_page_tab == NULL)
231  return; /* nothing to do */
232 
233  hash_seq_init(&status, invalid_page_tab);
234 
235  /*
236  * Our strategy is to emit WARNING messages for all remaining entries and
237  * only PANIC after we've dumped all the available info.
238  */
239  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
240  {
241  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
242  hentry->key.blkno, hentry->present);
243  foundone = true;
244  }
245 
246  if (foundone)
247  elog(ignore_invalid_pages ? WARNING : PANIC,
248  "WAL contains references to invalid pages");
249 
250  hash_destroy(invalid_page_tab);
251  invalid_page_tab = NULL;
252 }

◆ XLogDropDatabase()

void XLogDropDatabase ( Oid  dbid)

Definition at line 622 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

623 {
624  /*
625  * This is unnecessarily heavy-handed, as it will close SMgrRelation
626  * objects for other databases as well. DROP DATABASE occurs seldom enough
627  * that it's not worth introducing a variant of smgrclose for just this
628  * purpose. XXX: Or should we rather leave the smgr entries dangling?
629  */
630  smgrcloseall();
631 
632  forget_invalid_pages_db(dbid);
633  }

◆ XLogDropRelation()

void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 611 of file xlogutils.c.

References forget_invalid_pages().

Referenced by DropRelationFiles().

612 {
613  forget_invalid_pages(rnode, forknum, 0);
614 }

◆ XLogHaveInvalidPages()

bool XLogHaveInvalidPages ( void  )

Definition at line 214 of file xlogutils.c.

References hash_get_num_entries().

Referenced by RecoveryRestartPoint().

215 {
216  if (invalid_page_tab != NULL &&
217  hash_get_num_entries(invalid_page_tab) > 0)
218  return true;
219  return false;
220 }

◆ XLogInitBufferForRedo()

◆ XLogReadBufferExtended()

Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 442 of file xlogutils.c.

References Assert, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by checkXLogConsistency(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

444 {
445  BlockNumber lastblock;
446  Buffer buffer;
447  SMgrRelation smgr;
448 
449  Assert(blkno != P_NEW);
450 
451  /* Open the relation at smgr level */
452  smgr = smgropen(rnode, InvalidBackendId);
453 
454  /*
455  * Create the target file if it doesn't already exist. This lets us cope
456  * if the replay sequence contains writes to a relation that is later
457  * deleted. (The original coding of this routine would instead suppress
458  * the writes, but that seems like it risks losing valuable data if the
459  * filesystem loses an inode during a crash. Better to write the data
460  * until we are actually told to delete the file.)
461  */
462  smgrcreate(smgr, forknum, true);
463 
464  lastblock = smgrnblocks(smgr, forknum);
465 
466  if (blkno < lastblock)
467  {
468  /* page exists in file */
469  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
470  mode, NULL);
471  }
472  else
473  {
474  /* hm, page doesn't exist in file */
475  if (mode == RBM_NORMAL)
476  {
477  log_invalid_page(rnode, forknum, blkno, false);
478  return InvalidBuffer;
479  }
480  if (mode == RBM_NORMAL_NO_LOG)
481  return InvalidBuffer;
482  /* OK to extend the file */
483  /* we do this in recovery only - no rel-extension lock needed */
484  Assert(InRecovery);
485  buffer = InvalidBuffer;
486  do
487  {
488  if (buffer != InvalidBuffer)
489  {
490  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
491  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
492  ReleaseBuffer(buffer);
493  }
494  buffer = ReadBufferWithoutRelcache(rnode, forknum,
495  P_NEW, mode, NULL);
496  }
497  while (BufferGetBlockNumber(buffer) < blkno);
498  /* Handle the corner case that P_NEW returns non-consecutive pages */
499  if (BufferGetBlockNumber(buffer) != blkno)
500  {
501  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
502  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
503  ReleaseBuffer(buffer);
504  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
505  mode, NULL);
506  }
507  }
508 
509  if (mode == RBM_NORMAL)
510  {
511  /* check that page has been initialized */
512  Page page = (Page) BufferGetPage(buffer);
513 
514  /*
515  * We assume that PageIsNew is safe without a lock. During recovery,
516  * there should be no other backends that could modify the buffer at
517  * the same time.
518  */
519  if (PageIsNew(page))
520  {
521  ReleaseBuffer(buffer);
522  log_invalid_page(rnode, forknum, blkno, true);
523  return InvalidBuffer;
524  }
525  }
526 
527  return buffer;
528 }

◆ XLogReadBufferForRedo()

XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 294 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

296 {
297  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
298  false, buf);
299 }
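
A typical redo routine acts only on BLK_NEEDS_REDO, stamps the page with the record's LSN, and releases the buffer unconditionally. A minimal sketch under those assumptions (the redo function name and the page change itself are hypothetical):

    static void
    example_redo(XLogReaderState *record)
    {
        XLogRecPtr  lsn = record->EndRecPtr;
        Buffer      buffer;

        if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
        {
            Page        page = BufferGetPage(buffer);

            /* ... apply the change described by the WAL record ... */

            PageSetLSN(page, lsn);
            MarkBufferDirty(buffer);
        }
        if (BufferIsValid(buffer))
            UnlockReleaseBuffer(buffer);
    }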

◆ XLogReadBufferForRedoExtended()

XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 331 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, xl_invalid_page_key::blkno, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

335 {
336  XLogRecPtr lsn = record->EndRecPtr;
337  RelFileNode rnode;
338  ForkNumber forknum;
339  BlockNumber blkno;
340  Page page;
341  bool zeromode;
342  bool willinit;
343 
344  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
345  {
346  /* Caller specified a bogus block_id */
347  elog(PANIC, "failed to locate backup block with ID %d", block_id);
348  }
349 
350  /*
351  * Make sure that if the block is marked with WILL_INIT, the caller is
352  * going to initialize it. And vice versa.
353  */
354  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
355  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
356  if (willinit && !zeromode)
357  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
358  if (!willinit && zeromode)
359  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
360 
361  /* If it has a full-page image and it should be restored, do it. */
362  if (XLogRecBlockImageApply(record, block_id))
363  {
364  Assert(XLogRecHasBlockImage(record, block_id));
365  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
366  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
367  page = BufferGetPage(*buf);
368  if (!RestoreBlockImage(record, block_id, page))
369  elog(ERROR, "failed to restore block image");
370 
371  /*
372  * The page may be uninitialized. If so, we can't set the LSN because
373  * that would corrupt the page.
374  */
375  if (!PageIsNew(page))
376  {
377  PageSetLSN(page, lsn);
378  }
379 
380  MarkBufferDirty(*buf);
381 
382  /*
383  * At the end of crash recovery the init forks of unlogged relations
384  * are copied, without going through shared buffers. So we need to
385  * force the on-disk state of init forks to always be in sync with the
386  * state in shared buffers.
387  */
388  if (forknum == INIT_FORKNUM)
389  FlushOneBuffer(*buf);
390 
391  return BLK_RESTORED;
392  }
393  else
394  {
395  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
396  if (BufferIsValid(*buf))
397  {
398  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
399  {
400  if (get_cleanup_lock)
401  LockBufferForCleanup(*buf);
402  else
403  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
404  }
405  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
406  return BLK_DONE;
407  else
408  return BLK_NEEDS_REDO;
409  }
410  else
411  return BLK_NOTFOUND;
412  }
413 }

◆ XLogReadDetermineTimeline()

void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 686 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), Min, XLogReaderState::nextTLI, XLogReaderState::readLen, readTimeLineHistory(), XLogReaderState::seg, XLogReaderState::segcxt, XLogReaderState::segoff, ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), WALOpenSegment::ws_segno, and WALSegmentContext::ws_segsize.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

687 {
688  const XLogRecPtr lastReadPage = (state->seg.ws_segno *
689  state->segcxt.ws_segsize + state->segoff);
690 
691  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
692  Assert(wantLength <= XLOG_BLCKSZ);
693  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
694 
695  /*
696  * If the desired page is currently read in and valid, we have nothing to
697  * do.
698  *
699  * The caller should've ensured that it didn't previously advance readOff
700  * past the valid limit of this timeline, so it doesn't matter if the
701  * current TLI has since become historical.
702  */
703  if (lastReadPage == wantPage &&
704  state->readLen != 0 &&
705  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
706  return;
707 
708  /*
709  * If we're reading from the current timeline, it hasn't become historical
710  * and the page we're reading is after the last page read, we can again
711  * just carry on. (Seeking backwards requires a check to make sure the
712  * older page isn't on a prior timeline).
713  *
714  * ThisTimeLineID might've become historical since we last looked, but the
715  * caller is required not to read past the flush limit it saw at the time
716  * it looked up the timeline. There's nothing we can do about it if
717  * StartupXLOG() renames it to .partial concurrently.
718  */
719  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
720  {
721  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
722  return;
723  }
724 
725  /*
726  * If we're just reading pages from a previously validated historical
727  * timeline and the timeline we're reading from is valid until the end of
728  * the current segment we can just keep reading.
729  */
730  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
731  state->currTLI != ThisTimeLineID &&
732  state->currTLI != 0 &&
733  ((wantPage + wantLength) / state->segcxt.ws_segsize) <
734  (state->currTLIValidUntil / state->segcxt.ws_segsize))
735  return;
736 
737  /*
738  * If we reach this point we're either looking up a page for random
739  * access, the current timeline just became historical, or we're reading
740  * from a new segment containing a timeline switch. In all cases we need
741  * to determine the newest timeline on the segment.
742  *
743  * If it's the current timeline we can just keep reading from here unless
744  * we detect a timeline switch that makes the current timeline historical.
745  * If it's a historical timeline we can read all the segment on the newest
746  * timeline because it contains all the old timelines' data too. So only
747  * one switch check is required.
748  */
749  {
750  /*
751  * We need to re-read the timeline history in case it's been changed
752  * by a promotion or replay from a cascaded replica.
753  */
754  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
755  XLogRecPtr endOfSegment;
756 
757  endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
758  state->segcxt.ws_segsize - 1;
759  Assert(wantPage / state->segcxt.ws_segsize ==
760  endOfSegment / state->segcxt.ws_segsize);
761 
762  /*
763  * Find the timeline of the last LSN on the segment containing
764  * wantPage.
765  */
766  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
767  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
768  &state->nextTLI);
769 
771  wantPage + wantLength < state->currTLIValidUntil);
772 
773  list_free_deep(timelineHistory);
774 
775  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
776  state->currTLI,
777  (uint32) (state->currTLIValidUntil >> 32),
778  (uint32) (state->currTLIValidUntil));
779  }
780 }

◆ XLogTruncateRelation()

void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 641 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

643 {
644  forget_invalid_pages(rnode, forkNum, nblocks);
645 }

Variable Documentation

◆ ignore_invalid_pages

bool ignore_invalid_pages = false

Definition at line 35 of file xlogutils.c.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

◆ invalid_page_tab

HTAB* invalid_page_tab = NULL
static

Definition at line 60 of file xlogutils.c.