PostgreSQL Source Code  git master
xlogutils.c File Reference
#include "postgres.h"
#include <unistd.h>
#include "access/timeline.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"
Include dependency graph for xlogutils.c:

Go to the source code of this file.

Data Structures

struct  xl_invalid_page_key
 
struct  xl_invalid_page
 
struct  FakeRelCacheEntryData
 

Typedefs

typedef struct xl_invalid_page_key xl_invalid_page_key
 
typedef struct xl_invalid_page xl_invalid_page
 
typedef FakeRelCacheEntryData *FakeRelCacheEntry
 

Functions

static void report_invalid_page (int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void log_invalid_page (RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void forget_invalid_pages (RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
 
static void forget_invalid_pages_db (Oid dbid)
 
bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 
void wal_segment_open (XLogReaderState *state, XLogSegNo nextSegNo, TimeLineID *tli_p)
 
void wal_segment_close (XLogReaderState *state)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
 
void WALReadRaiseError (WALReadError *errinfo)
 

Variables

bool ignore_invalid_pages = false
 
static HTAB *invalid_page_tab = NULL
 

Typedef Documentation

◆ FakeRelCacheEntry

Definition at line 537 of file xlogutils.c.

◆ xl_invalid_page

◆ xl_invalid_page_key

Function Documentation

◆ CreateFakeRelcacheEntry()

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 554 of file xlogutils.c.

References LockRelId::dbId, RelFileNode::dbNode, InvalidBackendId, LockInfoData::lockRelId, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and sprintf.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), smgr_redo(), and smgrDoPendingSyncs().

555 {
556  FakeRelCacheEntry fakeentry;
557  Relation rel;
558 
559  /* Allocate the Relation struct and all related space in one block. */
560  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
561  rel = (Relation) fakeentry;
562 
563  rel->rd_rel = &fakeentry->pgc;
564  rel->rd_node = rnode;
565 
566  /*
567  * We will never be working with temp rels during recovery or while
568  * syncing WAL-skipped files.
569  */
571 
572  /* It must be a permanent table here */
573  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
574 
575  /* We don't know the name of the relation; use relfilenode instead */
576  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
577 
578  /*
579  * We set up the lockRelId in case anything tries to lock the dummy
580  * relation. Note that this is fairly bogus since relNode may be
581  * different from the relation's OID. It shouldn't really matter though.
582  * In recovery, we are running by ourselves and can't have any lock
583  * conflicts. While syncing, we already hold AccessExclusiveLock.
584  */
585  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
586  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
587 
588  rel->rd_smgr = NULL;
589 
590  return rel;
591 }
LockRelId lockRelId
Definition: rel.h:44
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
Oid dbId
Definition: rel.h:39
Form_pg_class rd_rel
Definition: rel.h:110
#define sprintf
Definition: port.h:217
struct RelationData * Relation
Definition: relcache.h:27
LockInfoData rd_lockInfo
Definition: rel.h:113
#define RelationGetRelationName(relation)
Definition: rel.h:491
#define InvalidBackendId
Definition: backendid.h:23
void * palloc0(Size size)
Definition: mcxt.c:981
RelFileNode rd_node
Definition: rel.h:55
BackendId rd_backend
Definition: rel.h:59
FormData_pg_class pgc
Definition: xlogutils.c:534
Oid relId
Definition: rel.h:38

◆ forget_invalid_pages()

static void forget_invalid_pages ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  minblkno 
)
static

Definition at line 145 of file xlogutils.c.

References xl_invalid_page_key::blkno, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, message_level_is_interesting(), xl_invalid_page_key::node, pfree(), RelFileNodeEquals, relpathperm, and status().

Referenced by XLogDropRelation(), and XLogTruncateRelation().

146 {
148  xl_invalid_page *hentry;
149 
150  if (invalid_page_tab == NULL)
151  return; /* nothing to do */
152 
154 
155  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
156  {
157  if (RelFileNodeEquals(hentry->key.node, node) &&
158  hentry->key.forkno == forkno &&
159  hentry->key.blkno >= minblkno)
160  {
162  {
163  char *path = relpathperm(hentry->key.node, forkno);
164 
165  elog(DEBUG2, "page %u of relation %s has been dropped",
166  hentry->key.blkno, path);
167  pfree(path);
168  }
169 
171  (void *) &hentry->key,
172  HASH_REMOVE, NULL) == NULL)
173  elog(ERROR, "hash table corrupted");
174  }
175  }
176 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkno
Definition: xlogutils.c:50
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
RelFileNode node
Definition: xlogutils.c:49
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:45
#define DEBUG2
Definition: elog.h:24
bool message_level_is_interesting(int elevel)
Definition: elog.c:270
BlockNumber blkno
Definition: xlogutils.c:51
xl_invalid_page_key key
Definition: xlogutils.c:56
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
#define elog(elevel,...)
Definition: elog.h:228
static HTAB * invalid_page_tab
Definition: xlogutils.c:60
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ forget_invalid_pages_db()

static void forget_invalid_pages_db ( Oid  dbid)
static

Definition at line 180 of file xlogutils.c.

References xl_invalid_page_key::blkno, RelFileNode::dbNode, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, message_level_is_interesting(), xl_invalid_page_key::node, pfree(), relpathperm, and status().

Referenced by XLogDropDatabase().

181 {
183  xl_invalid_page *hentry;
184 
185  if (invalid_page_tab == NULL)
186  return; /* nothing to do */
187 
189 
190  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
191  {
192  if (hentry->key.node.dbNode == dbid)
193  {
195  {
196  char *path = relpathperm(hentry->key.node, hentry->key.forkno);
197 
198  elog(DEBUG2, "page %u of relation %s has been dropped",
199  hentry->key.blkno, path);
200  pfree(path);
201  }
202 
204  (void *) &hentry->key,
205  HASH_REMOVE, NULL) == NULL)
206  elog(ERROR, "hash table corrupted");
207  }
208  }
209 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkno
Definition: xlogutils.c:50
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
RelFileNode node
Definition: xlogutils.c:49
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:45
#define DEBUG2
Definition: elog.h:24
bool message_level_is_interesting(int elevel)
Definition: elog.c:270
BlockNumber blkno
Definition: xlogutils.c:51
xl_invalid_page_key key
Definition: xlogutils.c:56
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
#define elog(elevel,...)
Definition: elog.h:228
static HTAB * invalid_page_tab
Definition: xlogutils.c:60
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ FreeFakeRelcacheEntry()

void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 597 of file xlogutils.c.

References pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), smgr_redo(), and smgrDoPendingSyncs().

598 {
599  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
600  if (fakerel->rd_smgr != NULL)
601  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
602  pfree(fakerel);
603 }
void smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
Definition: smgr.c:227
struct SMgrRelationData * rd_smgr
Definition: rel.h:57
void pfree(void *pointer)
Definition: mcxt.c:1057

◆ log_invalid_page()

static void log_invalid_page ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 81 of file xlogutils.c.

References xl_invalid_page_key::blkno, DEBUG1, elog, HASHCTL::entrysize, xl_invalid_page_key::forkno, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, hash_search(), ignore_invalid_pages, sort-test::key, HASHCTL::keysize, message_level_is_interesting(), xl_invalid_page_key::node, PANIC, xl_invalid_page::present, reachedConsistency, report_invalid_page(), and WARNING.

Referenced by XLogReadBufferExtended().

83 {
85  xl_invalid_page *hentry;
86  bool found;
87 
88  /*
89  * Once recovery has reached a consistent state, the invalid-page table
90  * should be empty and remain so. If a reference to an invalid page is
91  * found after consistency is reached, PANIC immediately. This might seem
92  * aggressive, but it's better than letting the invalid reference linger
93  * in the hash table until the end of recovery and PANIC there, which
94  * might come only much later if this is a standby server.
95  */
97  {
98  report_invalid_page(WARNING, node, forkno, blkno, present);
100  "WAL contains references to invalid pages");
101  }
102 
103  /*
104  * Log references to invalid pages at DEBUG1 level. This allows some
105  * tracing of the cause (note the elog context mechanism will tell us
106  * something about the XLOG record that generated the reference).
107  */
109  report_invalid_page(DEBUG1, node, forkno, blkno, present);
110 
111  if (invalid_page_tab == NULL)
112  {
113  /* create hash table when first needed */
114  HASHCTL ctl;
115 
116  ctl.keysize = sizeof(xl_invalid_page_key);
117  ctl.entrysize = sizeof(xl_invalid_page);
118 
119  invalid_page_tab = hash_create("XLOG invalid-page table",
120  100,
121  &ctl,
123  }
124 
125  /* we currently assume xl_invalid_page_key contains no padding */
126  key.node = node;
127  key.forkno = forkno;
128  key.blkno = blkno;
129  hentry = (xl_invalid_page *)
130  hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);
131 
132  if (!found)
133  {
134  /* hash_search already filled in the key */
135  hentry->present = present;
136  }
137  else
138  {
139  /* repeat reference ... leave "present" as it was */
140  }
141 }
#define DEBUG1
Definition: elog.h:25
struct xl_invalid_page xl_invalid_page
#define HASH_ELEM
Definition: hsearch.h:95
Size entrysize
Definition: hsearch.h:76
struct xl_invalid_page_key xl_invalid_page_key
ForkNumber forkno
Definition: xlogutils.c:50
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:954
#define PANIC
Definition: elog.h:55
RelFileNode node
Definition: xlogutils.c:49
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:65
bool message_level_is_interesting(int elevel)
Definition: elog.c:270
BlockNumber blkno
Definition: xlogutils.c:51
HTAB * hash_create(const char *tabname, long nelem, const HASHCTL *info, int flags)
Definition: dynahash.c:349
bool ignore_invalid_pages
Definition: xlogutils.c:35
#define WARNING
Definition: elog.h:40
#define HASH_BLOBS
Definition: hsearch.h:97
Size keysize
Definition: hsearch.h:75
bool reachedConsistency
Definition: xlog.c:877
#define elog(elevel,...)
Definition: elog.h:228
static HTAB * invalid_page_tab
Definition: xlogutils.c:60

◆ read_local_xlog_page()

int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page 
)

Definition at line 828 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), ThisTimeLineID, WALRead(), WALReadRaiseError(), and XLogReadDetermineTimeline().

Referenced by create_logical_replication_slot(), pg_logical_replication_slot_advance(), pg_logical_slot_get_changes_guts(), and XlogReadTwoPhaseData().

830 {
831  XLogRecPtr read_upto,
832  loc;
833  TimeLineID tli;
834  int count;
835  WALReadError errinfo;
836 
837  loc = targetPagePtr + reqLen;
838 
839  /* Loop waiting for xlog to be available if necessary */
840  while (1)
841  {
842  /*
843  * Determine the limit of xlog we can currently read to, and what the
844  * most recent timeline is.
845  *
846  * RecoveryInProgress() will update ThisTimeLineID when it first
847  * notices recovery finishes, so we only have to maintain it for the
848  * local process until recovery ends.
849  */
850  if (!RecoveryInProgress())
851  read_upto = GetFlushRecPtr();
852  else
853  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
854  tli = ThisTimeLineID;
855 
856  /*
857  * Check which timeline to get the record from.
858  *
859  * We have to do it each time through the loop because if we're in
860  * recovery as a cascading standby, the current timeline might've
861  * become historical. We can't rely on RecoveryInProgress() because in
862  * a standby configuration like
863  *
864  * A => B => C
865  *
866  * if we're a logical decoding session on C, and B gets promoted, our
867  * timeline will change while we remain in recovery.
868  *
869  * We can't just keep reading from the old timeline as the last WAL
870  * archive in the timeline will get renamed to .partial by
871  * StartupXLOG().
872  *
873  * If that happens after our caller updated ThisTimeLineID but before
874  * we actually read the xlog page, we might still try to read from the
875  * old (now renamed) segment and fail. There's not much we can do
876  * about this, but it can only happen when we're a leaf of a cascading
877  * standby whose primary gets promoted while we're decoding, so a
878  * one-off ERROR isn't too bad.
879  */
880  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
881 
882  if (state->currTLI == ThisTimeLineID)
883  {
884 
885  if (loc <= read_upto)
886  break;
887 
889  pg_usleep(1000L);
890  }
891  else
892  {
893  /*
894  * We're on a historical timeline, so limit reading to the switch
895  * point where we moved to the next timeline.
896  *
897  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
898  * about the new timeline, so we must've received past the end of
899  * it.
900  */
901  read_upto = state->currTLIValidUntil;
902 
903  /*
904  * Setting tli to our wanted record's TLI is slightly wrong; the
905  * page might begin on an older timeline if it contains a timeline
906  * switch, since its xlog segment will have been copied from the
907  * prior timeline. This is pretty harmless though, as nothing
908  * cares so long as the timeline doesn't go backwards. We should
909  * read the page header instead; FIXME someday.
910  */
911  tli = state->currTLI;
912 
913  /* No need to wait on a historical timeline */
914  break;
915  }
916  }
917 
918  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
919  {
920  /*
921  * more than one block available; read only that block, have caller
922  * come back if they need more.
923  */
924  count = XLOG_BLCKSZ;
925  }
926  else if (targetPagePtr + reqLen > read_upto)
927  {
928  /* not enough data there */
929  return -1;
930  }
931  else
932  {
933  /* enough bytes available to satisfy the request */
934  count = read_upto - targetPagePtr;
935  }
936 
937  /*
938  * Even though we just determined how much of the page can be validly read
939  * as 'count', read the whole page anyway. It's guaranteed to be
940  * zero-padded up to the page boundary if it's incomplete.
941  */
942  if (!WALRead(state, cur_page, targetPagePtr, XLOG_BLCKSZ, tli,
943  &errinfo))
944  WALReadRaiseError(&errinfo);
945 
946  /* number of valid bytes in the buffer */
947  return count;
948 }
uint32 TimeLineID
Definition: xlogdefs.h:59
void WALReadRaiseError(WALReadError *errinfo)
Definition: xlogutils.c:955
XLogRecPtr GetFlushRecPtr(void)
Definition: xlog.c:8484
bool RecoveryInProgress(void)
Definition: xlog.c:8132
void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
Definition: xlogutils.c:687
void pg_usleep(long microsec)
Definition: signal.c:53
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
Definition: xlog.c:11596
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:238
TimeLineID ThisTimeLineID
Definition: xlog.c:193
TimeLineID currTLI
Definition: xlogreader.h:228
uint64 XLogRecPtr
Definition: xlogdefs.h:21
bool WALRead(XLogReaderState *state, char *buf, XLogRecPtr startptr, Size count, TimeLineID tli, WALReadError *errinfo)
Definition: xlogreader.c:1059
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:100

◆ report_invalid_page()

static void report_invalid_page ( int  elevel,
RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 65 of file xlogutils.c.

References elog, pfree(), and relpathperm.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

67 {
68  char *path = relpathperm(node, forkno);
69 
70  if (present)
71  elog(elevel, "page %u of relation %s is uninitialized",
72  blkno, path);
73  else
74  elog(elevel, "page %u of relation %s does not exist",
75  blkno, path);
76  pfree(path);
77 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
void pfree(void *pointer)
Definition: mcxt.c:1057
static int elevel
Definition: vacuumlazy.c:333
#define elog(elevel,...)
Definition: elog.h:228

◆ wal_segment_close()

void wal_segment_close ( XLogReaderState state)

◆ wal_segment_open()

void wal_segment_open ( XLogReaderState state,
XLogSegNo  nextSegNo,
TimeLineID tli_p 
)

Definition at line 784 of file xlogutils.c.

References BasicOpenFile(), ereport, errcode_for_file_access(), errmsg(), ERROR, MAXPGPATH, PG_BINARY, XLogReaderState::seg, XLogReaderState::segcxt, WALOpenSegment::ws_file, WALSegmentContext::ws_segsize, and XLogFilePath.

Referenced by create_logical_replication_slot(), pg_logical_replication_slot_advance(), pg_logical_slot_get_changes_guts(), and XlogReadTwoPhaseData().

786 {
787  TimeLineID tli = *tli_p;
788  char path[MAXPGPATH];
789 
790  XLogFilePath(path, tli, nextSegNo, state->segcxt.ws_segsize);
791  state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
792  if (state->seg.ws_file >= 0)
793  return;
794 
795  if (errno == ENOENT)
796  ereport(ERROR,
798  errmsg("requested WAL segment %s has already been removed",
799  path)));
800  else
801  ereport(ERROR,
803  errmsg("could not open file \"%s\": %m",
804  path)));
805 }
uint32 TimeLineID
Definition: xlogdefs.h:59
#define PG_BINARY
Definition: c.h:1271
WALOpenSegment seg
Definition: xlogreader.h:215
#define ERROR
Definition: elog.h:45
#define MAXPGPATH
int errcode_for_file_access(void)
Definition: elog.c:727
#define ereport(elevel,...)
Definition: elog.h:155
int BasicOpenFile(const char *fileName, int fileFlags)
Definition: fd.c:1014
#define XLogFilePath(path, tli, logSegNo, wal_segsz_bytes)
int errmsg(const char *fmt,...)
Definition: elog.c:915
WALSegmentContext segcxt
Definition: xlogreader.h:214

◆ WALReadRaiseError()

void WALReadRaiseError ( WALReadError errinfo)

Definition at line 955 of file xlogutils.c.

References ereport, errcode(), ERRCODE_DATA_CORRUPTED, errcode_for_file_access(), errmsg(), ERROR, MAXFNAMELEN, wal_segment_size, WALReadError::wre_errno, WALReadError::wre_off, WALReadError::wre_read, WALReadError::wre_req, WALReadError::wre_seg, WALOpenSegment::ws_segno, WALOpenSegment::ws_tli, and XLogFileName.

Referenced by logical_read_xlog_page(), read_local_xlog_page(), and XLogSendPhysical().

956 {
957  WALOpenSegment *seg = &errinfo->wre_seg;
958  char fname[MAXFNAMELEN];
959 
960  XLogFileName(fname, seg->ws_tli, seg->ws_segno, wal_segment_size);
961 
962  if (errinfo->wre_read < 0)
963  {
964  errno = errinfo->wre_errno;
965  ereport(ERROR,
967  errmsg("could not read from log segment %s, offset %u: %m",
968  fname, errinfo->wre_off)));
969  }
970  else if (errinfo->wre_read == 0)
971  {
972  ereport(ERROR,
974  errmsg("could not read from log segment %s, offset %u: read %d of %zu",
975  fname, errinfo->wre_off, errinfo->wre_read,
976  (Size) errinfo->wre_req)));
977  }
978 }
WALOpenSegment wre_seg
Definition: xlogreader.h:291
int wal_segment_size
Definition: xlog.c:118
int errcode(int sqlerrcode)
Definition: elog.c:704
#define ERROR
Definition: elog.h:45
XLogSegNo ws_segno
Definition: xlogreader.h:47
int errcode_for_file_access(void)
Definition: elog.c:727
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define MAXFNAMELEN
#define ereport(elevel,...)
Definition: elog.h:155
size_t Size
Definition: c.h:540
#define XLogFileName(fname, tli, logSegNo, wal_segsz_bytes)
TimeLineID ws_tli
Definition: xlogreader.h:48
int errmsg(const char *fmt,...)
Definition: elog.c:915

◆ XLogCheckInvalidPages()

void XLogCheckInvalidPages ( void  )

Definition at line 223 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), ignore_invalid_pages, xl_invalid_page::key, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

224 {
226  xl_invalid_page *hentry;
227  bool foundone = false;
228 
229  if (invalid_page_tab == NULL)
230  return; /* nothing to do */
231 
233 
234  /*
235  * Our strategy is to emit WARNING messages for all remaining entries and
236  * only PANIC after we've dumped all the available info.
237  */
238  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
239  {
240  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
241  hentry->key.blkno, hentry->present);
242  foundone = true;
243  }
244 
245  if (foundone)
247  "WAL contains references to invalid pages");
248 
250  invalid_page_tab = NULL;
251 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:862
ForkNumber forkno
Definition: xlogutils.c:50
#define PANIC
Definition: elog.h:55
RelFileNode node
Definition: xlogutils.c:49
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:65
BlockNumber blkno
Definition: xlogutils.c:51
xl_invalid_page_key key
Definition: xlogutils.c:56
bool ignore_invalid_pages
Definition: xlogutils.c:35
#define WARNING
Definition: elog.h:40
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1436
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1426
#define elog(elevel,...)
Definition: elog.h:228
static HTAB * invalid_page_tab
Definition: xlogutils.c:60
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ XLogDropDatabase()

void XLogDropDatabase ( Oid  dbid)

Definition at line 623 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

624 {
625  /*
626  * This is unnecessarily heavy-handed, as it will close SMgrRelation
627  * objects for other databases as well. DROP DATABASE occurs seldom enough
628  * that it's not worth introducing a variant of smgrclose for just this
629  * purpose. XXX: Or should we rather leave the smgr entries dangling?
630  */
631  smgrcloseall();
632 
634 }
static void forget_invalid_pages_db(Oid dbid)
Definition: xlogutils.c:180
void smgrcloseall(void)
Definition: smgr.c:286

◆ XLogDropRelation()

void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 612 of file xlogutils.c.

References forget_invalid_pages().

Referenced by DropRelationFiles().

613 {
614  forget_invalid_pages(rnode, forknum, 0);
615 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:145

◆ XLogHaveInvalidPages()

bool XLogHaveInvalidPages ( void  )

Definition at line 213 of file xlogutils.c.

References hash_get_num_entries().

Referenced by RecoveryRestartPoint().

214 {
215  if (invalid_page_tab != NULL &&
217  return true;
218  return false;
219 }
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1382
static HTAB * invalid_page_tab
Definition: xlogutils.c:60

◆ XLogInitBufferForRedo()

◆ XLogReadBufferExtended()

Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 439 of file xlogutils.c.

References Assert, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by checkXLogConsistency(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

441 {
442  BlockNumber lastblock;
443  Buffer buffer;
444  SMgrRelation smgr;
445 
446  Assert(blkno != P_NEW);
447 
448  /* Open the relation at smgr level */
449  smgr = smgropen(rnode, InvalidBackendId);
450 
451  /*
452  * Create the target file if it doesn't already exist. This lets us cope
453  * if the replay sequence contains writes to a relation that is later
454  * deleted. (The original coding of this routine would instead suppress
455  * the writes, but that seems like it risks losing valuable data if the
456  * filesystem loses an inode during a crash. Better to write the data
457  * until we are actually told to delete the file.)
458  */
459  smgrcreate(smgr, forknum, true);
460 
461  lastblock = smgrnblocks(smgr, forknum);
462 
463  if (blkno < lastblock)
464  {
465  /* page exists in file */
466  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
467  mode, NULL);
468  }
469  else
470  {
471  /* hm, page doesn't exist in file */
472  if (mode == RBM_NORMAL)
473  {
474  log_invalid_page(rnode, forknum, blkno, false);
475  return InvalidBuffer;
476  }
477  if (mode == RBM_NORMAL_NO_LOG)
478  return InvalidBuffer;
479  /* OK to extend the file */
480  /* we do this in recovery only - no rel-extension lock needed */
482  buffer = InvalidBuffer;
483  do
484  {
485  if (buffer != InvalidBuffer)
486  {
489  ReleaseBuffer(buffer);
490  }
491  buffer = ReadBufferWithoutRelcache(rnode, forknum,
492  P_NEW, mode, NULL);
493  }
494  while (BufferGetBlockNumber(buffer) < blkno);
495  /* Handle the corner case that P_NEW returns non-consecutive pages */
496  if (BufferGetBlockNumber(buffer) != blkno)
497  {
500  ReleaseBuffer(buffer);
501  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
502  mode, NULL);
503  }
504  }
505 
506  if (mode == RBM_NORMAL)
507  {
508  /* check that page has been initialized */
509  Page page = (Page) BufferGetPage(buffer);
510 
511  /*
512  * We assume that PageIsNew is safe without a lock. During recovery,
513  * there should be no other backends that could modify the buffer at
514  * the same time.
515  */
516  if (PageIsNew(page))
517  {
518  ReleaseBuffer(buffer);
519  log_invalid_page(rnode, forknum, blkno, true);
520  return InvalidBuffer;
521  }
522  }
523 
524  return buffer;
525 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
bool InRecovery
Definition: xlog.c:206
#define InvalidBuffer
Definition: buf.h:25
Buffer ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:708
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3700
#define P_NEW
Definition: bufmgr.h:91
static void log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:81
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:146
#define InvalidBackendId
Definition: backendid.h:23
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:548
#define Assert(condition)
Definition: c.h:804
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2674
#define PageIsNew(page)
Definition: bufpage.h:229
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:78

◆ XLogReadBufferForRedo()

XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 292 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_dedup(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

294 {
295  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
296  false, buf);
297 }
static char * buf
Definition: pg_test_fsync.c:68
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:329

◆ XLogReadBufferForRedoExtended()

XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 329 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, xl_invalid_page_key::blkno, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

333 {
334  XLogRecPtr lsn = record->EndRecPtr;
335  RelFileNode rnode;
336  ForkNumber forknum;
337  BlockNumber blkno;
338  Page page;
339  bool zeromode;
340  bool willinit;
341 
342  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
343  {
344  /* Caller specified a bogus block_id */
345  elog(PANIC, "failed to locate backup block with ID %d", block_id);
346  }
347 
348  /*
349  * Make sure that if the block is marked with WILL_INIT, the caller is
350  * going to initialize it. And vice versa.
351  */
352  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
353  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
354  if (willinit && !zeromode)
355  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
356  if (!willinit && zeromode)
357  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
358 
359  /* If it has a full-page image and it should be restored, do it. */
360  if (XLogRecBlockImageApply(record, block_id))
361  {
362  Assert(XLogRecHasBlockImage(record, block_id));
363  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
364  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
365  page = BufferGetPage(*buf);
366  if (!RestoreBlockImage(record, block_id, page))
367  elog(ERROR, "failed to restore block image");
368 
369  /*
370  * The page may be uninitialized. If so, we can't set the LSN because
371  * that would corrupt the page.
372  */
373  if (!PageIsNew(page))
374  {
375  PageSetLSN(page, lsn);
376  }
377 
378  MarkBufferDirty(*buf);
379 
380  /*
381  * At the end of crash recovery the init forks of unlogged relations
382  * are copied, without going through shared buffers. So we need to
383  * force the on-disk state of init forks to always be in sync with the
384  * state in shared buffers.
385  */
386  if (forknum == INIT_FORKNUM)
387  FlushOneBuffer(*buf);
388 
389  return BLK_RESTORED;
390  }
391  else
392  {
393  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
394  if (BufferIsValid(*buf))
395  {
396  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
397  {
398  if (get_cleanup_lock)
399  LockBufferForCleanup(*buf);
400  else
401  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
402  }
403  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
404  return BLK_DONE;
405  else
406  return BLK_NEEDS_REDO;
407  }
408  else
409  return BLK_NOTFOUND;
410  }
411 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3996
#define XLogRecHasBlockImage(decoder, block_id)
Definition: xlogreader.h:315
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1483
Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
Definition: xlogutils.c:439
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define PANIC
Definition: elog.h:55
XLogRecPtr EndRecPtr
Definition: xlogreader.h:176
#define ERROR
Definition: elog.h:45
static char * buf
Definition: pg_test_fsync.c:68
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:182
ForkNumber
Definition: relpath.h:40
bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1491
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3939
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3680
bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
Definition: xlogreader.c:1544
#define PageGetLSN(page)
Definition: bufpage.h:366
#define PageIsNew(page)
Definition: bufpage.h:229
#define elog(elevel,...)
Definition: elog.h:228
#define XLogRecBlockImageApply(decoder, block_id)
Definition: xlogreader.h:317
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
Pointer Page
Definition: bufpage.h:78
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID+1]
Definition: xlogreader.h:197

◆ XLogReadDetermineTimeline()

void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 687 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), LSN_FORMAT_ARGS, Min, XLogReaderState::nextTLI, XLogReaderState::readLen, readTimeLineHistory(), XLogReaderState::seg, XLogReaderState::segcxt, XLogReaderState::segoff, ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), WALOpenSegment::ws_segno, and WALSegmentContext::ws_segsize.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

688 {
689  const XLogRecPtr lastReadPage = (state->seg.ws_segno *
690  state->segcxt.ws_segsize + state->segoff);
691 
692  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
693  Assert(wantLength <= XLOG_BLCKSZ);
694  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
695 
696  /*
697  * If the desired page is currently read in and valid, we have nothing to
698  * do.
699  *
700  * The caller should've ensured that it didn't previously advance readOff
701  * past the valid limit of this timeline, so it doesn't matter if the
702  * current TLI has since become historical.
703  */
704  if (lastReadPage == wantPage &&
705  state->readLen != 0 &&
706  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
707  return;
708 
709  /*
710  * If we're reading from the current timeline, it hasn't become historical
711  * and the page we're reading is after the last page read, we can again
712  * just carry on. (Seeking backwards requires a check to make sure the
713  * older page isn't on a prior timeline).
714  *
715  * ThisTimeLineID might've become historical since we last looked, but the
716  * caller is required not to read past the flush limit it saw at the time
717  * it looked up the timeline. There's nothing we can do about it if
718  * StartupXLOG() renames it to .partial concurrently.
719  */
720  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
721  {
722  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
723  return;
724  }
725 
726  /*
727  * If we're just reading pages from a previously validated historical
728  * timeline and the timeline we're reading from is valid until the end of
729  * the current segment we can just keep reading.
730  */
731  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
732  state->currTLI != ThisTimeLineID &&
733  state->currTLI != 0 &&
734  ((wantPage + wantLength) / state->segcxt.ws_segsize) <
735  (state->currTLIValidUntil / state->segcxt.ws_segsize))
736  return;
737 
738  /*
739  * If we reach this point we're either looking up a page for random
740  * access, the current timeline just became historical, or we're reading
741  * from a new segment containing a timeline switch. In all cases we need
742  * to determine the newest timeline on the segment.
743  *
744  * If it's the current timeline we can just keep reading from here unless
745  * we detect a timeline switch that makes the current timeline historical.
746  * If it's a historical timeline we can read all the segment on the newest
747  * timeline because it contains all the old timelines' data too. So only
748  * one switch check is required.
749  */
750  {
751  /*
752  * We need to re-read the timeline history in case it's been changed
753  * by a promotion or replay from a cascaded replica.
754  */
755  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
756  XLogRecPtr endOfSegment;
757 
758  endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
759  state->segcxt.ws_segsize - 1;
760  Assert(wantPage / state->segcxt.ws_segsize ==
761  endOfSegment / state->segcxt.ws_segsize);
762 
763  /*
764  * Find the timeline of the last LSN on the segment containing
765  * wantPage.
766  */
767  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
768  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
769  &state->nextTLI);
770 
772  wantPage + wantLength < state->currTLIValidUntil);
773 
774  list_free_deep(timelineHistory);
775 
776  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
777  state->currTLI,
778  LSN_FORMAT_ARGS(state->currTLIValidUntil));
779  }
780 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
TimeLineID tliOfPointInHistory(XLogRecPtr ptr, List *history)
Definition: timeline.c:552
#define DEBUG3
Definition: elog.h:23
#define Min(x, y)
Definition: c.h:986
List * readTimeLineHistory(TimeLineID targetTLI)
Definition: timeline.c:76
void list_free_deep(List *list)
Definition: list.c:1405
WALOpenSegment seg
Definition: xlogreader.h:215
#define LSN_FORMAT_ARGS(lsn)
Definition: xlogdefs.h:43
XLogSegNo ws_segno
Definition: xlogreader.h:47
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:238
TimeLineID nextTLI
Definition: xlogreader.h:244
XLogRecPtr tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
Definition: timeline.c:580
TimeLineID ThisTimeLineID
Definition: xlog.c:193
TimeLineID currTLI
Definition: xlogreader.h:228
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define elog(elevel,...)
Definition: elog.h:228
WALSegmentContext segcxt
Definition: xlogreader.h:214
Definition: pg_list.h:50

◆ XLogTruncateRelation()

void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 642 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

644 {
645  forget_invalid_pages(rnode, forkNum, nblocks);
646 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:145

Variable Documentation

◆ ignore_invalid_pages

bool ignore_invalid_pages = false

Definition at line 35 of file xlogutils.c.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

◆ invalid_page_tab

HTAB* invalid_page_tab = NULL
static

Definition at line 60 of file xlogutils.c.