PostgreSQL Source Code  git master
xlogutils.c File Reference
#include "postgres.h"
#include <unistd.h>
#include "access/timeline.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"
Include dependency graph for xlogutils.c:

Go to the source code of this file.

Data Structures

struct  xl_invalid_page_key
 
struct  xl_invalid_page
 
struct  FakeRelCacheEntryData
 

Typedefs

typedef struct xl_invalid_page_key xl_invalid_page_key
 
typedef struct xl_invalid_page xl_invalid_page
 
typedef FakeRelCacheEntryData *FakeRelCacheEntry
 

Functions

static void report_invalid_page (int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void log_invalid_page (RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void forget_invalid_pages (RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
 
static void forget_invalid_pages_db (Oid dbid)
 
bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
static void XLogRead (char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, Size count)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI)
 

Variables

static HTAB * invalid_page_tab = NULL
 

Typedef Documentation

◆ FakeRelCacheEntry

◆ xl_invalid_page

◆ xl_invalid_page_key

Function Documentation

◆ CreateFakeRelcacheEntry()

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 551 of file xlogutils.c.

References Assert, LockRelId::dbId, RelFileNode::dbNode, InRecovery, InvalidBackendId, LockInfoData::lockRelId, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and RELPERSISTENCE_PERMANENT.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

552 {
553  FakeRelCacheEntry fakeentry;
554  Relation rel;
555 
557 
558  /* Allocate the Relation struct and all related space in one block. */
559  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
560  rel = (Relation) fakeentry;
561 
562  rel->rd_rel = &fakeentry->pgc;
563  rel->rd_node = rnode;
564  /* We will never be working with temp rels during recovery */
566 
567  /* It must be a permanent table if we're in recovery. */
568  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
569 
570  /* We don't know the name of the relation; use relfilenode instead */
571  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
572 
573  /*
574  * We set up the lockRelId in case anything tries to lock the dummy
575  * relation. Note that this is fairly bogus since relNode may be
576  * different from the relation's OID. It shouldn't really matter though,
577  * since we are presumably running by ourselves and can't have any lock
578  * conflicts ...
579  */
580  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
581  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
582 
583  rel->rd_smgr = NULL;
584 
585  return rel;
586 }
LockRelId lockRelId
Definition: rel.h:44
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
bool InRecovery
Definition: xlog.c:194
Oid dbId
Definition: rel.h:39
Form_pg_class rd_rel
Definition: rel.h:114
#define RELPERSISTENCE_PERMANENT
Definition: pg_class.h:170
struct RelationData * Relation
Definition: relcache.h:26
LockInfoData rd_lockInfo
Definition: rel.h:117
#define RelationGetRelationName(relation)
Definition: rel.h:445
#define InvalidBackendId
Definition: backendid.h:23
void * palloc0(Size size)
Definition: mcxt.c:877
RelFileNode rd_node
Definition: rel.h:85
BackendId rd_backend
Definition: rel.h:89
#define Assert(condition)
Definition: c.h:670
FormData_pg_class pgc
Definition: xlogutils.c:533
Oid relId
Definition: rel.h:38

◆ forget_invalid_pages()

static void forget_invalid_pages ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  minblkno 
)
static

Definition at line 143 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), RelFileNodeEquals, relpathperm, and status().

Referenced by XLogDropRelation(), and XLogTruncateRelation().

144 {
146  xl_invalid_page *hentry;
147 
148  if (invalid_page_tab == NULL)
149  return; /* nothing to do */
150 
152 
153  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
154  {
155  if (RelFileNodeEquals(hentry->key.node, node) &&
156  hentry->key.forkno == forkno &&
157  hentry->key.blkno >= minblkno)
158  {
160  {
161  char *path = relpathperm(hentry->key.node, forkno);
162 
163  elog(DEBUG2, "page %u of relation %s has been dropped",
164  hentry->key.blkno, path);
165  pfree(path);
166  }
167 
169  (void *) &hentry->key,
170  HASH_REMOVE, NULL) == NULL)
171  elog(ERROR, "hash table corrupted");
172  }
173  }
174 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
RelFileNode node
Definition: xlogutils.c:47
void pfree(void *pointer)
Definition: mcxt.c:949
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
int log_min_messages
Definition: guc.c:452
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:453
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ forget_invalid_pages_db()

static void forget_invalid_pages_db ( Oid  dbid)
static

Definition at line 178 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, RelFileNode::dbNode, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), relpathperm, and status().

Referenced by XLogDropDatabase().

179 {
181  xl_invalid_page *hentry;
182 
183  if (invalid_page_tab == NULL)
184  return; /* nothing to do */
185 
187 
188  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
189  {
190  if (hentry->key.node.dbNode == dbid)
191  {
193  {
194  char *path = relpathperm(hentry->key.node, hentry->key.forkno);
195 
196  elog(DEBUG2, "page %u of relation %s has been dropped",
197  hentry->key.blkno, path);
198  pfree(path);
199  }
200 
202  (void *) &hentry->key,
203  HASH_REMOVE, NULL) == NULL)
204  elog(ERROR, "hash table corrupted");
205  }
206  }
207 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
RelFileNode node
Definition: xlogutils.c:47
void pfree(void *pointer)
Definition: mcxt.c:949
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
int log_min_messages
Definition: guc.c:452
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:453
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ FreeFakeRelcacheEntry()

void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 592 of file xlogutils.c.

References pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

593 {
594  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
595  if (fakerel->rd_smgr != NULL)
596  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
597  pfree(fakerel);
598 }
void smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
Definition: smgr.c:222
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
void pfree(void *pointer)
Definition: mcxt.c:949

◆ log_invalid_page()

static void log_invalid_page ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 79 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG1, elog, HASHCTL::entrysize, xl_invalid_page_key::forkno, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, hash_search(), HASHCTL::keysize, log_min_messages, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, reachedConsistency, report_invalid_page(), and WARNING.

Referenced by XLogReadBufferExtended().

81 {
83  xl_invalid_page *hentry;
84  bool found;
85 
86  /*
87  * Once recovery has reached a consistent state, the invalid-page table
88  * should be empty and remain so. If a reference to an invalid page is
89  * found after consistency is reached, PANIC immediately. This might seem
90  * aggressive, but it's better than letting the invalid reference linger
91  * in the hash table until the end of recovery and PANIC there, which
92  * might come only much later if this is a standby server.
93  */
95  {
96  report_invalid_page(WARNING, node, forkno, blkno, present);
97  elog(PANIC, "WAL contains references to invalid pages");
98  }
99 
100  /*
101  * Log references to invalid pages at DEBUG1 level. This allows some
102  * tracing of the cause (note the elog context mechanism will tell us
103  * something about the XLOG record that generated the reference).
104  */
106  report_invalid_page(DEBUG1, node, forkno, blkno, present);
107 
108  if (invalid_page_tab == NULL)
109  {
110  /* create hash table when first needed */
111  HASHCTL ctl;
112 
113  memset(&ctl, 0, sizeof(ctl));
114  ctl.keysize = sizeof(xl_invalid_page_key);
115  ctl.entrysize = sizeof(xl_invalid_page);
116 
117  invalid_page_tab = hash_create("XLOG invalid-page table",
118  100,
119  &ctl,
121  }
122 
123  /* we currently assume xl_invalid_page_key contains no padding */
124  key.node = node;
125  key.forkno = forkno;
126  key.blkno = blkno;
127  hentry = (xl_invalid_page *)
128  hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);
129 
130  if (!found)
131  {
132  /* hash_search already filled in the key */
133  hentry->present = present;
134  }
135  else
136  {
137  /* repeat reference ... leave "present" as it was */
138  }
139 }
#define DEBUG1
Definition: elog.h:25
struct xl_invalid_page xl_invalid_page
#define HASH_ELEM
Definition: hsearch.h:87
Size entrysize
Definition: hsearch.h:73
struct xl_invalid_page_key xl_invalid_page_key
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:902
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:47
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:63
BlockNumber blkno
Definition: xlogutils.c:49
#define WARNING
Definition: elog.h:40
#define HASH_BLOBS
Definition: hsearch.h:88
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
Size keysize
Definition: hsearch.h:72
int log_min_messages
Definition: guc.c:452
bool reachedConsistency
Definition: xlog.c:834
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:453
#define elog
Definition: elog.h:219

◆ read_local_xlog_page()

int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page,
TimeLineID pageTLI 
)

Definition at line 910 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), ThisTimeLineID, XLogReaderState::wal_segment_size, XLogRead(), and XLogReadDetermineTimeline().

Referenced by logical_read_local_xlog_page(), and XlogReadTwoPhaseData().

913 {
914  XLogRecPtr read_upto,
915  loc;
916  int count;
917 
918  loc = targetPagePtr + reqLen;
919 
920  /* Loop waiting for xlog to be available if necessary */
921  while (1)
922  {
923  /*
924  * Determine the limit of xlog we can currently read to, and what the
925  * most recent timeline is.
926  *
927  * RecoveryInProgress() will update ThisTimeLineID when it first
928  * notices recovery finishes, so we only have to maintain it for the
929  * local process until recovery ends.
930  */
931  if (!RecoveryInProgress())
932  read_upto = GetFlushRecPtr();
933  else
934  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
935 
936  *pageTLI = ThisTimeLineID;
937 
938  /*
939  * Check which timeline to get the record from.
940  *
941  * We have to do it each time through the loop because if we're in
942  * recovery as a cascading standby, the current timeline might've
943  * become historical. We can't rely on RecoveryInProgress() because in
944  * a standby configuration like
945  *
946  * A => B => C
947  *
948  * if we're a logical decoding session on C, and B gets promoted, our
949  * timeline will change while we remain in recovery.
950  *
951  * We can't just keep reading from the old timeline as the last WAL
952  * archive in the timeline will get renamed to .partial by
953  * StartupXLOG().
954  *
955  * If that happens after our caller updated ThisTimeLineID but before
956  * we actually read the xlog page, we might still try to read from the
957  * old (now renamed) segment and fail. There's not much we can do
958  * about this, but it can only happen when we're a leaf of a cascading
959  * standby whose master gets promoted while we're decoding, so a
960  * one-off ERROR isn't too bad.
961  */
962  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
963 
964  if (state->currTLI == ThisTimeLineID)
965  {
966 
967  if (loc <= read_upto)
968  break;
969 
971  pg_usleep(1000L);
972  }
973  else
974  {
975  /*
976  * We're on a historical timeline, so limit reading to the switch
977  * point where we moved to the next timeline.
978  *
979  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
980  * about the new timeline, so we must've received past the end of
981  * it.
982  */
983  read_upto = state->currTLIValidUntil;
984 
985  /*
986  * Setting pageTLI to our wanted record's TLI is slightly wrong;
987  * the page might begin on an older timeline if it contains a
988  * timeline switch, since its xlog segment will have been copied
989  * from the prior timeline. This is pretty harmless though, as
990  * nothing cares so long as the timeline doesn't go backwards. We
991  * should read the page header instead; FIXME someday.
992  */
993  *pageTLI = state->currTLI;
994 
995  /* No need to wait on a historical timeline */
996  break;
997  }
998  }
999 
1000  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
1001  {
1002  /*
1003  * more than one block available; read only that block, have caller
1004  * come back if they need more.
1005  */
1006  count = XLOG_BLCKSZ;
1007  }
1008  else if (targetPagePtr + reqLen > read_upto)
1009  {
1010  /* not enough data there */
1011  return -1;
1012  }
1013  else
1014  {
1015  /* enough bytes available to satisfy the request */
1016  count = read_upto - targetPagePtr;
1017  }
1018 
1019  /*
1020  * Even though we just determined how much of the page can be validly read
1021  * as 'count', read the whole page anyway. It's guaranteed to be
1022  * zero-padded up to the page boundary if it's incomplete.
1023  */
1024  XLogRead(cur_page, state->wal_segment_size, *pageTLI, targetPagePtr,
1025  XLOG_BLCKSZ);
1026 
1027  /* number of valid bytes in the buffer */
1028  return count;
1029 }
XLogRecPtr GetFlushRecPtr(void)
Definition: xlog.c:8261
bool RecoveryInProgress(void)
Definition: xlog.c:7929
void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
Definition: xlogutils.c:802
void pg_usleep(long microsec)
Definition: signal.c:53
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
Definition: xlog.c:11128
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:180
int wal_segment_size
Definition: xlogreader.h:79
TimeLineID ThisTimeLineID
Definition: xlog.c:181
TimeLineID currTLI
Definition: xlogreader.h:170
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:98
static void XLogRead(char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, Size count)
Definition: xlogutils.c:657

◆ report_invalid_page()

static void report_invalid_page ( int  elevel,
RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 63 of file xlogutils.c.

References elog, pfree(), and relpathperm.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

65 {
66  char *path = relpathperm(node, forkno);
67 
68  if (present)
69  elog(elevel, "page %u of relation %s is uninitialized",
70  blkno, path);
71  else
72  elog(elevel, "page %u of relation %s does not exist",
73  blkno, path);
74  pfree(path);
75 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
void pfree(void *pointer)
Definition: mcxt.c:949
static int elevel
Definition: vacuumlazy.c:136
#define elog
Definition: elog.h:219

◆ XLogCheckInvalidPages()

void XLogCheckInvalidPages ( void  )

Definition at line 221 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

222 {
224  xl_invalid_page *hentry;
225  bool foundone = false;
226 
227  if (invalid_page_tab == NULL)
228  return; /* nothing to do */
229 
231 
232  /*
233  * Our strategy is to emit WARNING messages for all remaining entries and
234  * only PANIC after we've dumped all the available info.
235  */
236  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
237  {
238  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
239  hentry->key.blkno, hentry->present);
240  foundone = true;
241  }
242 
243  if (foundone)
244  elog(PANIC, "WAL contains references to invalid pages");
245 
247  invalid_page_tab = NULL;
248 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:810
ForkNumber forkno
Definition: xlogutils.c:48
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:47
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:63
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
#define WARNING
Definition: elog.h:40
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ XLogDropDatabase()

void XLogDropDatabase ( Oid  dbid)

Definition at line 618 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

619 {
620  /*
621  * This is unnecessarily heavy-handed, as it will close SMgrRelation
622  * objects for other databases as well. DROP DATABASE occurs seldom enough
623  * that it's not worth introducing a variant of smgrclose for just this
624  * purpose. XXX: Or should we rather leave the smgr entries dangling?
625  */
626  smgrcloseall();
627 
629 }
static void forget_invalid_pages_db(Oid dbid)
Definition: xlogutils.c:178
void smgrcloseall(void)
Definition: smgr.c:326

◆ XLogDropRelation()

void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 607 of file xlogutils.c.

References forget_invalid_pages().

Referenced by xact_redo_abort(), and xact_redo_commit().

608 {
609  forget_invalid_pages(rnode, forknum, 0);
610 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143

◆ XLogHaveInvalidPages()

bool XLogHaveInvalidPages ( void  )

Definition at line 211 of file xlogutils.c.

References hash_get_num_entries().

Referenced by RecoveryRestartPoint().

212 {
213  if (invalid_page_tab != NULL &&
215  return true;
216  return false;
217 }
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1331
static HTAB * invalid_page_tab
Definition: xlogutils.c:58

◆ XLogInitBufferForRedo()

◆ XLogRead()

static void XLogRead ( char *  buf,
int  segsize,
TimeLineID  tli,
XLogRecPtr  startptr,
Size  count 
)
static

Definition at line 657 of file xlogutils.c.

References Assert, BasicOpenFile(), buf, close, ereport, errcode_for_file_access(), errmsg(), ERROR, MAXPGPATH, PG_BINARY, pgstat_report_wait_end(), pgstat_report_wait_start(), read, sendFile, sendOff, sendSegNo, WAIT_EVENT_WAL_READ, wal_segment_size, XLByteInSeg, XLByteToSeg, XLogFilePath, and XLogSegmentOffset.

Referenced by read_local_xlog_page().

659 {
660  char *p;
661  XLogRecPtr recptr;
662  Size nbytes;
663 
664  /* state maintained across calls */
665  static int sendFile = -1;
666  static XLogSegNo sendSegNo = 0;
667  static TimeLineID sendTLI = 0;
668  static uint32 sendOff = 0;
669 
670  Assert(segsize == wal_segment_size);
671 
672  p = buf;
673  recptr = startptr;
674  nbytes = count;
675 
676  while (nbytes > 0)
677  {
678  uint32 startoff;
679  int segbytes;
680  int readbytes;
681 
682  startoff = XLogSegmentOffset(recptr, segsize);
683 
684  /* Do we need to switch to a different xlog segment? */
685  if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo, segsize) ||
686  sendTLI != tli)
687  {
688  char path[MAXPGPATH];
689 
690  if (sendFile >= 0)
691  close(sendFile);
692 
693  XLByteToSeg(recptr, sendSegNo, segsize);
694 
695  XLogFilePath(path, tli, sendSegNo, segsize);
696 
697  sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY);
698 
699  if (sendFile < 0)
700  {
701  if (errno == ENOENT)
702  ereport(ERROR,
704  errmsg("requested WAL segment %s has already been removed",
705  path)));
706  else
707  ereport(ERROR,
709  errmsg("could not open file \"%s\": %m",
710  path)));
711  }
712  sendOff = 0;
713  sendTLI = tli;
714  }
715 
716  /* Need to seek in the file? */
717  if (sendOff != startoff)
718  {
719  if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
720  {
721  char path[MAXPGPATH];
722 
723  XLogFilePath(path, tli, sendSegNo, segsize);
724 
725  ereport(ERROR,
727  errmsg("could not seek in log segment %s to offset %u: %m",
728  path, startoff)));
729  }
730  sendOff = startoff;
731  }
732 
733  /* How many bytes are within this segment? */
734  if (nbytes > (segsize - startoff))
735  segbytes = segsize - startoff;
736  else
737  segbytes = nbytes;
738 
740  readbytes = read(sendFile, p, segbytes);
742  if (readbytes <= 0)
743  {
744  char path[MAXPGPATH];
745 
746  XLogFilePath(path, tli, sendSegNo, segsize);
747 
748  ereport(ERROR,
750  errmsg("could not read from log segment %s, offset %u, length %lu: %m",
751  path, sendOff, (unsigned long) segbytes)));
752  }
753 
754  /* Update state for read */
755  recptr += readbytes;
756 
757  sendOff += readbytes;
758  nbytes -= readbytes;
759  p += readbytes;
760  }
761 }
uint32 TimeLineID
Definition: xlogdefs.h:45
int wal_segment_size
Definition: xlog.c:113
static int sendFile
Definition: walsender.c:135
#define PG_BINARY
Definition: c.h:1025
#define XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes)
#define ERROR
Definition: elog.h:43
#define MAXPGPATH
static char * buf
Definition: pg_test_fsync.c:67
uint64 XLogSegNo
Definition: xlogdefs.h:34
int errcode_for_file_access(void)
Definition: elog.c:598
unsigned int uint32
Definition: c.h:296
static void pgstat_report_wait_end(void)
Definition: pgstat.h:1244
#define ereport(elevel, rest)
Definition: elog.h:122
#define XLogSegmentOffset(xlogptr, wal_segsz_bytes)
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
size_t Size
Definition: c.h:404
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: pgstat.h:1220
static XLogSegNo sendSegNo
Definition: walsender.c:136
int BasicOpenFile(const char *fileName, int fileFlags)
Definition: fd.c:929
#define XLogFilePath(path, tli, logSegNo, wal_segsz_bytes)
int errmsg(const char *fmt,...)
Definition: elog.c:797
static uint32 sendOff
Definition: walsender.c:137
#define close(a)
Definition: win32.h:12
#define read(a, b, c)
Definition: win32.h:13
#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)

◆ XLogReadBufferExtended()

Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 438 of file xlogutils.c.

References Assert, buffer, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by btree_xlog_delete_get_latestRemovedXid(), btree_xlog_vacuum(), checkXLogConsistency(), hash_xlog_vacuum_get_latestRemovedXid(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

440 {
441  BlockNumber lastblock;
442  Buffer buffer;
443  SMgrRelation smgr;
444 
445  Assert(blkno != P_NEW);
446 
447  /* Open the relation at smgr level */
448  smgr = smgropen(rnode, InvalidBackendId);
449 
450  /*
451  * Create the target file if it doesn't already exist. This lets us cope
452  * if the replay sequence contains writes to a relation that is later
453  * deleted. (The original coding of this routine would instead suppress
454  * the writes, but that seems like it risks losing valuable data if the
455  * filesystem loses an inode during a crash. Better to write the data
456  * until we are actually told to delete the file.)
457  */
458  smgrcreate(smgr, forknum, true);
459 
460  lastblock = smgrnblocks(smgr, forknum);
461 
462  if (blkno < lastblock)
463  {
464  /* page exists in file */
465  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
466  mode, NULL);
467  }
468  else
469  {
470  /* hm, page doesn't exist in file */
471  if (mode == RBM_NORMAL)
472  {
473  log_invalid_page(rnode, forknum, blkno, false);
474  return InvalidBuffer;
475  }
476  if (mode == RBM_NORMAL_NO_LOG)
477  return InvalidBuffer;
478  /* OK to extend the file */
479  /* we do this in recovery only - no rel-extension lock needed */
481  buffer = InvalidBuffer;
482  do
483  {
484  if (buffer != InvalidBuffer)
485  {
486  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
488  ReleaseBuffer(buffer);
489  }
490  buffer = ReadBufferWithoutRelcache(rnode, forknum,
491  P_NEW, mode, NULL);
492  }
493  while (BufferGetBlockNumber(buffer) < blkno);
494  /* Handle the corner case that P_NEW returns non-consecutive pages */
495  if (BufferGetBlockNumber(buffer) != blkno)
496  {
497  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
499  ReleaseBuffer(buffer);
500  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
501  mode, NULL);
502  }
503  }
504 
505  if (mode == RBM_NORMAL)
506  {
507  /* check that page has been initialized */
508  Page page = (Page) BufferGetPage(buffer);
509 
510  /*
511  * We assume that PageIsNew is safe without a lock. During recovery,
512  * there should be no other backends that could modify the buffer at
513  * the same time.
514  */
515  if (PageIsNew(page))
516  {
517  ReleaseBuffer(buffer);
518  log_invalid_page(rnode, forknum, blkno, true);
519  return InvalidBuffer;
520  }
521  }
522 
523  return buffer;
524 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:376
bool InRecovery
Definition: xlog.c:194
#define InvalidBuffer
Definition: buf.h:25
Buffer ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:682
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define P_NEW
Definition: bufmgr.h:82
static void log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:79
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:137
#define InvalidBackendId
Definition: backendid.h:23
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:672
#define Assert(condition)
Definition: c.h:670
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define PageIsNew(page)
Definition: bufpage.h:225
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74

◆ XLogReadBufferForRedo()

XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 290 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

292 {
293  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
294  false, buf);
295 }
static char * buf
Definition: pg_test_fsync.c:67
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:327

◆ XLogReadBufferForRedoExtended()

XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 327 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, xl_invalid_page_key::blkno, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

331 {
332  XLogRecPtr lsn = record->EndRecPtr;
333  RelFileNode rnode;
334  ForkNumber forknum;
335  BlockNumber blkno;
336  Page page;
337  bool zeromode;
338  bool willinit;
339 
340  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
341  {
342  /* Caller specified a bogus block_id */
343  elog(PANIC, "failed to locate backup block with ID %d", block_id);
344  }
345 
346  /*
347  * Make sure that if the block is marked with WILL_INIT, the caller is
348  * going to initialize it. And vice versa.
349  */
350  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
351  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
352  if (willinit && !zeromode)
353  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
354  if (!willinit && zeromode)
355  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
356 
357  /* If it has a full-page image and it should be restored, do it. */
358  if (XLogRecBlockImageApply(record, block_id))
359  {
360  Assert(XLogRecHasBlockImage(record, block_id));
361  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
362  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
363  page = BufferGetPage(*buf);
364  if (!RestoreBlockImage(record, block_id, page))
365  elog(ERROR, "failed to restore block image");
366 
367  /*
368  * The page may be uninitialized. If so, we can't set the LSN because
369  * that would corrupt the page.
370  */
371  if (!PageIsNew(page))
372  {
373  PageSetLSN(page, lsn);
374  }
375 
376  MarkBufferDirty(*buf);
377 
378  /*
379  * At the end of crash recovery the init forks of unlogged relations
380  * are copied, without going through shared buffers. So we need to
381  * force the on-disk state of init forks to always be in sync with the
382  * state in shared buffers.
383  */
384  if (forknum == INIT_FORKNUM)
385  FlushOneBuffer(*buf);
386 
387  return BLK_RESTORED;
388  }
389  else
390  {
391  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
392  if (BufferIsValid(*buf))
393  {
394  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
395  {
396  if (get_cleanup_lock)
397  LockBufferForCleanup(*buf);
398  else
399  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
400  }
401  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
402  return BLK_DONE;
403  else
404  return BLK_NEEDS_REDO;
405  }
406  else
407  return BLK_NOTFOUND;
408  }
409 }
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3603
#define XLogRecHasBlockImage(decoder, block_id)
Definition: xlogreader.h:231
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
Definition: xlogutils.c:438
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PANIC
Definition: elog.h:53
XLogRecPtr EndRecPtr
Definition: xlogreader.h:120
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:67
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:183
ForkNumber
Definition: relpath.h:24
bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1309
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3289
bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
Definition: xlogreader.c:1362
#define PageGetLSN(page)
Definition: bufpage.h:362
#define PageIsNew(page)
Definition: bufpage.h:225
#define elog
Definition: elog.h:219
#define XLogRecBlockImageApply(decoder, block_id)
Definition: xlogreader.h:233
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
Pointer Page
Definition: bufpage.h:74
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID+1]
Definition: xlogreader.h:139

◆ XLogReadDetermineTimeline()

void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 802 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), Min, XLogReaderState::nextTLI, XLogReaderState::readLen, XLogReaderState::readOff, XLogReaderState::readSegNo, readTimeLineHistory(), ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), and XLogReaderState::wal_segment_size.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

803 {
804  const XLogRecPtr lastReadPage = state->readSegNo *
805  state->wal_segment_size + state->readOff;
806 
807  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
808  Assert(wantLength <= XLOG_BLCKSZ);
809  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
810 
811  /*
812  * If the desired page is currently read in and valid, we have nothing to
813  * do.
814  *
815  * The caller should've ensured that it didn't previously advance readOff
816  * past the valid limit of this timeline, so it doesn't matter if the
817  * current TLI has since become historical.
818  */
819  if (lastReadPage == wantPage &&
820  state->readLen != 0 &&
821  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
822  return;
823 
824  /*
825  * If we're reading from the current timeline, it hasn't become historical
826  * and the page we're reading is after the last page read, we can again
827  * just carry on. (Seeking backwards requires a check to make sure the
828  * older page isn't on a prior timeline).
829  *
830  * ThisTimeLineID might've become historical since we last looked, but the
831  * caller is required not to read past the flush limit it saw at the time
832  * it looked up the timeline. There's nothing we can do about it if
833  * StartupXLOG() renames it to .partial concurrently.
834  */
835  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
836  {
837  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
838  return;
839  }
840 
841  /*
842  * If we're just reading pages from a previously validated historical
843  * timeline and the timeline we're reading from is valid until the end of
844  * the current segment we can just keep reading.
845  */
846  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
847  state->currTLI != ThisTimeLineID &&
848  state->currTLI != 0 &&
849  ((wantPage + wantLength) / state->wal_segment_size) <
850  (state->currTLIValidUntil / state->wal_segment_size))
851  return;
852 
853  /*
854  * If we reach this point we're either looking up a page for random
855  * access, the current timeline just became historical, or we're reading
856  * from a new segment containing a timeline switch. In all cases we need
857  * to determine the newest timeline on the segment.
858  *
859  * If it's the current timeline we can just keep reading from here unless
860  * we detect a timeline switch that makes the current timeline historical.
861  * If it's a historical timeline we can read all the segment on the newest
862  * timeline because it contains all the old timelines' data too. So only
863  * one switch check is required.
864  */
865  {
866  /*
867  * We need to re-read the timeline history in case it's been changed
868  * by a promotion or replay from a cascaded replica.
869  */
870  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
871 
872  XLogRecPtr endOfSegment = (((wantPage / state->wal_segment_size) + 1)
873  * state->wal_segment_size) - 1;
874 
875  Assert(wantPage / state->wal_segment_size ==
876  endOfSegment / state->wal_segment_size);
877 
878  /*
879  * Find the timeline of the last LSN on the segment containing
880  * wantPage.
881  */
882  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
883  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
884  &state->nextTLI);
885 
887  wantPage + wantLength < state->currTLIValidUntil);
888 
889  list_free_deep(timelineHistory);
890 
891  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
892  state->currTLI,
893  (uint32) (state->currTLIValidUntil >> 32),
894  (uint32) (state->currTLIValidUntil));
895  }
896 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
TimeLineID tliOfPointInHistory(XLogRecPtr ptr, List *history)
Definition: timeline.c:533
#define DEBUG3
Definition: elog.h:23
#define Min(x, y)
Definition: c.h:802
List * readTimeLineHistory(TimeLineID targetTLI)
Definition: timeline.c:75
void list_free_deep(List *list)
Definition: list.c:1147
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:180
unsigned int uint32
Definition: c.h:296
int wal_segment_size
Definition: xlogreader.h:79
TimeLineID nextTLI
Definition: xlogreader.h:186
XLogRecPtr tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
Definition: timeline.c:561
TimeLineID ThisTimeLineID
Definition: xlog.c:181
TimeLineID currTLI
Definition: xlogreader.h:170
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
XLogSegNo readSegNo
Definition: xlogreader.h:156
#define elog
Definition: elog.h:219
Definition: pg_list.h:45

◆ XLogTruncateRelation()

void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 637 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

639 {
640  forget_invalid_pages(rnode, forkNum, nblocks);
641 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143

Variable Documentation

◆ invalid_page_tab

HTAB* invalid_page_tab = NULL
static

Definition at line 58 of file xlogutils.c.