PostgreSQL Source Code (git master)
xlogutils.c File Reference
#include "postgres.h"
#include <unistd.h>
#include "access/timeline.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"

Data Structures

struct  xl_invalid_page_key
 
struct  xl_invalid_page
 
struct  FakeRelCacheEntryData
 

Typedefs

typedef struct xl_invalid_page_key xl_invalid_page_key
 
typedef struct xl_invalid_page xl_invalid_page
 
typedef FakeRelCacheEntryData * FakeRelCacheEntry
 

Functions

static void report_invalid_page (int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void log_invalid_page (RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void forget_invalid_pages (RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
 
static void forget_invalid_pages_db (Oid dbid)
 
bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
static void XLogRead (char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, Size count)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI)
 

Variables

static HTAB * invalid_page_tab = NULL
 

Typedef Documentation

◆ FakeRelCacheEntry

◆ xl_invalid_page

◆ xl_invalid_page_key

Function Documentation

◆ CreateFakeRelcacheEntry()

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 550 of file xlogutils.c.

References Assert, LockRelId::dbId, RelFileNode::dbNode, InRecovery, InvalidBackendId, LockInfoData::lockRelId, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and sprintf.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

551 {
552  FakeRelCacheEntry fakeentry;
553  Relation rel;
554 
555  Assert(InRecovery);
556 
557  /* Allocate the Relation struct and all related space in one block. */
558  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
559  rel = (Relation) fakeentry;
560 
561  rel->rd_rel = &fakeentry->pgc;
562  rel->rd_node = rnode;
563  /* We will never be working with temp rels during recovery */
564  rel->rd_backend = InvalidBackendId;
565 
566  /* It must be a permanent table if we're in recovery. */
567  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
568 
569  /* We don't know the name of the relation; use relfilenode instead */
570  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
571 
572  /*
573  * We set up the lockRelId in case anything tries to lock the dummy
574  * relation. Note that this is fairly bogus since relNode may be
575  * different from the relation's OID. It shouldn't really matter though,
576  * since we are presumably running by ourselves and can't have any lock
577  * conflicts ...
578  */
579  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
580  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
581 
582  rel->rd_smgr = NULL;
583 
584  return rel;
585 }
LockRelId lockRelId
Definition: rel.h:43
struct SMgrRelationData * rd_smgr
Definition: rel.h:56
bool InRecovery
Definition: xlog.c:200
Oid dbId
Definition: rel.h:38
Form_pg_class rd_rel
Definition: rel.h:83
#define sprintf
Definition: port.h:194
struct RelationData * Relation
Definition: relcache.h:26
LockInfoData rd_lockInfo
Definition: rel.h:86
#define RelationGetRelationName(relation)
Definition: rel.h:450
#define InvalidBackendId
Definition: backendid.h:23
void * palloc0(Size size)
Definition: mcxt.c:955
RelFileNode rd_node
Definition: rel.h:54
BackendId rd_backend
Definition: rel.h:58
#define Assert(condition)
Definition: c.h:732
FormData_pg_class pgc
Definition: xlogutils.c:532
Oid relId
Definition: rel.h:37
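
Usage sketch (not taken from the source): the fake entry only carries enough state (rd_node, rd_smgr, rd_rel) for storage-level routines and must be released with FreeFakeRelcacheEntry(). Assuming a redo routine running during recovery, with rnode taken from the WAL record being replayed:

    Relation    reln;

    reln = CreateFakeRelcacheEntry(rnode);   /* rnode from the WAL record (assumption) */
    /* ... hand "reln" to code that only needs rd_node/rd_smgr, e.g. the
     * visibility map or free space map helpers ... */
    FreeFakeRelcacheEntry(reln);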

◆ forget_invalid_pages()

static void forget_invalid_pages ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  minblkno 
)
static

Definition at line 142 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), RelFileNodeEquals, relpathperm, and status().

Referenced by XLogDropRelation(), and XLogTruncateRelation().

143 {
144  HASH_SEQ_STATUS status;
145  xl_invalid_page *hentry;
146 
147  if (invalid_page_tab == NULL)
148  return; /* nothing to do */
149 
150  hash_seq_init(&status, invalid_page_tab);
151 
152  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
153  {
154  if (RelFileNodeEquals(hentry->key.node, node) &&
155  hentry->key.forkno == forkno &&
156  hentry->key.blkno >= minblkno)
157  {
158  if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
159  {
160  char *path = relpathperm(hentry->key.node, forkno);
161 
162  elog(DEBUG2, "page %u of relation %s has been dropped",
163  hentry->key.blkno, path);
164  pfree(path);
165  }
166 
167  if (hash_search(invalid_page_tab,
168  (void *) &hentry->key,
169  HASH_REMOVE, NULL) == NULL)
170  elog(ERROR, "hash table corrupted");
171  }
172  }
173 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkno
Definition: xlogutils.c:47
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:906
RelFileNode node
Definition: xlogutils.c:46
void pfree(void *pointer)
Definition: mcxt.c:1031
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:48
xl_invalid_page_key key
Definition: xlogutils.c:53
int log_min_messages
Definition: guc.c:510
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1389
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1379
#define elog(elevel,...)
Definition: elog.h:226
static HTAB * invalid_page_tab
Definition: xlogutils.c:57
int client_min_messages
Definition: guc.c:511
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88

◆ forget_invalid_pages_db()

static void forget_invalid_pages_db ( Oid  dbid)
static

Definition at line 177 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, RelFileNode::dbNode, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, pfree(), relpathperm, and status().

Referenced by XLogDropDatabase().

178 {
179  HASH_SEQ_STATUS status;
180  xl_invalid_page *hentry;
181 
182  if (invalid_page_tab == NULL)
183  return; /* nothing to do */
184 
185  hash_seq_init(&status, invalid_page_tab);
186 
187  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
188  {
189  if (hentry->key.node.dbNode == dbid)
190  {
191  if (log_min_messages <= DEBUG2 || client_min_messages <= DEBUG2)
192  {
193  char *path = relpathperm(hentry->key.node, hentry->key.forkno);
194 
195  elog(DEBUG2, "page %u of relation %s has been dropped",
196  hentry->key.blkno, path);
197  pfree(path);
198  }
199 
200  if (hash_search(invalid_page_tab,
201  (void *) &hentry->key,
202  HASH_REMOVE, NULL) == NULL)
203  elog(ERROR, "hash table corrupted");
204  }
205  }
206 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
ForkNumber forkno
Definition: xlogutils.c:47
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:906
RelFileNode node
Definition: xlogutils.c:46
void pfree(void *pointer)
Definition: mcxt.c:1031
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:48
xl_invalid_page_key key
Definition: xlogutils.c:53
int log_min_messages
Definition: guc.c:510
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1389
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1379
#define elog(elevel,...)
Definition: elog.h:226
static HTAB * invalid_page_tab
Definition: xlogutils.c:57
int client_min_messages
Definition: guc.c:511
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ FreeFakeRelcacheEntry()

void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 591 of file xlogutils.c.

References pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

592 {
593  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
594  if (fakerel->rd_smgr != NULL)
595  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
596  pfree(fakerel);
597 }
void smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
Definition: smgr.c:227
struct SMgrRelationData * rd_smgr
Definition: rel.h:56
void pfree(void *pointer)
Definition: mcxt.c:1031

◆ log_invalid_page()

static void log_invalid_page ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 78 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG1, elog, HASHCTL::entrysize, xl_invalid_page_key::forkno, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, hash_search(), sort-test::key, HASHCTL::keysize, log_min_messages, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, reachedConsistency, report_invalid_page(), and WARNING.

Referenced by XLogReadBufferExtended().

80 {
81  xl_invalid_page_key key;
82  xl_invalid_page *hentry;
83  bool found;
84 
85  /*
86  * Once recovery has reached a consistent state, the invalid-page table
87  * should be empty and remain so. If a reference to an invalid page is
88  * found after consistency is reached, PANIC immediately. This might seem
89  * aggressive, but it's better than letting the invalid reference linger
90  * in the hash table until the end of recovery and PANIC there, which
91  * might come only much later if this is a standby server.
92  */
93  if (reachedConsistency)
94  {
95  report_invalid_page(WARNING, node, forkno, blkno, present);
96  elog(PANIC, "WAL contains references to invalid pages");
97  }
98 
99  /*
100  * Log references to invalid pages at DEBUG1 level. This allows some
101  * tracing of the cause (note the elog context mechanism will tell us
102  * something about the XLOG record that generated the reference).
103  */
104  if (log_min_messages <= DEBUG1 || client_min_messages <= DEBUG1)
105  report_invalid_page(DEBUG1, node, forkno, blkno, present);
106 
107  if (invalid_page_tab == NULL)
108  {
109  /* create hash table when first needed */
110  HASHCTL ctl;
111 
112  memset(&ctl, 0, sizeof(ctl));
113  ctl.keysize = sizeof(xl_invalid_page_key);
114  ctl.entrysize = sizeof(xl_invalid_page);
115 
116  invalid_page_tab = hash_create("XLOG invalid-page table",
117  100,
118  &ctl,
119  HASH_ELEM | HASH_BLOBS);
120  }
121 
122  /* we currently assume xl_invalid_page_key contains no padding */
123  key.node = node;
124  key.forkno = forkno;
125  key.blkno = blkno;
126  hentry = (xl_invalid_page *)
127  hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);
128 
129  if (!found)
130  {
131  /* hash_search already filled in the key */
132  hentry->present = present;
133  }
134  else
135  {
136  /* repeat reference ... leave "present" as it was */
137  }
138 }
#define DEBUG1
Definition: elog.h:25
struct xl_invalid_page xl_invalid_page
#define HASH_ELEM
Definition: hsearch.h:87
Size entrysize
Definition: hsearch.h:73
struct xl_invalid_page_key xl_invalid_page_key
ForkNumber forkno
Definition: xlogutils.c:47
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:906
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:46
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:62
BlockNumber blkno
Definition: xlogutils.c:48
#define WARNING
Definition: elog.h:40
#define HASH_BLOBS
Definition: hsearch.h:88
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:316
Size keysize
Definition: hsearch.h:72
int log_min_messages
Definition: guc.c:510
bool reachedConsistency
Definition: xlog.c:844
#define elog(elevel,...)
Definition: elog.h:226
static HTAB * invalid_page_tab
Definition: xlogutils.c:57
int client_min_messages
Definition: guc.c:511

◆ read_local_xlog_page()

int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page,
TimeLineID pageTLI 
)

Definition at line 911 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), ThisTimeLineID, XLogReaderState::wal_segment_size, XLogRead(), and XLogReadDetermineTimeline().

Referenced by logical_read_local_xlog_page(), and XlogReadTwoPhaseData().

914 {
915  XLogRecPtr read_upto,
916  loc;
917  int count;
918 
919  loc = targetPagePtr + reqLen;
920 
921  /* Loop waiting for xlog to be available if necessary */
922  while (1)
923  {
924  /*
925  * Determine the limit of xlog we can currently read to, and what the
926  * most recent timeline is.
927  *
928  * RecoveryInProgress() will update ThisTimeLineID when it first
929  * notices recovery finishes, so we only have to maintain it for the
930  * local process until recovery ends.
931  */
932  if (!RecoveryInProgress())
933  read_upto = GetFlushRecPtr();
934  else
935  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
936 
937  *pageTLI = ThisTimeLineID;
938 
939  /*
940  * Check which timeline to get the record from.
941  *
942  * We have to do it each time through the loop because if we're in
943  * recovery as a cascading standby, the current timeline might've
944  * become historical. We can't rely on RecoveryInProgress() because in
945  * a standby configuration like
946  *
947  * A => B => C
948  *
949  * if we're a logical decoding session on C, and B gets promoted, our
950  * timeline will change while we remain in recovery.
951  *
952  * We can't just keep reading from the old timeline as the last WAL
953  * archive in the timeline will get renamed to .partial by
954  * StartupXLOG().
955  *
956  * If that happens after our caller updated ThisTimeLineID but before
957  * we actually read the xlog page, we might still try to read from the
958  * old (now renamed) segment and fail. There's not much we can do
959  * about this, but it can only happen when we're a leaf of a cascading
960  * standby whose master gets promoted while we're decoding, so a
961  * one-off ERROR isn't too bad.
962  */
963  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
964 
965  if (state->currTLI == ThisTimeLineID)
966  {
967 
968  if (loc <= read_upto)
969  break;
970 
971  CHECK_FOR_INTERRUPTS();
972  pg_usleep(1000L);
973  }
974  else
975  {
976  /*
977  * We're on a historical timeline, so limit reading to the switch
978  * point where we moved to the next timeline.
979  *
980  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
981  * about the new timeline, so we must've received past the end of
982  * it.
983  */
984  read_upto = state->currTLIValidUntil;
985 
986  /*
987  * Setting pageTLI to our wanted record's TLI is slightly wrong;
988  * the page might begin on an older timeline if it contains a
989  * timeline switch, since its xlog segment will have been copied
990  * from the prior timeline. This is pretty harmless though, as
991  * nothing cares so long as the timeline doesn't go backwards. We
992  * should read the page header instead; FIXME someday.
993  */
994  *pageTLI = state->currTLI;
995 
996  /* No need to wait on a historical timeline */
997  break;
998  }
999  }
1000 
1001  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
1002  {
1003  /*
1004  * more than one block available; read only that block, have caller
1005  * come back if they need more.
1006  */
1007  count = XLOG_BLCKSZ;
1008  }
1009  else if (targetPagePtr + reqLen > read_upto)
1010  {
1011  /* not enough data there */
1012  return -1;
1013  }
1014  else
1015  {
1016  /* enough bytes available to satisfy the request */
1017  count = read_upto - targetPagePtr;
1018  }
1019 
1020  /*
1021  * Even though we just determined how much of the page can be validly read
1022  * as 'count', read the whole page anyway. It's guaranteed to be
1023  * zero-padded up to the page boundary if it's incomplete.
1024  */
1025  XLogRead(cur_page, state->wal_segment_size, *pageTLI, targetPagePtr,
1026  XLOG_BLCKSZ);
1027 
1028  /* number of valid bytes in the buffer */
1029  return count;
1030 }
XLogRecPtr GetFlushRecPtr(void)
Definition: xlog.c:8230
bool RecoveryInProgress(void)
Definition: xlog.c:7898
void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
Definition: xlogutils.c:803
void pg_usleep(long microsec)
Definition: signal.c:53
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
Definition: xlog.c:11130
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:184
int wal_segment_size
Definition: xlogreader.h:83
TimeLineID ThisTimeLineID
Definition: xlog.c:187
TimeLineID currTLI
Definition: xlogreader.h:174
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
static void XLogRead(char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, Size count)
Definition: xlogutils.c:656
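
read_local_xlog_page() is meant to be installed as the page-read callback of an XLogReaderState. A hedged sketch of the wiring, loosely following what XlogReadTwoPhaseData() does; "lsn" is an assumed target record pointer and cleanup on error is omitted:

    XLogReaderState *xlogreader;
    XLogRecord *record;
    char       *errormsg;

    xlogreader = XLogReaderAllocate(wal_segment_size, &read_local_xlog_page,
                                    NULL);
    if (!xlogreader)
        ereport(ERROR,
                (errcode(ERRCODE_OUT_OF_MEMORY),
                 errmsg("out of memory")));

    record = XLogReadRecord(xlogreader, lsn, &errormsg);
    if (record == NULL)
        elog(ERROR, "could not read WAL record at %X/%X",
             (uint32) (lsn >> 32), (uint32) lsn);

    /* ... decode the record via XLogRecGetData(xlogreader) ... */

    XLogReaderFree(xlogreader);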

◆ report_invalid_page()

static void report_invalid_page ( int  elevel,
RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 62 of file xlogutils.c.

References elog, pfree(), and relpathperm.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

64 {
65  char *path = relpathperm(node, forkno);
66 
67  if (present)
68  elog(elevel, "page %u of relation %s is uninitialized",
69  blkno, path);
70  else
71  elog(elevel, "page %u of relation %s does not exist",
72  blkno, path);
73  pfree(path);
74 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:83
void pfree(void *pointer)
Definition: mcxt.c:1031
static int elevel
Definition: vacuumlazy.c:143
#define elog(elevel,...)
Definition: elog.h:226

◆ XLogCheckInvalidPages()

void XLogCheckInvalidPages ( void  )

Definition at line 220 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

221 {
222  HASH_SEQ_STATUS status;
223  xl_invalid_page *hentry;
224  bool foundone = false;
225 
226  if (invalid_page_tab == NULL)
227  return; /* nothing to do */
228 
229  hash_seq_init(&status, invalid_page_tab);
230 
231  /*
232  * Our strategy is to emit WARNING messages for all remaining entries and
233  * only PANIC after we've dumped all the available info.
234  */
235  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
236  {
237  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
238  hentry->key.blkno, hentry->present);
239  foundone = true;
240  }
241 
242  if (foundone)
243  elog(PANIC, "WAL contains references to invalid pages");
244 
245  hash_destroy(invalid_page_tab);
246  invalid_page_tab = NULL;
247 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:814
ForkNumber forkno
Definition: xlogutils.c:47
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:46
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:62
BlockNumber blkno
Definition: xlogutils.c:48
xl_invalid_page_key key
Definition: xlogutils.c:53
#define WARNING
Definition: elog.h:40
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1389
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1379
#define elog(elevel,...)
Definition: elog.h:226
static HTAB * invalid_page_tab
Definition: xlogutils.c:57
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ XLogDropDatabase()

void XLogDropDatabase ( Oid  dbid)

Definition at line 617 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

618 {
619  /*
620  * This is unnecessarily heavy-handed, as it will close SMgrRelation
621  * objects for other databases as well. DROP DATABASE occurs seldom enough
622  * that it's not worth introducing a variant of smgrclose for just this
623  * purpose. XXX: Or should we rather leave the smgr entries dangling?
624  */
625  smgrcloseall();
626 
627  forget_invalid_pages_db(dbid);
628 }
static void forget_invalid_pages_db(Oid dbid)
Definition: xlogutils.c:177
void smgrcloseall(void)
Definition: smgr.c:286

◆ XLogDropRelation()

void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 606 of file xlogutils.c.

References forget_invalid_pages().

Referenced by DropRelationFiles().

607 {
608  forget_invalid_pages(rnode, forknum, 0);
609 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:142

◆ XLogHaveInvalidPages()

bool XLogHaveInvalidPages ( void  )

Definition at line 210 of file xlogutils.c.

References hash_get_num_entries().

Referenced by RecoveryRestartPoint().

211 {
212  if (invalid_page_tab != NULL &&
213  hash_get_num_entries(invalid_page_tab) > 0)
214  return true;
215  return false;
216 }
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1335
static HTAB * invalid_page_tab
Definition: xlogutils.c:57
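
A hedged sketch of the guard RecoveryRestartPoint() applies with this predicate: it refrains from recording a restartpoint while invalid-page references are still outstanding, since recovery restarted from that point would lose the cross-check that those pages were dropped or truncated later:

    if (XLogHaveInvalidPages())
    {
        elog(DEBUG2,
             "could not record restart point because there are unresolved references to invalid pages");
        return;
    }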

◆ XLogInitBufferForRedo()

◆ XLogRead()

static void XLogRead ( char *  buf,
int  segsize,
TimeLineID  tli,
XLogRecPtr  startptr,
Size  count 
)
static

Definition at line 656 of file xlogutils.c.

References Assert, BasicOpenFile(), buf, close, ereport, errcode_for_file_access(), errmsg(), ERROR, MAXPGPATH, PG_BINARY, pgstat_report_wait_end(), pgstat_report_wait_start(), read, sendFile, sendOff, sendSegNo, startptr, WAIT_EVENT_WAL_READ, wal_segment_size, XLByteInSeg, XLByteToSeg, XLogFilePath, and XLogSegmentOffset.

Referenced by read_local_xlog_page().

658 {
659  char *p;
660  XLogRecPtr recptr;
661  Size nbytes;
662 
663  /* state maintained across calls */
664  static int sendFile = -1;
665  static XLogSegNo sendSegNo = 0;
666  static TimeLineID sendTLI = 0;
667  static uint32 sendOff = 0;
668 
669  Assert(segsize == wal_segment_size);
670 
671  p = buf;
672  recptr = startptr;
673  nbytes = count;
674 
675  while (nbytes > 0)
676  {
677  uint32 startoff;
678  int segbytes;
679  int readbytes;
680 
681  startoff = XLogSegmentOffset(recptr, segsize);
682 
683  /* Do we need to switch to a different xlog segment? */
684  if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo, segsize) ||
685  sendTLI != tli)
686  {
687  char path[MAXPGPATH];
688 
689  if (sendFile >= 0)
690  close(sendFile);
691 
692  XLByteToSeg(recptr, sendSegNo, segsize);
693 
694  XLogFilePath(path, tli, sendSegNo, segsize);
695 
696  sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY);
697 
698  if (sendFile < 0)
699  {
700  if (errno == ENOENT)
701  ereport(ERROR,
702  (errcode_for_file_access(),
703  errmsg("requested WAL segment %s has already been removed",
704  path)));
705  else
706  ereport(ERROR,
707  (errcode_for_file_access(),
708  errmsg("could not open file \"%s\": %m",
709  path)));
710  }
711  sendOff = 0;
712  sendTLI = tli;
713  }
714 
715  /* Need to seek in the file? */
716  if (sendOff != startoff)
717  {
718  if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
719  {
720  char path[MAXPGPATH];
721  int save_errno = errno;
722 
723  XLogFilePath(path, tli, sendSegNo, segsize);
724  errno = save_errno;
725  ereport(ERROR,
726  (errcode_for_file_access(),
727  errmsg("could not seek in log segment %s to offset %u: %m",
728  path, startoff)));
729  }
730  sendOff = startoff;
731  }
732 
733  /* How many bytes are within this segment? */
734  if (nbytes > (segsize - startoff))
735  segbytes = segsize - startoff;
736  else
737  segbytes = nbytes;
738 
739  pgstat_report_wait_start(WAIT_EVENT_WAL_READ);
740  readbytes = read(sendFile, p, segbytes);
741  pgstat_report_wait_end();
742  if (readbytes <= 0)
743  {
744  char path[MAXPGPATH];
745  int save_errno = errno;
746 
747  XLogFilePath(path, tli, sendSegNo, segsize);
748  errno = save_errno;
749  ereport(ERROR,
750  (errcode_for_file_access(),
751  errmsg("could not read from log segment %s, offset %u, length %lu: %m",
752  path, sendOff, (unsigned long) segbytes)));
753  }
754 
755  /* Update state for read */
756  recptr += readbytes;
757 
758  sendOff += readbytes;
759  nbytes -= readbytes;
760  p += readbytes;
761  }
762 }
uint32 TimeLineID
Definition: xlogdefs.h:52
int wal_segment_size
Definition: xlog.c:112
static int sendFile
Definition: walsender.c:135
#define PG_BINARY
Definition: c.h:1191
#define XLByteInSeg(xlrp, logSegNo, wal_segsz_bytes)
#define ERROR
Definition: elog.h:43
#define MAXPGPATH
static char * buf
Definition: pg_test_fsync.c:68
uint64 XLogSegNo
Definition: xlogdefs.h:41
int errcode_for_file_access(void)
Definition: elog.c:593
unsigned int uint32
Definition: c.h:358
static void pgstat_report_wait_end(void)
Definition: pgstat.h:1342
#define ereport(elevel, rest)
Definition: elog.h:141
#define XLogSegmentOffset(xlogptr, wal_segsz_bytes)
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:732
size_t Size
Definition: c.h:466
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: pgstat.h:1318
static XLogSegNo sendSegNo
Definition: walsender.c:136
int BasicOpenFile(const char *fileName, int fileFlags)
Definition: fd.c:946
#define XLogFilePath(path, tli, logSegNo, wal_segsz_bytes)
int errmsg(const char *fmt,...)
Definition: elog.c:784
static uint32 sendOff
Definition: walsender.c:137
#define close(a)
Definition: win32.h:12
#define read(a, b, c)
Definition: win32.h:13
static XLogRecPtr startptr
Definition: basebackup.c:106
#define XLByteToSeg(xlrp, logSegNo, wal_segsz_bytes)

◆ XLogReadBufferExtended()

Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 437 of file xlogutils.c.

References Assert, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by btree_xlog_vacuum(), checkXLogConsistency(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

439 {
440  BlockNumber lastblock;
441  Buffer buffer;
442  SMgrRelation smgr;
443 
444  Assert(blkno != P_NEW);
445 
446  /* Open the relation at smgr level */
447  smgr = smgropen(rnode, InvalidBackendId);
448 
449  /*
450  * Create the target file if it doesn't already exist. This lets us cope
451  * if the replay sequence contains writes to a relation that is later
452  * deleted. (The original coding of this routine would instead suppress
453  * the writes, but that seems like it risks losing valuable data if the
454  * filesystem loses an inode during a crash. Better to write the data
455  * until we are actually told to delete the file.)
456  */
457  smgrcreate(smgr, forknum, true);
458 
459  lastblock = smgrnblocks(smgr, forknum);
460 
461  if (blkno < lastblock)
462  {
463  /* page exists in file */
464  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
465  mode, NULL);
466  }
467  else
468  {
469  /* hm, page doesn't exist in file */
470  if (mode == RBM_NORMAL)
471  {
472  log_invalid_page(rnode, forknum, blkno, false);
473  return InvalidBuffer;
474  }
475  if (mode == RBM_NORMAL_NO_LOG)
476  return InvalidBuffer;
477  /* OK to extend the file */
478  /* we do this in recovery only - no rel-extension lock needed */
479  Assert(InRecovery);
480  buffer = InvalidBuffer;
481  do
482  {
483  if (buffer != InvalidBuffer)
484  {
485  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
486  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
487  ReleaseBuffer(buffer);
488  }
489  buffer = ReadBufferWithoutRelcache(rnode, forknum,
490  P_NEW, mode, NULL);
491  }
492  while (BufferGetBlockNumber(buffer) < blkno);
493  /* Handle the corner case that P_NEW returns non-consecutive pages */
494  if (BufferGetBlockNumber(buffer) != blkno)
495  {
496  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
497  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
498  ReleaseBuffer(buffer);
499  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
500  mode, NULL);
501  }
502  }
503 
504  if (mode == RBM_NORMAL)
505  {
506  /* check that page has been initialized */
507  Page page = (Page) BufferGetPage(buffer);
508 
509  /*
510  * We assume that PageIsNew is safe without a lock. During recovery,
511  * there should be no other backends that could modify the buffer at
512  * the same time.
513  */
514  if (PageIsNew(page))
515  {
516  ReleaseBuffer(buffer);
517  log_invalid_page(rnode, forknum, blkno, true);
518  return InvalidBuffer;
519  }
520  }
521 
522  return buffer;
523 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:333
bool InRecovery
Definition: xlog.c:200
#define InvalidBuffer
Definition: buf.h:25
Buffer ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:684
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3353
#define P_NEW
Definition: bufmgr.h:81
static void log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:78
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:145
#define InvalidBackendId
Definition: backendid.h:23
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3590
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:609
#define Assert(condition)
Definition: c.h:732
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2613
#define PageIsNew(page)
Definition: bufpage.h:229
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:78
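
A hedged sketch of a direct call, outside the XLogReadBufferForRedo() wrappers, for a block that is not described by the WAL record itself; rnode and blkno are placeholders:

    Buffer      buffer;

    buffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, RBM_NORMAL);
    if (BufferIsValid(buffer))
    {
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        /* ... examine or update the page ... */
        UnlockReleaseBuffer(buffer);
    }
    /* With RBM_NORMAL, an InvalidBuffer result means the block does not exist;
     * the reference has already been recorded via log_invalid_page(). */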

◆ XLogReadBufferForRedo()

XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 289 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoDeleteRecord(), gistRedoPageDelete(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

291 {
292  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
293  false, buf);
294 }
static char * buf
Definition: pg_test_fsync.c:68
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:326
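
The usual redo-routine idiom around this function, sketched here with a placeholder block_id of 0; the page manipulation itself depends on the resource manager:

    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buffer;
    Page        page;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = (Page) BufferGetPage(buffer);
        /* ... apply the change described by the WAL record to "page" ... */
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);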

◆ XLogReadBufferForRedoExtended()

XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 326 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, xl_invalid_page_key::blkno, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

330 {
331  XLogRecPtr lsn = record->EndRecPtr;
332  RelFileNode rnode;
333  ForkNumber forknum;
334  BlockNumber blkno;
335  Page page;
336  bool zeromode;
337  bool willinit;
338 
339  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
340  {
341  /* Caller specified a bogus block_id */
342  elog(PANIC, "failed to locate backup block with ID %d", block_id);
343  }
344 
345  /*
346  * Make sure that if the block is marked with WILL_INIT, the caller is
347  * going to initialize it. And vice versa.
348  */
349  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
350  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
351  if (willinit && !zeromode)
352  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
353  if (!willinit && zeromode)
354  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
355 
356  /* If it has a full-page image and it should be restored, do it. */
357  if (XLogRecBlockImageApply(record, block_id))
358  {
359  Assert(XLogRecHasBlockImage(record, block_id));
360  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
361  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
362  page = BufferGetPage(*buf);
363  if (!RestoreBlockImage(record, block_id, page))
364  elog(ERROR, "failed to restore block image");
365 
366  /*
367  * The page may be uninitialized. If so, we can't set the LSN because
368  * that would corrupt the page.
369  */
370  if (!PageIsNew(page))
371  {
372  PageSetLSN(page, lsn);
373  }
374 
375  MarkBufferDirty(*buf);
376 
377  /*
378  * At the end of crash recovery the init forks of unlogged relations
379  * are copied, without going through shared buffers. So we need to
380  * force the on-disk state of init forks to always be in sync with the
381  * state in shared buffers.
382  */
383  if (forknum == INIT_FORKNUM)
384  FlushOneBuffer(*buf);
385 
386  return BLK_RESTORED;
387  }
388  else
389  {
390  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
391  if (BufferIsValid(*buf))
392  {
393  if (mode == RBM_NORMAL)
394  {
395  if (get_cleanup_lock)
396  LockBufferForCleanup(*buf);
397  else
398  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
399  }
400  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
401  return BLK_DONE;
402  else
403  return BLK_NEEDS_REDO;
404  }
405  else
406  return BLK_NOTFOUND;
407  }
408 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3647
#define XLogRecHasBlockImage(decoder, block_id)
Definition: xlogreader.h:242
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
Definition: xlogutils.c:437
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
#define PANIC
Definition: elog.h:53
XLogRecPtr EndRecPtr
Definition: xlogreader.h:124
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:68
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:182
ForkNumber
Definition: relpath.h:40
bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1348
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3590
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:732
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3333
bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
Definition: xlogreader.c:1401
#define PageGetLSN(page)
Definition: bufpage.h:366
#define PageIsNew(page)
Definition: bufpage.h:229
#define elog(elevel,...)
Definition: elog.h:226
#define XLogRecBlockImageApply(decoder, block_id)
Definition: xlogreader.h:244
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
Pointer Page
Definition: bufpage.h:78
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID+1]
Definition: xlogreader.h:143
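
A hedged sketch of the cleanup-lock variant, as vacuum-style redo routines use it; block_id 0 is a placeholder and the pruning step is only indicated by a comment:

    Buffer      buffer;
    Page        page;

    if (XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer)
        == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);
        /* a cleanup lock is held here, so dead items can be removed and the
         * page compacted safely */
        PageSetLSN(page, record->EndRecPtr);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);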

◆ XLogReadDetermineTimeline()

void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 803 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), Min, XLogReaderState::nextTLI, XLogReaderState::readLen, XLogReaderState::readOff, XLogReaderState::readSegNo, readTimeLineHistory(), ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), and XLogReaderState::wal_segment_size.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

804 {
805  const XLogRecPtr lastReadPage = state->readSegNo *
806  state->wal_segment_size + state->readOff;
807 
808  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
809  Assert(wantLength <= XLOG_BLCKSZ);
810  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
811 
812  /*
813  * If the desired page is currently read in and valid, we have nothing to
814  * do.
815  *
816  * The caller should've ensured that it didn't previously advance readOff
817  * past the valid limit of this timeline, so it doesn't matter if the
818  * current TLI has since become historical.
819  */
820  if (lastReadPage == wantPage &&
821  state->readLen != 0 &&
822  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
823  return;
824 
825  /*
826  * If we're reading from the current timeline, it hasn't become historical
827  * and the page we're reading is after the last page read, we can again
828  * just carry on. (Seeking backwards requires a check to make sure the
829  * older page isn't on a prior timeline).
830  *
831  * ThisTimeLineID might've become historical since we last looked, but the
832  * caller is required not to read past the flush limit it saw at the time
833  * it looked up the timeline. There's nothing we can do about it if
834  * StartupXLOG() renames it to .partial concurrently.
835  */
836  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
837  {
838  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
839  return;
840  }
841 
842  /*
843  * If we're just reading pages from a previously validated historical
844  * timeline and the timeline we're reading from is valid until the end of
845  * the current segment we can just keep reading.
846  */
847  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
848  state->currTLI != ThisTimeLineID &&
849  state->currTLI != 0 &&
850  ((wantPage + wantLength) / state->wal_segment_size) <
851  (state->currTLIValidUntil / state->wal_segment_size))
852  return;
853 
854  /*
855  * If we reach this point we're either looking up a page for random
856  * access, the current timeline just became historical, or we're reading
857  * from a new segment containing a timeline switch. In all cases we need
858  * to determine the newest timeline on the segment.
859  *
860  * If it's the current timeline we can just keep reading from here unless
861  * we detect a timeline switch that makes the current timeline historical.
862  * If it's a historical timeline we can read all the segment on the newest
863  * timeline because it contains all the old timelines' data too. So only
864  * one switch check is required.
865  */
866  {
867  /*
868  * We need to re-read the timeline history in case it's been changed
869  * by a promotion or replay from a cascaded replica.
870  */
871  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
872 
873  XLogRecPtr endOfSegment = (((wantPage / state->wal_segment_size) + 1)
874  * state->wal_segment_size) - 1;
875 
876  Assert(wantPage / state->wal_segment_size ==
877  endOfSegment / state->wal_segment_size);
878 
879  /*
880  * Find the timeline of the last LSN on the segment containing
881  * wantPage.
882  */
883  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
884  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
885  &state->nextTLI);
886 
887  Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
888  wantPage + wantLength < state->currTLIValidUntil);
889 
890  list_free_deep(timelineHistory);
891 
892  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
893  state->currTLI,
894  (uint32) (state->currTLIValidUntil >> 32),
895  (uint32) (state->currTLIValidUntil));
896  }
897 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
TimeLineID tliOfPointInHistory(XLogRecPtr ptr, List *history)
Definition: timeline.c:535
#define DEBUG3
Definition: elog.h:23
#define Min(x, y)
Definition: c.h:904
List * readTimeLineHistory(TimeLineID targetTLI)
Definition: timeline.c:75
void list_free_deep(List *list)
Definition: list.c:1387
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:184
unsigned int uint32
Definition: c.h:358
int wal_segment_size
Definition: xlogreader.h:83
TimeLineID nextTLI
Definition: xlogreader.h:190
XLogRecPtr tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
Definition: timeline.c:563
TimeLineID ThisTimeLineID
Definition: xlog.c:187
TimeLineID currTLI
Definition: xlogreader.h:174
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:732
XLogSegNo readSegNo
Definition: xlogreader.h:160
#define elog(elevel,...)
Definition: elog.h:226
Definition: pg_list.h:50

◆ XLogTruncateRelation()

void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 636 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

638 {
639  forget_invalid_pages(rnode, forkNum, nblocks);
640 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:142

Variable Documentation

◆ invalid_page_tab

HTAB* invalid_page_tab = NULL
static

Definition at line 57 of file xlogutils.c.