PostgreSQL Source Code  git master
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros
xlogutils.c File Reference
#include "postgres.h"
#include <unistd.h>
#include "access/timeline.h"
#include "access/xlog.h"
#include "access/xlog_internal.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"
Include dependency graph for xlogutils.c:

Go to the source code of this file.

Data Structures

struct  xl_invalid_page_key
 
struct  xl_invalid_page
 
struct  FakeRelCacheEntryData
 

Typedefs

typedef struct xl_invalid_page_key xl_invalid_page_key
 
typedef struct xl_invalid_page xl_invalid_page
 
typedef FakeRelCacheEntryData *FakeRelCacheEntry
 

Functions

static void report_invalid_page (int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void log_invalid_page (RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
 
static void forget_invalid_pages (RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
 
static void forget_invalid_pages_db (Oid dbid)
 
bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
static void XLogRead (char *buf, TimeLineID tli, XLogRecPtr startptr, Size count)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI)
 

Variables

static HTAB *invalid_page_tab = NULL
 

Typedef Documentation

Function Documentation

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 551 of file xlogutils.c.

References Assert, LockRelId::dbId, RelFileNode::dbNode, InRecovery, InvalidBackendId, LockInfoData::lockRelId, NULL, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and RELPERSISTENCE_PERMANENT.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

552 {
553  FakeRelCacheEntry fakeentry;
554  Relation rel;
555 
557 
558  /* Allocate the Relation struct and all related space in one block. */
559  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
560  rel = (Relation) fakeentry;
561 
562  rel->rd_rel = &fakeentry->pgc;
563  rel->rd_node = rnode;
564  /* We will never be working with temp rels during recovery */
566 
567  /* It must be a permanent table if we're in recovery. */
568  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
569 
570  /* We don't know the name of the relation; use relfilenode instead */
571  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
572 
573  /*
574  * We set up the lockRelId in case anything tries to lock the dummy
575  * relation. Note that this is fairly bogus since relNode may be
576  * different from the relation's OID. It shouldn't really matter though,
577  * since we are presumably running by ourselves and can't have any lock
578  * conflicts ...
579  */
580  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
581  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
582 
583  rel->rd_smgr = NULL;
584 
585  return rel;
586 }
LockRelId lockRelId
Definition: rel.h:44
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
bool InRecovery
Definition: xlog.c:192
Oid dbId
Definition: rel.h:39
Form_pg_class rd_rel
Definition: rel.h:114
#define RELPERSISTENCE_PERMANENT
Definition: pg_class.h:170
struct RelationData * Relation
Definition: relcache.h:21
LockInfoData rd_lockInfo
Definition: rel.h:117
#define RelationGetRelationName(relation)
Definition: rel.h:437
#define InvalidBackendId
Definition: backendid.h:23
void * palloc0(Size size)
Definition: mcxt.c:878
RelFileNode rd_node
Definition: rel.h:85
#define NULL
Definition: c.h:229
BackendId rd_backend
Definition: rel.h:89
#define Assert(condition)
Definition: c.h:675
FormData_pg_class pgc
Definition: xlogutils.c:533
Oid relId
Definition: rel.h:38
static void forget_invalid_pages ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  minblkno 
)
static

Definition at line 143 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, NULL, pfree(), RelFileNodeEquals, relpathperm, and status().

Referenced by XLogDropRelation(), and XLogTruncateRelation().

144 {
146  xl_invalid_page *hentry;
147 
148  if (invalid_page_tab == NULL)
149  return; /* nothing to do */
150 
152 
153  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
154  {
155  if (RelFileNodeEquals(hentry->key.node, node) &&
156  hentry->key.forkno == forkno &&
157  hentry->key.blkno >= minblkno)
158  {
160  {
161  char *path = relpathperm(hentry->key.node, forkno);
162 
163  elog(DEBUG2, "page %u of relation %s has been dropped",
164  hentry->key.blkno, path);
165  pfree(path);
166  }
167 
169  (void *) &hentry->key,
170  HASH_REMOVE, NULL) == NULL)
171  elog(ERROR, "hash table corrupted");
172  }
173  }
174 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
RelFileNode node
Definition: xlogutils.c:47
void pfree(void *pointer)
Definition: mcxt.c:950
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
int log_min_messages
Definition: guc.c:451
#define NULL
Definition: c.h:229
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1351
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1341
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:452
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
#define RelFileNodeEquals(node1, node2)
Definition: relfilenode.h:88
static void forget_invalid_pages_db ( Oid  dbid)
static

Definition at line 178 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, RelFileNode::dbNode, DEBUG2, elog, ERROR, xl_invalid_page_key::forkno, HASH_REMOVE, hash_search(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, log_min_messages, xl_invalid_page_key::node, NULL, pfree(), relpathperm, and status().

Referenced by XLogDropDatabase().

179 {
181  xl_invalid_page *hentry;
182 
183  if (invalid_page_tab == NULL)
184  return; /* nothing to do */
185 
187 
188  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
189  {
190  if (hentry->key.node.dbNode == dbid)
191  {
193  {
194  char *path = relpathperm(hentry->key.node, hentry->key.forkno);
195 
196  elog(DEBUG2, "page %u of relation %s has been dropped",
197  hentry->key.blkno, path);
198  pfree(path);
199  }
200 
202  (void *) &hentry->key,
203  HASH_REMOVE, NULL) == NULL)
204  elog(ERROR, "hash table corrupted");
205  }
206  }
207 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
RelFileNode node
Definition: xlogutils.c:47
void pfree(void *pointer)
Definition: mcxt.c:950
#define ERROR
Definition: elog.h:43
#define DEBUG2
Definition: elog.h:24
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
int log_min_messages
Definition: guc.c:451
#define NULL
Definition: c.h:229
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1351
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1341
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:452
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 592 of file xlogutils.c.

References NULL, pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

593 {
594  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
595  if (fakerel->rd_smgr != NULL)
596  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
597  pfree(fakerel);
598 }
void smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
Definition: smgr.c:222
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
void pfree(void *pointer)
Definition: mcxt.c:950
#define NULL
Definition: c.h:229
static void log_invalid_page ( RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 79 of file xlogutils.c.

References xl_invalid_page_key::blkno, client_min_messages, DEBUG1, elog, HASHCTL::entrysize, xl_invalid_page_key::forkno, HASH_BLOBS, hash_create(), HASH_ELEM, HASH_ENTER, hash_search(), HASHCTL::keysize, log_min_messages, xl_invalid_page_key::node, NULL, PANIC, xl_invalid_page::present, reachedConsistency, report_invalid_page(), and WARNING.

Referenced by XLogReadBufferExtended().

81 {
83  xl_invalid_page *hentry;
84  bool found;
85 
86  /*
87  * Once recovery has reached a consistent state, the invalid-page table
88  * should be empty and remain so. If a reference to an invalid page is
89  * found after consistency is reached, PANIC immediately. This might seem
90  * aggressive, but it's better than letting the invalid reference linger
91  * in the hash table until the end of recovery and PANIC there, which
92  * might come only much later if this is a standby server.
93  */
95  {
96  report_invalid_page(WARNING, node, forkno, blkno, present);
97  elog(PANIC, "WAL contains references to invalid pages");
98  }
99 
100  /*
101  * Log references to invalid pages at DEBUG1 level. This allows some
102  * tracing of the cause (note the elog context mechanism will tell us
103  * something about the XLOG record that generated the reference).
104  */
106  report_invalid_page(DEBUG1, node, forkno, blkno, present);
107 
108  if (invalid_page_tab == NULL)
109  {
110  /* create hash table when first needed */
111  HASHCTL ctl;
112 
113  memset(&ctl, 0, sizeof(ctl));
114  ctl.keysize = sizeof(xl_invalid_page_key);
115  ctl.entrysize = sizeof(xl_invalid_page);
116 
117  invalid_page_tab = hash_create("XLOG invalid-page table",
118  100,
119  &ctl,
121  }
122 
123  /* we currently assume xl_invalid_page_key contains no padding */
124  key.node = node;
125  key.forkno = forkno;
126  key.blkno = blkno;
127  hentry = (xl_invalid_page *)
128  hash_search(invalid_page_tab, (void *) &key, HASH_ENTER, &found);
129 
130  if (!found)
131  {
132  /* hash_search already filled in the key */
133  hentry->present = present;
134  }
135  else
136  {
137  /* repeat reference ... leave "present" as it was */
138  }
139 }
#define DEBUG1
Definition: elog.h:25
struct xl_invalid_page xl_invalid_page
#define HASH_ELEM
Definition: hsearch.h:87
Size entrysize
Definition: hsearch.h:73
struct xl_invalid_page_key xl_invalid_page_key
ForkNumber forkno
Definition: xlogutils.c:48
void * hash_search(HTAB *hashp, const void *keyPtr, HASHACTION action, bool *foundPtr)
Definition: dynahash.c:885
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:47
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:63
BlockNumber blkno
Definition: xlogutils.c:49
#define WARNING
Definition: elog.h:40
#define HASH_BLOBS
Definition: hsearch.h:88
HTAB * hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
Definition: dynahash.c:301
Size keysize
Definition: hsearch.h:72
int log_min_messages
Definition: guc.c:451
bool reachedConsistency
Definition: xlog.c:831
#define NULL
Definition: c.h:229
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
int client_min_messages
Definition: guc.c:452
#define elog
Definition: elog.h:219
int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page,
TimeLineID pageTLI 
)

Definition at line 903 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), ThisTimeLineID, XLogRead(), and XLogReadDetermineTimeline().

Referenced by logical_read_local_xlog_page(), and XlogReadTwoPhaseData().

906 {
907  XLogRecPtr read_upto,
908  loc;
909  int count;
910 
911  loc = targetPagePtr + reqLen;
912 
913  /* Loop waiting for xlog to be available if necessary */
914  while (1)
915  {
916  /*
917  * Determine the limit of xlog we can currently read to, and what the
918  * most recent timeline is.
919  *
920  * RecoveryInProgress() will update ThisTimeLineID when it first
921  * notices recovery finishes, so we only have to maintain it for the
922  * local process until recovery ends.
923  */
924  if (!RecoveryInProgress())
925  read_upto = GetFlushRecPtr();
926  else
927  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
928 
929  *pageTLI = ThisTimeLineID;
930 
931  /*
932  * Check which timeline to get the record from.
933  *
934  * We have to do it each time through the loop because if we're in
935  * recovery as a cascading standby, the current timeline might've
936  * become historical. We can't rely on RecoveryInProgress() because in
937  * a standby configuration like
938  *
939  * A => B => C
940  *
941  * if we're a logical decoding session on C, and B gets promoted, our
942  * timeline will change while we remain in recovery.
943  *
944  * We can't just keep reading from the old timeline as the last WAL
945  * archive in the timeline will get renamed to .partial by
946  * StartupXLOG().
947  *
948  * If that happens after our caller updated ThisTimeLineID but before
949  * we actually read the xlog page, we might still try to read from the
950  * old (now renamed) segment and fail. There's not much we can do
951  * about this, but it can only happen when we're a leaf of a cascading
952  * standby whose master gets promoted while we're decoding, so a
953  * one-off ERROR isn't too bad.
954  */
955  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
956 
957  if (state->currTLI == ThisTimeLineID)
958  {
959 
960  if (loc <= read_upto)
961  break;
962 
964  pg_usleep(1000L);
965  }
966  else
967  {
968  /*
969  * We're on a historical timeline, so limit reading to the switch
970  * point where we moved to the next timeline.
971  *
972  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
973  * about the new timeline, so we must've received past the end of
974  * it.
975  */
976  read_upto = state->currTLIValidUntil;
977 
978  /*
979  * Setting pageTLI to our wanted record's TLI is slightly wrong;
980  * the page might begin on an older timeline if it contains a
981  * timeline switch, since its xlog segment will have been copied
982  * from the prior timeline. This is pretty harmless though, as
983  * nothing cares so long as the timeline doesn't go backwards. We
984  * should read the page header instead; FIXME someday.
985  */
986  *pageTLI = state->currTLI;
987 
988  /* No need to wait on a historical timeline */
989  break;
990  }
991  }
992 
993  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
994  {
995  /*
996  * more than one block available; read only that block, have caller
997  * come back if they need more.
998  */
999  count = XLOG_BLCKSZ;
1000  }
1001  else if (targetPagePtr + reqLen > read_upto)
1002  {
1003  /* not enough data there */
1004  return -1;
1005  }
1006  else
1007  {
1008  /* enough bytes available to satisfy the request */
1009  count = read_upto - targetPagePtr;
1010  }
1011 
1012  /*
1013  * Even though we just determined how much of the page can be validly read
1014  * as 'count', read the whole page anyway. It's guaranteed to be
1015  * zero-padded up to the page boundary if it's incomplete.
1016  */
1017  XLogRead(cur_page, *pageTLI, targetPagePtr, XLOG_BLCKSZ);
1018 
1019  /* number of valid bytes in the buffer */
1020  return count;
1021 }
static void XLogRead(char *buf, TimeLineID tli, XLogRecPtr startptr, Size count)
Definition: xlogutils.c:657
XLogRecPtr GetFlushRecPtr(void)
Definition: xlog.c:8222
bool RecoveryInProgress(void)
Definition: xlog.c:7873
void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
Definition: xlogutils.c:799
void pg_usleep(long microsec)
Definition: signal.c:53
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
Definition: xlog.c:11084
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:175
TimeLineID ThisTimeLineID
Definition: xlog.c:179
TimeLineID currTLI
Definition: xlogreader.h:165
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:97
static void report_invalid_page ( int  elevel,
RelFileNode  node,
ForkNumber  forkno,
BlockNumber  blkno,
bool  present 
)
static

Definition at line 63 of file xlogutils.c.

References elog, pfree(), and relpathperm.

Referenced by log_invalid_page(), and XLogCheckInvalidPages().

65 {
66  char *path = relpathperm(node, forkno);
67 
68  if (present)
69  elog(elevel, "page %u of relation %s is uninitialized",
70  blkno, path);
71  else
72  elog(elevel, "page %u of relation %s does not exist",
73  blkno, path);
74  pfree(path);
75 }
#define relpathperm(rnode, forknum)
Definition: relpath.h:67
void pfree(void *pointer)
Definition: mcxt.c:950
static int elevel
Definition: vacuumlazy.c:137
#define elog
Definition: elog.h:219
void XLogCheckInvalidPages ( void  )

Definition at line 221 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, xl_invalid_page_key::node, NULL, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

222 {
224  xl_invalid_page *hentry;
225  bool foundone = false;
226 
227  if (invalid_page_tab == NULL)
228  return; /* nothing to do */
229 
231 
232  /*
233  * Our strategy is to emit WARNING messages for all remaining entries and
234  * only PANIC after we've dumped all the available info.
235  */
236  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
237  {
238  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
239  hentry->key.blkno, hentry->present);
240  foundone = true;
241  }
242 
243  if (foundone)
244  elog(PANIC, "WAL contains references to invalid pages");
245 
248 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:793
ForkNumber forkno
Definition: xlogutils.c:48
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:47
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:63
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
#define WARNING
Definition: elog.h:40
#define NULL
Definition: c.h:229
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1351
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1341
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
void XLogDropDatabase ( Oid  dbid)

Definition at line 618 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

619 {
620  /*
621  * This is unnecessarily heavy-handed, as it will close SMgrRelation
622  * objects for other databases as well. DROP DATABASE occurs seldom enough
623  * that it's not worth introducing a variant of smgrclose for just this
624  * purpose. XXX: Or should we rather leave the smgr entries dangling?
625  */
626  smgrcloseall();
627 
629 }
static void forget_invalid_pages_db(Oid dbid)
Definition: xlogutils.c:178
void smgrcloseall(void)
Definition: smgr.c:326
void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 607 of file xlogutils.c.

References forget_invalid_pages().

Referenced by xact_redo_abort(), and xact_redo_commit().

608 {
609  forget_invalid_pages(rnode, forknum, 0);
610 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143
bool XLogHaveInvalidPages ( void  )

Definition at line 211 of file xlogutils.c.

References hash_get_num_entries(), and NULL.

Referenced by RecoveryRestartPoint().

212 {
213  if (invalid_page_tab != NULL &&
215  return true;
216  return false;
217 }
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1297
#define NULL
Definition: c.h:229
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
static void XLogRead ( char *  buf,
TimeLineID  tli,
XLogRecPtr  startptr,
Size  count 
)
static

Definition at line 657 of file xlogutils.c.

References BasicOpenFile(), buf, close, ereport, errcode_for_file_access(), errmsg(), ERROR, MAXPGPATH, PG_BINARY, pgstat_report_wait_end(), pgstat_report_wait_start(), read, sendFile, sendOff, sendSegNo, WAIT_EVENT_WAL_READ, XLByteInSeg, XLByteToSeg, XLogFilePath, and XLogSegSize.

Referenced by read_local_xlog_page().

658 {
659  char *p;
660  XLogRecPtr recptr;
661  Size nbytes;
662 
663  /* state maintained across calls */
664  static int sendFile = -1;
665  static XLogSegNo sendSegNo = 0;
666  static TimeLineID sendTLI = 0;
667  static uint32 sendOff = 0;
668 
669  p = buf;
670  recptr = startptr;
671  nbytes = count;
672 
673  while (nbytes > 0)
674  {
675  uint32 startoff;
676  int segbytes;
677  int readbytes;
678 
679  startoff = recptr % XLogSegSize;
680 
681  /* Do we need to switch to a different xlog segment? */
682  if (sendFile < 0 || !XLByteInSeg(recptr, sendSegNo) ||
683  sendTLI != tli)
684  {
685  char path[MAXPGPATH];
686 
687  if (sendFile >= 0)
688  close(sendFile);
689 
690  XLByteToSeg(recptr, sendSegNo);
691 
692  XLogFilePath(path, tli, sendSegNo);
693 
694  sendFile = BasicOpenFile(path, O_RDONLY | PG_BINARY, 0);
695 
696  if (sendFile < 0)
697  {
698  if (errno == ENOENT)
699  ereport(ERROR,
701  errmsg("requested WAL segment %s has already been removed",
702  path)));
703  else
704  ereport(ERROR,
706  errmsg("could not open file \"%s\": %m",
707  path)));
708  }
709  sendOff = 0;
710  sendTLI = tli;
711  }
712 
713  /* Need to seek in the file? */
714  if (sendOff != startoff)
715  {
716  if (lseek(sendFile, (off_t) startoff, SEEK_SET) < 0)
717  {
718  char path[MAXPGPATH];
719 
720  XLogFilePath(path, tli, sendSegNo);
721 
722  ereport(ERROR,
724  errmsg("could not seek in log segment %s to offset %u: %m",
725  path, startoff)));
726  }
727  sendOff = startoff;
728  }
729 
730  /* How many bytes are within this segment? */
731  if (nbytes > (XLogSegSize - startoff))
732  segbytes = XLogSegSize - startoff;
733  else
734  segbytes = nbytes;
735 
737  readbytes = read(sendFile, p, segbytes);
739  if (readbytes <= 0)
740  {
741  char path[MAXPGPATH];
742 
743  XLogFilePath(path, tli, sendSegNo);
744 
745  ereport(ERROR,
747  errmsg("could not read from log segment %s, offset %u, length %lu: %m",
748  path, sendOff, (unsigned long) segbytes)));
749  }
750 
751  /* Update state for read */
752  recptr += readbytes;
753 
754  sendOff += readbytes;
755  nbytes -= readbytes;
756  p += readbytes;
757  }
758 }
#define XLogSegSize
Definition: xlog_internal.h:92
uint32 TimeLineID
Definition: xlogdefs.h:45
static int sendFile
Definition: walsender.c:131
#define PG_BINARY
Definition: c.h:1038
#define ERROR
Definition: elog.h:43
#define MAXPGPATH
#define XLogFilePath(path, tli, logSegNo)
static char * buf
Definition: pg_test_fsync.c:66
uint64 XLogSegNo
Definition: xlogdefs.h:34
int errcode_for_file_access(void)
Definition: elog.c:598
unsigned int uint32
Definition: c.h:268
static void pgstat_report_wait_end(void)
Definition: pgstat.h:1232
#define ereport(elevel, rest)
Definition: elog.h:122
#define XLByteToSeg(xlrp, logSegNo)
uint64 XLogRecPtr
Definition: xlogdefs.h:21
size_t Size
Definition: c.h:356
static void pgstat_report_wait_start(uint32 wait_event_info)
Definition: pgstat.h:1208
static XLogSegNo sendSegNo
Definition: walsender.c:132
int errmsg(const char *fmt,...)
Definition: elog.c:797
static uint32 sendOff
Definition: walsender.c:133
#define close(a)
Definition: win32.h:12
#define XLByteInSeg(xlrp, logSegNo)
#define read(a, b, c)
Definition: win32.h:13
int BasicOpenFile(FileName fileName, int fileFlags, int fileMode)
Definition: fd.c:936
Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 438 of file xlogutils.c.

References Assert, buffer, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), NULL, P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by btree_xlog_delete_get_latestRemovedXid(), btree_xlog_vacuum(), checkXLogConsistency(), hash_xlog_vacuum_get_latestRemovedXid(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

440 {
441  BlockNumber lastblock;
442  Buffer buffer;
443  SMgrRelation smgr;
444 
445  Assert(blkno != P_NEW);
446 
447  /* Open the relation at smgr level */
448  smgr = smgropen(rnode, InvalidBackendId);
449 
450  /*
451  * Create the target file if it doesn't already exist. This lets us cope
452  * if the replay sequence contains writes to a relation that is later
453  * deleted. (The original coding of this routine would instead suppress
454  * the writes, but that seems like it risks losing valuable data if the
455  * filesystem loses an inode during a crash. Better to write the data
456  * until we are actually told to delete the file.)
457  */
458  smgrcreate(smgr, forknum, true);
459 
460  lastblock = smgrnblocks(smgr, forknum);
461 
462  if (blkno < lastblock)
463  {
464  /* page exists in file */
465  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
466  mode, NULL);
467  }
468  else
469  {
470  /* hm, page doesn't exist in file */
471  if (mode == RBM_NORMAL)
472  {
473  log_invalid_page(rnode, forknum, blkno, false);
474  return InvalidBuffer;
475  }
476  if (mode == RBM_NORMAL_NO_LOG)
477  return InvalidBuffer;
478  /* OK to extend the file */
479  /* we do this in recovery only - no rel-extension lock needed */
481  buffer = InvalidBuffer;
482  do
483  {
484  if (buffer != InvalidBuffer)
485  {
486  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
488  ReleaseBuffer(buffer);
489  }
490  buffer = ReadBufferWithoutRelcache(rnode, forknum,
491  P_NEW, mode, NULL);
492  }
493  while (BufferGetBlockNumber(buffer) < blkno);
494  /* Handle the corner case that P_NEW returns non-consecutive pages */
495  if (BufferGetBlockNumber(buffer) != blkno)
496  {
497  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
499  ReleaseBuffer(buffer);
500  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
501  mode, NULL);
502  }
503  }
504 
505  if (mode == RBM_NORMAL)
506  {
507  /* check that page has been initialized */
508  Page page = (Page) BufferGetPage(buffer);
509 
510  /*
511  * We assume that PageIsNew is safe without a lock. During recovery,
512  * there should be no other backends that could modify the buffer at
513  * the same time.
514  */
515  if (PageIsNew(page))
516  {
517  ReleaseBuffer(buffer);
518  log_invalid_page(rnode, forknum, blkno, true);
519  return InvalidBuffer;
520  }
521  }
522 
523  return buffer;
524 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:376
bool InRecovery
Definition: xlog.c:192
#define InvalidBuffer
Definition: buf.h:25
Buffer ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:682
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define P_NEW
Definition: bufmgr.h:82
static void log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:79
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:137
#define InvalidBackendId
Definition: backendid.h:23
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:672
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:675
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:211
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define PageIsNew(page)
Definition: bufpage.h:226
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74
XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 290 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

292 {
293  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
294  false, buf);
295 }
static char * buf
Definition: pg_test_fsync.c:66
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:327
XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 327 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

331 {
332  XLogRecPtr lsn = record->EndRecPtr;
333  RelFileNode rnode;
334  ForkNumber forknum;
335  BlockNumber blkno;
336  Page page;
337  bool zeromode;
338  bool willinit;
339 
340  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
341  {
342  /* Caller specified a bogus block_id */
343  elog(PANIC, "failed to locate backup block with ID %d", block_id);
344  }
345 
346  /*
347  * Make sure that if the block is marked with WILL_INIT, the caller is
348  * going to initialize it. And vice versa.
349  */
350  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
351  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
352  if (willinit && !zeromode)
353  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
354  if (!willinit && zeromode)
355  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
356 
357  /* If it has a full-page image and it should be restored, do it. */
358  if (XLogRecBlockImageApply(record, block_id))
359  {
360  Assert(XLogRecHasBlockImage(record, block_id));
361  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
362  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
363  page = BufferGetPage(*buf);
364  if (!RestoreBlockImage(record, block_id, page))
365  elog(ERROR, "failed to restore block image");
366 
367  /*
368  * The page may be uninitialized. If so, we can't set the LSN because
369  * that would corrupt the page.
370  */
371  if (!PageIsNew(page))
372  {
373  PageSetLSN(page, lsn);
374  }
375 
376  MarkBufferDirty(*buf);
377 
378  /*
379  * At the end of crash recovery the init forks of unlogged relations
380  * are copied, without going through shared buffers. So we need to
381  * force the on-disk state of init forks to always be in sync with the
382  * state in shared buffers.
383  */
384  if (forknum == INIT_FORKNUM)
385  FlushOneBuffer(*buf);
386 
387  return BLK_RESTORED;
388  }
389  else
390  {
391  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
392  if (BufferIsValid(*buf))
393  {
394  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
395  {
396  if (get_cleanup_lock)
397  LockBufferForCleanup(*buf);
398  else
399  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
400  }
401  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
402  return BLK_DONE;
403  else
404  return BLK_NEEDS_REDO;
405  }
406  else
407  return BLK_NOTFOUND;
408  }
409 }
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3603
#define XLogRecHasBlockImage(decoder, block_id)
Definition: xlogreader.h:225
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
Definition: xlogutils.c:438
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PANIC
Definition: elog.h:53
XLogRecPtr EndRecPtr
Definition: xlogreader.h:115
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:182
ForkNumber
Definition: relpath.h:24
bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1307
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:675
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3289
bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
Definition: xlogreader.c:1360
#define PageGetLSN(page)
Definition: bufpage.h:363
#define PageIsNew(page)
Definition: bufpage.h:226
#define elog
Definition: elog.h:219
#define XLogRecBlockImageApply(decoder, block_id)
Definition: xlogreader.h:227
#define PageSetLSN(page, lsn)
Definition: bufpage.h:365
Pointer Page
Definition: bufpage.h:74
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID+1]
Definition: xlogreader.h:134
void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 799 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), Min, XLogReaderState::nextTLI, XLogReaderState::readLen, XLogReaderState::readOff, XLogReaderState::readSegNo, readTimeLineHistory(), ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), and XLogSegSize.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

800 {
801  const XLogRecPtr lastReadPage = state->readSegNo * XLogSegSize + state->readOff;
802 
803  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
804  Assert(wantLength <= XLOG_BLCKSZ);
805  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
806 
807  /*
808  * If the desired page is currently read in and valid, we have nothing to
809  * do.
810  *
811  * The caller should've ensured that it didn't previously advance readOff
812  * past the valid limit of this timeline, so it doesn't matter if the
813  * current TLI has since become historical.
814  */
815  if (lastReadPage == wantPage &&
816  state->readLen != 0 &&
817  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
818  return;
819 
820  /*
821  * If we're reading from the current timeline, it hasn't become historical
822  * and the page we're reading is after the last page read, we can again
823  * just carry on. (Seeking backwards requires a check to make sure the
824  * older page isn't on a prior timeline).
825  *
826  * ThisTimeLineID might've become historical since we last looked, but the
827  * caller is required not to read past the flush limit it saw at the time
828  * it looked up the timeline. There's nothing we can do about it if
829  * StartupXLOG() renames it to .partial concurrently.
830  */
831  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
832  {
833  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
834  return;
835  }
836 
837  /*
838  * If we're just reading pages from a previously validated historical
839  * timeline and the timeline we're reading from is valid until the end of
840  * the current segment we can just keep reading.
841  */
842  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
843  state->currTLI != ThisTimeLineID &&
844  state->currTLI != 0 &&
845  (wantPage + wantLength) / XLogSegSize < state->currTLIValidUntil / XLogSegSize)
846  return;
847 
848  /*
849  * If we reach this point we're either looking up a page for random
850  * access, the current timeline just became historical, or we're reading
851  * from a new segment containing a timeline switch. In all cases we need
852  * to determine the newest timeline on the segment.
853  *
854  * If it's the current timeline we can just keep reading from here unless
855  * we detect a timeline switch that makes the current timeline historical.
856  * If it's a historical timeline we can read all the segment on the newest
857  * timeline because it contains all the old timelines' data too. So only
858  * one switch check is required.
859  */
860  {
861  /*
862  * We need to re-read the timeline history in case it's been changed
863  * by a promotion or replay from a cascaded replica.
864  */
865  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
866 
867  XLogRecPtr endOfSegment = (((wantPage / XLogSegSize) + 1) * XLogSegSize) - 1;
868 
869  Assert(wantPage / XLogSegSize == endOfSegment / XLogSegSize);
870 
871  /*
872  * Find the timeline of the last LSN on the segment containing
873  * wantPage.
874  */
875  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
876  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
877  &state->nextTLI);
878 
879  Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
880  wantPage + wantLength < state->currTLIValidUntil);
881 
882  list_free_deep(timelineHistory);
883 
884  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
885  state->currTLI,
886  (uint32) (state->currTLIValidUntil >> 32),
887  (uint32) (state->currTLIValidUntil));
888  }
889 }
#define XLogSegSize
Definition: xlog_internal.h:92
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
TimeLineID tliOfPointInHistory(XLogRecPtr ptr, List *history)
Definition: timeline.c:535
#define DEBUG3
Definition: elog.h:23
#define Min(x, y)
Definition: c.h:806
List * readTimeLineHistory(TimeLineID targetTLI)
Definition: timeline.c:75
void list_free_deep(List *list)
Definition: list.c:1147
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:175
unsigned int uint32
Definition: c.h:268
TimeLineID nextTLI
Definition: xlogreader.h:181
XLogRecPtr tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
Definition: timeline.c:563
TimeLineID ThisTimeLineID
Definition: xlog.c:179
TimeLineID currTLI
Definition: xlogreader.h:165
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:675
XLogSegNo readSegNo
Definition: xlogreader.h:151
#define elog
Definition: elog.h:219
Definition: pg_list.h:45
void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 637 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

639 {
640  forget_invalid_pages(rnode, forkNum, nblocks);
641 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143

Variable Documentation

HTAB* invalid_page_tab = NULL
static

Definition at line 58 of file xlogutils.c.