PostgreSQL Source Code  git master
xlogutils.h File Reference
#include "access/xlogreader.h"
#include "storage/bufmgr.h"
Include dependency graph for xlogutils.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Enumerations

enum  XLogRedoAction { BLK_NEEDS_REDO, BLK_DONE, BLK_RESTORED, BLK_NOTFOUND }
 

Functions

bool XLogHaveInvalidPages (void)
 
void XLogCheckInvalidPages (void)
 
void XLogDropRelation (RelFileNode rnode, ForkNumber forknum)
 
void XLogDropDatabase (Oid dbid)
 
void XLogTruncateRelation (RelFileNode rnode, ForkNumber forkNum, BlockNumber nblocks)
 
XLogRedoAction XLogReadBufferForRedo (XLogReaderState *record, uint8 block_id, Buffer *buf)
 
Buffer XLogInitBufferForRedo (XLogReaderState *record, uint8 block_id)
 
XLogRedoAction XLogReadBufferForRedoExtended (XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
 
Buffer XLogReadBufferExtended (RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
 
Relation CreateFakeRelcacheEntry (RelFileNode rnode)
 
void FreeFakeRelcacheEntry (Relation fakerel)
 
int read_local_xlog_page (XLogReaderState *state, XLogRecPtr targetPagePtr, int reqLen, XLogRecPtr targetRecPtr, char *cur_page, TimeLineID *pageTLI)
 
void XLogReadDetermineTimeline (XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
 

Enumeration Type Documentation

◆ XLogRedoAction

Enumerator
BLK_NEEDS_REDO 
BLK_DONE 
BLK_RESTORED 
BLK_NOTFOUND 

Definition at line 27 of file xlogutils.h.

28 {
29  BLK_NEEDS_REDO, /* changes from WAL record need to be applied */
30  BLK_DONE, /* block is already up-to-date */
31  BLK_RESTORED, /* block was restored from a full-page image */
32  BLK_NOTFOUND /* block was not found (and hence does not
33  * need to be replayed) */
XLogRedoAction
Definition: xlogutils.h:27

Function Documentation

◆ CreateFakeRelcacheEntry()

Relation CreateFakeRelcacheEntry ( RelFileNode  rnode)

Definition at line 551 of file xlogutils.c.

References Assert, LockRelId::dbId, RelFileNode::dbNode, InRecovery, InvalidBackendId, LockInfoData::lockRelId, palloc0(), FakeRelCacheEntryData::pgc, RelationData::rd_backend, RelationData::rd_lockInfo, RelationData::rd_node, RelationData::rd_rel, RelationData::rd_smgr, RelationGetRelationName, LockRelId::relId, RelFileNode::relNode, and RELPERSISTENCE_PERMANENT.

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

552 {
553  FakeRelCacheEntry fakeentry;
554  Relation rel;
555 
556  Assert(InRecovery);
557 
558  /* Allocate the Relation struct and all related space in one block. */
559  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
560  rel = (Relation) fakeentry;
561 
562  rel->rd_rel = &fakeentry->pgc;
563  rel->rd_node = rnode;
564  /* We will never be working with temp rels during recovery */
565  rel->rd_backend = InvalidBackendId;
566 
567  /* It must be a permanent table if we're in recovery. */
568  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
569 
570  /* We don't know the name of the relation; use relfilenode instead */
571  sprintf(RelationGetRelationName(rel), "%u", rnode.relNode);
572 
573  /*
574  * We set up the lockRelId in case anything tries to lock the dummy
575  * relation. Note that this is fairly bogus since relNode may be
576  * different from the relation's OID. It shouldn't really matter though,
577  * since we are presumably running by ourselves and can't have any lock
578  * conflicts ...
579  */
580  rel->rd_lockInfo.lockRelId.dbId = rnode.dbNode;
581  rel->rd_lockInfo.lockRelId.relId = rnode.relNode;
582 
583  rel->rd_smgr = NULL;
584 
585  return rel;
586 }
LockRelId lockRelId
Definition: rel.h:44
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
bool InRecovery
Definition: xlog.c:194
Oid dbId
Definition: rel.h:39
Form_pg_class rd_rel
Definition: rel.h:114
#define RELPERSISTENCE_PERMANENT
Definition: pg_class.h:170
struct RelationData * Relation
Definition: relcache.h:26
LockInfoData rd_lockInfo
Definition: rel.h:117
#define RelationGetRelationName(relation)
Definition: rel.h:436
#define InvalidBackendId
Definition: backendid.h:23
void * palloc0(Size size)
Definition: mcxt.c:877
RelFileNode rd_node
Definition: rel.h:85
BackendId rd_backend
Definition: rel.h:89
#define Assert(condition)
Definition: c.h:670
FormData_pg_class pgc
Definition: xlogutils.c:533
Oid relId
Definition: rel.h:38

◆ FreeFakeRelcacheEntry()

void FreeFakeRelcacheEntry ( Relation  fakerel)

Definition at line 592 of file xlogutils.c.

References pfree(), RelationData::rd_smgr, and smgrclearowner().

Referenced by heap_xlog_delete(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), and smgr_redo().

593 {
594  /* make sure the fakerel is not referenced by the SmgrRelation anymore */
595  if (fakerel->rd_smgr != NULL)
596  smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
597  pfree(fakerel);
598 }
void smgrclearowner(SMgrRelation *owner, SMgrRelation reln)
Definition: smgr.c:222
struct SMgrRelationData * rd_smgr
Definition: rel.h:87
void pfree(void *pointer)
Definition: mcxt.c:949

◆ read_local_xlog_page()

int read_local_xlog_page ( XLogReaderState state,
XLogRecPtr  targetPagePtr,
int  reqLen,
XLogRecPtr  targetRecPtr,
char *  cur_page,
TimeLineID pageTLI 
)

Definition at line 910 of file xlogutils.c.

References CHECK_FOR_INTERRUPTS, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, GetFlushRecPtr(), GetXLogReplayRecPtr(), pg_usleep(), RecoveryInProgress(), ThisTimeLineID, XLogReaderState::wal_segment_size, XLogRead(), and XLogReadDetermineTimeline().

Referenced by logical_read_local_xlog_page(), and XlogReadTwoPhaseData().

913 {
914  XLogRecPtr read_upto,
915  loc;
916  int count;
917 
918  loc = targetPagePtr + reqLen;
919 
920  /* Loop waiting for xlog to be available if necessary */
921  while (1)
922  {
923  /*
924  * Determine the limit of xlog we can currently read to, and what the
925  * most recent timeline is.
926  *
927  * RecoveryInProgress() will update ThisTimeLineID when it first
928  * notices recovery finishes, so we only have to maintain it for the
929  * local process until recovery ends.
930  */
931  if (!RecoveryInProgress())
932  read_upto = GetFlushRecPtr();
933  else
934  read_upto = GetXLogReplayRecPtr(&ThisTimeLineID);
935 
936  *pageTLI = ThisTimeLineID;
937 
938  /*
939  * Check which timeline to get the record from.
940  *
941  * We have to do it each time through the loop because if we're in
942  * recovery as a cascading standby, the current timeline might've
943  * become historical. We can't rely on RecoveryInProgress() because in
944  * a standby configuration like
945  *
946  * A => B => C
947  *
948  * if we're a logical decoding session on C, and B gets promoted, our
949  * timeline will change while we remain in recovery.
950  *
951  * We can't just keep reading from the old timeline as the last WAL
952  * archive in the timeline will get renamed to .partial by
953  * StartupXLOG().
954  *
955  * If that happens after our caller updated ThisTimeLineID but before
956  * we actually read the xlog page, we might still try to read from the
957  * old (now renamed) segment and fail. There's not much we can do
958  * about this, but it can only happen when we're a leaf of a cascading
959  * standby whose master gets promoted while we're decoding, so a
960  * one-off ERROR isn't too bad.
961  */
962  XLogReadDetermineTimeline(state, targetPagePtr, reqLen);
963 
964  if (state->currTLI == ThisTimeLineID)
965  {
966 
967  if (loc <= read_upto)
968  break;
969 
970  CHECK_FOR_INTERRUPTS();
971  pg_usleep(1000L);
972  }
973  else
974  {
975  /*
976  * We're on a historical timeline, so limit reading to the switch
977  * point where we moved to the next timeline.
978  *
979  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
980  * about the new timeline, so we must've received past the end of
981  * it.
982  */
983  read_upto = state->currTLIValidUntil;
984 
985  /*
986  * Setting pageTLI to our wanted record's TLI is slightly wrong;
987  * the page might begin on an older timeline if it contains a
988  * timeline switch, since its xlog segment will have been copied
989  * from the prior timeline. This is pretty harmless though, as
990  * nothing cares so long as the timeline doesn't go backwards. We
991  * should read the page header instead; FIXME someday.
992  */
993  *pageTLI = state->currTLI;
994 
995  /* No need to wait on a historical timeline */
996  break;
997  }
998  }
999 
1000  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
1001  {
1002  /*
1003  * more than one block available; read only that block, have caller
1004  * come back if they need more.
1005  */
1006  count = XLOG_BLCKSZ;
1007  }
1008  else if (targetPagePtr + reqLen > read_upto)
1009  {
1010  /* not enough data there */
1011  return -1;
1012  }
1013  else
1014  {
1015  /* enough bytes available to satisfy the request */
1016  count = read_upto - targetPagePtr;
1017  }
1018 
1019  /*
1020  * Even though we just determined how much of the page can be validly read
1021  * as 'count', read the whole page anyway. It's guaranteed to be
1022  * zero-padded up to the page boundary if it's incomplete.
1023  */
1024  XLogRead(cur_page, state->wal_segment_size, *pageTLI, targetPagePtr,
1025  XLOG_BLCKSZ);
1026 
1027  /* number of valid bytes in the buffer */
1028  return count;
1029 }
XLogRecPtr GetFlushRecPtr(void)
Definition: xlog.c:8261
bool RecoveryInProgress(void)
Definition: xlog.c:7929
void XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage, uint32 wantLength)
Definition: xlogutils.c:802
void pg_usleep(long microsec)
Definition: signal.c:53
XLogRecPtr GetXLogReplayRecPtr(TimeLineID *replayTLI)
Definition: xlog.c:11128
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:180
int wal_segment_size
Definition: xlogreader.h:79
TimeLineID ThisTimeLineID
Definition: xlog.c:181
TimeLineID currTLI
Definition: xlogreader.h:170
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:98
static void XLogRead(char *buf, int segsize, TimeLineID tli, XLogRecPtr startptr, Size count)
Definition: xlogutils.c:657

◆ XLogCheckInvalidPages()

void XLogCheckInvalidPages ( void  )

Definition at line 221 of file xlogutils.c.

References xl_invalid_page_key::blkno, elog, xl_invalid_page_key::forkno, hash_destroy(), hash_seq_init(), hash_seq_search(), xl_invalid_page::key, xl_invalid_page_key::node, PANIC, xl_invalid_page::present, report_invalid_page(), status(), and WARNING.

Referenced by CheckRecoveryConsistency().

222 {
223  HASH_SEQ_STATUS status;
224  xl_invalid_page *hentry;
225  bool foundone = false;
226 
227  if (invalid_page_tab == NULL)
228  return; /* nothing to do */
229 
230  hash_seq_init(&status, invalid_page_tab);
231 
232  /*
233  * Our strategy is to emit WARNING messages for all remaining entries and
234  * only PANIC after we've dumped all the available info.
235  */
236  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
237  {
238  report_invalid_page(WARNING, hentry->key.node, hentry->key.forkno,
239  hentry->key.blkno, hentry->present);
240  foundone = true;
241  }
242 
243  if (foundone)
244  elog(PANIC, "WAL contains references to invalid pages");
245 
246  hash_destroy(invalid_page_tab);
247  invalid_page_tab = NULL;
248 }
void hash_destroy(HTAB *hashp)
Definition: dynahash.c:810
ForkNumber forkno
Definition: xlogutils.c:48
#define PANIC
Definition: elog.h:53
RelFileNode node
Definition: xlogutils.c:47
static void report_invalid_page(int elevel, RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:63
BlockNumber blkno
Definition: xlogutils.c:49
xl_invalid_page_key key
Definition: xlogutils.c:54
#define WARNING
Definition: elog.h:40
void * hash_seq_search(HASH_SEQ_STATUS *status)
Definition: dynahash.c:1385
void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp)
Definition: dynahash.c:1375
static HTAB * invalid_page_tab
Definition: xlogutils.c:58
#define elog
Definition: elog.h:219
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ XLogDropDatabase()

void XLogDropDatabase ( Oid  dbid)

Definition at line 618 of file xlogutils.c.

References forget_invalid_pages_db(), and smgrcloseall().

Referenced by dbase_redo().

619 {
620  /*
621  * This is unnecessarily heavy-handed, as it will close SMgrRelation
622  * objects for other databases as well. DROP DATABASE occurs seldom enough
623  * that it's not worth introducing a variant of smgrclose for just this
624  * purpose. XXX: Or should we rather leave the smgr entries dangling?
625  */
626  smgrcloseall();
627 
628  forget_invalid_pages_db(dbid);
629 }
static void forget_invalid_pages_db(Oid dbid)
Definition: xlogutils.c:178
void smgrcloseall(void)
Definition: smgr.c:326

◆ XLogDropRelation()

void XLogDropRelation ( RelFileNode  rnode,
ForkNumber  forknum 
)

Definition at line 607 of file xlogutils.c.

References forget_invalid_pages().

Referenced by xact_redo_abort(), and xact_redo_commit().

608 {
609  forget_invalid_pages(rnode, forknum, 0);
610 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143

◆ XLogHaveInvalidPages()

bool XLogHaveInvalidPages ( void  )

Definition at line 211 of file xlogutils.c.

References hash_get_num_entries().

Referenced by RecoveryRestartPoint().

212 {
213  if (invalid_page_tab != NULL &&
214  hash_get_num_entries(invalid_page_tab) > 0)
215  return true;
216  return false;
217 }
long hash_get_num_entries(HTAB *hashp)
Definition: dynahash.c:1331
static HTAB * invalid_page_tab
Definition: xlogutils.c:58

◆ XLogInitBufferForRedo()

◆ XLogReadBufferExtended()

Buffer XLogReadBufferExtended ( RelFileNode  rnode,
ForkNumber  forknum,
BlockNumber  blkno,
ReadBufferMode  mode 
)

Definition at line 438 of file xlogutils.c.

References Assert, buffer, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, InRecovery, InvalidBackendId, InvalidBuffer, LockBuffer(), log_invalid_page(), P_NEW, PageIsNew, RBM_NORMAL, RBM_NORMAL_NO_LOG, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, ReadBufferWithoutRelcache(), ReleaseBuffer(), smgrcreate(), smgrnblocks(), and smgropen().

Referenced by btree_xlog_delete_get_latestRemovedXid(), btree_xlog_vacuum(), checkXLogConsistency(), hash_xlog_vacuum_get_latestRemovedXid(), XLogReadBufferForRedoExtended(), and XLogRecordPageWithFreeSpace().

440 {
441  BlockNumber lastblock;
442  Buffer buffer;
443  SMgrRelation smgr;
444 
445  Assert(blkno != P_NEW);
446 
447  /* Open the relation at smgr level */
448  smgr = smgropen(rnode, InvalidBackendId);
449 
450  /*
451  * Create the target file if it doesn't already exist. This lets us cope
452  * if the replay sequence contains writes to a relation that is later
453  * deleted. (The original coding of this routine would instead suppress
454  * the writes, but that seems like it risks losing valuable data if the
455  * filesystem loses an inode during a crash. Better to write the data
456  * until we are actually told to delete the file.)
457  */
458  smgrcreate(smgr, forknum, true);
459 
460  lastblock = smgrnblocks(smgr, forknum);
461 
462  if (blkno < lastblock)
463  {
464  /* page exists in file */
465  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
466  mode, NULL);
467  }
468  else
469  {
470  /* hm, page doesn't exist in file */
471  if (mode == RBM_NORMAL)
472  {
473  log_invalid_page(rnode, forknum, blkno, false);
474  return InvalidBuffer;
475  }
476  if (mode == RBM_NORMAL_NO_LOG)
477  return InvalidBuffer;
478  /* OK to extend the file */
479  /* we do this in recovery only - no rel-extension lock needed */
480  Assert(InRecovery);
481  buffer = InvalidBuffer;
482  do
483  {
484  if (buffer != InvalidBuffer)
485  {
486  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
487  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
488  ReleaseBuffer(buffer);
489  }
490  buffer = ReadBufferWithoutRelcache(rnode, forknum,
491  P_NEW, mode, NULL);
492  }
493  while (BufferGetBlockNumber(buffer) < blkno);
494  /* Handle the corner case that P_NEW returns non-consecutive pages */
495  if (BufferGetBlockNumber(buffer) != blkno)
496  {
497  if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
498  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
499  ReleaseBuffer(buffer);
500  buffer = ReadBufferWithoutRelcache(rnode, forknum, blkno,
501  mode, NULL);
502  }
503  }
504 
505  if (mode == RBM_NORMAL)
506  {
507  /* check that page has been initialized */
508  Page page = (Page) BufferGetPage(buffer);
509 
510  /*
511  * We assume that PageIsNew is safe without a lock. During recovery,
512  * there should be no other backends that could modify the buffer at
513  * the same time.
514  */
515  if (PageIsNew(page))
516  {
517  ReleaseBuffer(buffer);
518  log_invalid_page(rnode, forknum, blkno, true);
519  return InvalidBuffer;
520  }
521  }
522 
523  return buffer;
524 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void smgrcreate(SMgrRelation reln, ForkNumber forknum, bool isRedo)
Definition: smgr.c:376
bool InRecovery
Definition: xlog.c:194
#define InvalidBuffer
Definition: buf.h:25
Buffer ReadBufferWithoutRelcache(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, ReadBufferMode mode, BufferAccessStrategy strategy)
Definition: bufmgr.c:682
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define P_NEW
Definition: bufmgr.h:82
static void log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno, bool present)
Definition: xlogutils.c:79
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
SMgrRelation smgropen(RelFileNode rnode, BackendId backend)
Definition: smgr.c:137
#define InvalidBackendId
Definition: backendid.h:23
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
BlockNumber smgrnblocks(SMgrRelation reln, ForkNumber forknum)
Definition: smgr.c:672
#define Assert(condition)
Definition: c.h:670
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define PageIsNew(page)
Definition: bufpage.h:225
int Buffer
Definition: buf.h:23
Pointer Page
Definition: bufpage.h:74

◆ XLogReadBufferForRedo()

XLogRedoAction XLogReadBufferForRedo ( XLogReaderState record,
uint8  block_id,
Buffer buf 
)

Definition at line 290 of file xlogutils.c.

References RBM_NORMAL, and XLogReadBufferForRedoExtended().

Referenced by _bt_clear_incomplete_split(), brin_xlog_desummarize_page(), brin_xlog_insert_update(), brin_xlog_revmap_extend(), brin_xlog_samepage_update(), brin_xlog_update(), btree_xlog_delete(), btree_xlog_insert(), btree_xlog_mark_page_halfdead(), btree_xlog_split(), btree_xlog_unlink_page(), generic_redo(), ginRedoClearIncompleteSplit(), ginRedoDeletePage(), ginRedoInsert(), ginRedoSplit(), ginRedoUpdateMetapage(), ginRedoVacuumDataLeafPage(), ginRedoVacuumPage(), gistRedoClearFollowRight(), gistRedoPageUpdateRecord(), hash_xlog_add_ovfl_page(), hash_xlog_delete(), hash_xlog_init_bitmap_page(), hash_xlog_insert(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_split_cleanup(), hash_xlog_split_complete(), hash_xlog_split_page(), hash_xlog_squeeze_page(), hash_xlog_update_meta_page(), hash_xlog_vacuum_one_page(), heap_xlog_confirm(), heap_xlog_delete(), heap_xlog_freeze_page(), heap_xlog_inplace(), heap_xlog_insert(), heap_xlog_lock(), heap_xlog_lock_updated(), heap_xlog_multi_insert(), heap_xlog_update(), heap_xlog_visible(), spgRedoAddLeaf(), spgRedoAddNode(), spgRedoMoveLeafs(), spgRedoPickSplit(), spgRedoSplitTuple(), spgRedoVacuumLeaf(), spgRedoVacuumRedirect(), spgRedoVacuumRoot(), and xlog_redo().

292 {
293  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
294  false, buf);
295 }
static char * buf
Definition: pg_test_fsync.c:67
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition: xlogutils.c:327

◆ XLogReadBufferForRedoExtended()

XLogRedoAction XLogReadBufferForRedoExtended ( XLogReaderState record,
uint8  block_id,
ReadBufferMode  mode,
bool  get_cleanup_lock,
Buffer buf 
)

Definition at line 327 of file xlogutils.c.

References Assert, BKPBLOCK_WILL_INIT, BLK_DONE, BLK_NEEDS_REDO, BLK_NOTFOUND, BLK_RESTORED, xl_invalid_page_key::blkno, XLogReaderState::blocks, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, BufferIsValid, elog, XLogReaderState::EndRecPtr, ERROR, DecodedBkpBlock::flags, FlushOneBuffer(), INIT_FORKNUM, LockBuffer(), LockBufferForCleanup(), MarkBufferDirty(), PageGetLSN, PageIsNew, PageSetLSN, PANIC, RBM_ZERO_AND_CLEANUP_LOCK, RBM_ZERO_AND_LOCK, RestoreBlockImage(), XLogReadBufferExtended(), XLogRecBlockImageApply, XLogRecGetBlockTag(), and XLogRecHasBlockImage.

Referenced by btree_xlog_vacuum(), hash_xlog_delete(), hash_xlog_move_page_contents(), hash_xlog_split_allocate_page(), hash_xlog_squeeze_page(), hash_xlog_vacuum_one_page(), heap_xlog_clean(), heap_xlog_visible(), XLogInitBufferForRedo(), and XLogReadBufferForRedo().

331 {
332  XLogRecPtr lsn = record->EndRecPtr;
333  RelFileNode rnode;
334  ForkNumber forknum;
335  BlockNumber blkno;
336  Page page;
337  bool zeromode;
338  bool willinit;
339 
340  if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno))
341  {
342  /* Caller specified a bogus block_id */
343  elog(PANIC, "failed to locate backup block with ID %d", block_id);
344  }
345 
346  /*
347  * Make sure that if the block is marked with WILL_INIT, the caller is
348  * going to initialize it. And vice versa.
349  */
350  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
351  willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0;
352  if (willinit && !zeromode)
353  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
354  if (!willinit && zeromode)
355  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
356 
357  /* If it has a full-page image and it should be restored, do it. */
358  if (XLogRecBlockImageApply(record, block_id))
359  {
360  Assert(XLogRecHasBlockImage(record, block_id));
361  *buf = XLogReadBufferExtended(rnode, forknum, blkno,
362  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK);
363  page = BufferGetPage(*buf);
364  if (!RestoreBlockImage(record, block_id, page))
365  elog(ERROR, "failed to restore block image");
366 
367  /*
368  * The page may be uninitialized. If so, we can't set the LSN because
369  * that would corrupt the page.
370  */
371  if (!PageIsNew(page))
372  {
373  PageSetLSN(page, lsn);
374  }
375 
376  MarkBufferDirty(*buf);
377 
378  /*
379  * At the end of crash recovery the init forks of unlogged relations
380  * are copied, without going through shared buffers. So we need to
381  * force the on-disk state of init forks to always be in sync with the
382  * state in shared buffers.
383  */
384  if (forknum == INIT_FORKNUM)
385  FlushOneBuffer(*buf);
386 
387  return BLK_RESTORED;
388  }
389  else
390  {
391  *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode);
392  if (BufferIsValid(*buf))
393  {
394  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
395  {
396  if (get_cleanup_lock)
397  LockBufferForCleanup(*buf);
398  else
399  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
400  }
401  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
402  return BLK_DONE;
403  else
404  return BLK_NEEDS_REDO;
405  }
406  else
407  return BLK_NOTFOUND;
408  }
409 }
void LockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:3603
#define XLogRecHasBlockImage(decoder, block_id)
Definition: xlogreader.h:231
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
Buffer XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum, BlockNumber blkno, ReadBufferMode mode)
Definition: xlogutils.c:438
uint32 BlockNumber
Definition: block.h:31
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PANIC
Definition: elog.h:53
XLogRecPtr EndRecPtr
Definition: xlogreader.h:120
#define ERROR
Definition: elog.h:43
static char * buf
Definition: pg_test_fsync.c:67
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define BKPBLOCK_WILL_INIT
Definition: xlogrecord.h:183
ForkNumber
Definition: relpath.h:24
bool XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileNode *rnode, ForkNumber *forknum, BlockNumber *blknum)
Definition: xlogreader.c:1309
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
void FlushOneBuffer(Buffer buffer)
Definition: bufmgr.c:3289
bool RestoreBlockImage(XLogReaderState *record, uint8 block_id, char *page)
Definition: xlogreader.c:1362
#define PageGetLSN(page)
Definition: bufpage.h:362
#define PageIsNew(page)
Definition: bufpage.h:225
#define elog
Definition: elog.h:219
#define XLogRecBlockImageApply(decoder, block_id)
Definition: xlogreader.h:233
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
Pointer Page
Definition: bufpage.h:74
DecodedBkpBlock blocks[XLR_MAX_BLOCK_ID+1]
Definition: xlogreader.h:139

◆ XLogReadDetermineTimeline()

void XLogReadDetermineTimeline ( XLogReaderState state,
XLogRecPtr  wantPage,
uint32  wantLength 
)

Definition at line 802 of file xlogutils.c.

References Assert, XLogReaderState::currTLI, XLogReaderState::currTLIValidUntil, DEBUG3, elog, InvalidXLogRecPtr, list_free_deep(), Min, XLogReaderState::nextTLI, XLogReaderState::readLen, XLogReaderState::readOff, XLogReaderState::readSegNo, readTimeLineHistory(), ThisTimeLineID, tliOfPointInHistory(), tliSwitchPoint(), and XLogReaderState::wal_segment_size.

Referenced by logical_read_xlog_page(), and read_local_xlog_page().

803 {
804  const XLogRecPtr lastReadPage = state->readSegNo *
805  state->wal_segment_size + state->readOff;
806 
807  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
808  Assert(wantLength <= XLOG_BLCKSZ);
809  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
810 
811  /*
812  * If the desired page is currently read in and valid, we have nothing to
813  * do.
814  *
815  * The caller should've ensured that it didn't previously advance readOff
816  * past the valid limit of this timeline, so it doesn't matter if the
817  * current TLI has since become historical.
818  */
819  if (lastReadPage == wantPage &&
820  state->readLen != 0 &&
821  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
822  return;
823 
824  /*
825  * If we're reading from the current timeline, it hasn't become historical
826  * and the page we're reading is after the last page read, we can again
827  * just carry on. (Seeking backwards requires a check to make sure the
828  * older page isn't on a prior timeline).
829  *
830  * ThisTimeLineID might've become historical since we last looked, but the
831  * caller is required not to read past the flush limit it saw at the time
832  * it looked up the timeline. There's nothing we can do about it if
833  * StartupXLOG() renames it to .partial concurrently.
834  */
835  if (state->currTLI == ThisTimeLineID && wantPage >= lastReadPage)
836  {
837  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
838  return;
839  }
840 
841  /*
842  * If we're just reading pages from a previously validated historical
843  * timeline and the timeline we're reading from is valid until the end of
844  * the current segment we can just keep reading.
845  */
846  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
847  state->currTLI != ThisTimeLineID &&
848  state->currTLI != 0 &&
849  ((wantPage + wantLength) / state->wal_segment_size) <
850  (state->currTLIValidUntil / state->wal_segment_size))
851  return;
852 
853  /*
854  * If we reach this point we're either looking up a page for random
855  * access, the current timeline just became historical, or we're reading
856  * from a new segment containing a timeline switch. In all cases we need
857  * to determine the newest timeline on the segment.
858  *
859  * If it's the current timeline we can just keep reading from here unless
860  * we detect a timeline switch that makes the current timeline historical.
861  * If it's a historical timeline we can read all the segment on the newest
862  * timeline because it contains all the old timelines' data too. So only
863  * one switch check is required.
864  */
865  {
866  /*
867  * We need to re-read the timeline history in case it's been changed
868  * by a promotion or replay from a cascaded replica.
869  */
870  List *timelineHistory = readTimeLineHistory(ThisTimeLineID);
871 
872  XLogRecPtr endOfSegment = (((wantPage / state->wal_segment_size) + 1)
873  * state->wal_segment_size) - 1;
874 
875  Assert(wantPage / state->wal_segment_size ==
876  endOfSegment / state->wal_segment_size);
877 
878  /*
879  * Find the timeline of the last LSN on the segment containing
880  * wantPage.
881  */
882  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
883  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
884  &state->nextTLI);
885 
886  Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
887  wantPage + wantLength < state->currTLIValidUntil);
888 
889  list_free_deep(timelineHistory);
890 
891  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
892  state->currTLI,
893  (uint32) (state->currTLIValidUntil >> 32),
894  (uint32) (state->currTLIValidUntil));
895  }
896 }
#define InvalidXLogRecPtr
Definition: xlogdefs.h:28
TimeLineID tliOfPointInHistory(XLogRecPtr ptr, List *history)
Definition: timeline.c:533
#define DEBUG3
Definition: elog.h:23
#define Min(x, y)
Definition: c.h:802
List * readTimeLineHistory(TimeLineID targetTLI)
Definition: timeline.c:75
void list_free_deep(List *list)
Definition: list.c:1147
XLogRecPtr currTLIValidUntil
Definition: xlogreader.h:180
unsigned int uint32
Definition: c.h:296
int wal_segment_size
Definition: xlogreader.h:79
TimeLineID nextTLI
Definition: xlogreader.h:186
XLogRecPtr tliSwitchPoint(TimeLineID tli, List *history, TimeLineID *nextTLI)
Definition: timeline.c:561
TimeLineID ThisTimeLineID
Definition: xlog.c:181
TimeLineID currTLI
Definition: xlogreader.h:170
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
XLogSegNo readSegNo
Definition: xlogreader.h:156
#define elog
Definition: elog.h:219
Definition: pg_list.h:45

◆ XLogTruncateRelation()

void XLogTruncateRelation ( RelFileNode  rnode,
ForkNumber  forkNum,
BlockNumber  nblocks 
)

Definition at line 637 of file xlogutils.c.

References forget_invalid_pages().

Referenced by smgr_redo().

639 {
640  forget_invalid_pages(rnode, forkNum, nblocks);
641 }
static void forget_invalid_pages(RelFileNode node, ForkNumber forkno, BlockNumber minblkno)
Definition: xlogutils.c:143