PostgreSQL Source Code (git master)
xlogutils.c
1 /*-------------------------------------------------------------------------
2  *
3  * xlogutils.c
4  *
5  * PostgreSQL write-ahead log manager utility routines
6  *
7  * This file contains support routines that are used by XLOG replay functions.
8  * None of this code is used during normal system operation.
9  *
10  *
11  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
12  * Portions Copyright (c) 1994, Regents of the University of California
13  *
14  * src/backend/access/transam/xlogutils.c
15  *
16  *-------------------------------------------------------------------------
17  */
18 #include "postgres.h"
19 
20 #include <unistd.h>
21 
22 #include "access/timeline.h"
23 #include "access/xlogrecovery.h"
24 #include "access/xlog_internal.h"
25 #include "access/xlogutils.h"
26 #include "miscadmin.h"
27 #include "storage/fd.h"
28 #include "storage/smgr.h"
29 #include "utils/hsearch.h"
30 #include "utils/rel.h"
31 
32 
33 /* GUC variable */
34 bool ignore_invalid_pages = false;
35 
36 /*
37  * Are we doing recovery from XLOG?
38  *
39  * This is only ever true in the startup process; it should be read as meaning
40  * "this process is replaying WAL records", rather than "the system is in
41  * recovery mode". It should be examined primarily by functions that need
42  * to act differently when called from a WAL redo function (e.g., to skip WAL
43  * logging). To check whether the system is in recovery regardless of which
44  * process you're running in, use RecoveryInProgress() but only after shared
45  * memory startup and lock initialization.
46  *
47  * This is updated from xlog.c and xlogrecovery.c, but lives here because
48  * it's mostly read by WAL redo functions.
49  */
50 bool InRecovery = false;
51 
52 /* Are we in Hot Standby mode? Only valid in startup process, see xlogutils.h */
53 HotStandbyState standbyState = STANDBY_DISABLED;
54
55 /*
56  * During XLOG replay, we may see XLOG records for incremental updates of
57  * pages that no longer exist, because their relation was later dropped or
58  * truncated. (Note: this is only possible when full_page_writes = OFF,
59  * since when it's ON, the first reference we see to a page should always
60  * be a full-page rewrite not an incremental update.) Rather than simply
61  * ignoring such records, we make a note of the referenced page, and then
62  * complain if we don't actually see a drop or truncate covering the page
63  * later in replay.
64  */
65 typedef struct xl_invalid_page_key
66 {
67  RelFileLocator locator; /* the relation */
68  ForkNumber forkno; /* the fork number */
69  BlockNumber blkno; /* the page */
70 } xl_invalid_page_key;
71
72 typedef struct xl_invalid_page
73 {
74  xl_invalid_page_key key; /* hash key ... must be first */
75  bool present; /* page existed but contained zeroes */
76 } xl_invalid_page;
77
78 static HTAB *invalid_page_tab = NULL;
79 
80 static int read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
81  int reqLen, XLogRecPtr targetRecPtr,
82  char *cur_page, bool wait_for_wal);
83 
84 /* Report a reference to an invalid page */
85 static void
86 report_invalid_page(int elevel, RelFileLocator locator, ForkNumber forkno,
87  BlockNumber blkno, bool present)
88 {
89  char *path = relpathperm(locator, forkno);
90 
91  if (present)
92  elog(elevel, "page %u of relation %s is uninitialized",
93  blkno, path);
94  else
95  elog(elevel, "page %u of relation %s does not exist",
96  blkno, path);
97  pfree(path);
98 }
99 
100 /* Log a reference to an invalid page */
101 static void
102 log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
103  bool present)
104 {
105  xl_invalid_page_key key;
106  xl_invalid_page *hentry;
107  bool found;
108 
109  /*
110  * Once recovery has reached a consistent state, the invalid-page table
111  * should be empty and remain so. If a reference to an invalid page is
112  * found after consistency is reached, PANIC immediately. This might seem
113  * aggressive, but it's better than letting the invalid reference linger
114  * in the hash table until the end of recovery and PANIC there, which
115  * might come only much later if this is a standby server.
116  */
117  if (reachedConsistency)
118  {
119  report_invalid_page(WARNING, locator, forkno, blkno, present);
120  elog(ignore_invalid_pages ? WARNING : PANIC,
121  "WAL contains references to invalid pages");
122  }
123 
124  /*
125  * Log references to invalid pages at DEBUG1 level. This allows some
126  * tracing of the cause (note the elog context mechanism will tell us
127  * something about the XLOG record that generated the reference).
128  */
129  if (message_level_is_interesting(DEBUG1))
130  report_invalid_page(DEBUG1, locator, forkno, blkno, present);
131 
132  if (invalid_page_tab == NULL)
133  {
134  /* create hash table when first needed */
135  HASHCTL ctl;
136 
137  ctl.keysize = sizeof(xl_invalid_page_key);
138  ctl.entrysize = sizeof(xl_invalid_page);
139 
140  invalid_page_tab = hash_create("XLOG invalid-page table",
141  100,
142  &ctl,
143  HASH_ELEM | HASH_BLOBS);
144  }
145 
146  /* we currently assume xl_invalid_page_key contains no padding */
147  key.locator = locator;
148  key.forkno = forkno;
149  key.blkno = blkno;
150  hentry = (xl_invalid_page *)
151  hash_search(invalid_page_tab, &key, HASH_ENTER, &found);
152
153  if (!found)
154  {
155  /* hash_search already filled in the key */
156  hentry->present = present;
157  }
158  else
159  {
160  /* repeat reference ... leave "present" as it was */
161  }
162 }
163 
164 /* Forget any invalid pages >= minblkno, because they've been dropped */
165 static void
166 forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
167  BlockNumber minblkno)
168 {
169  HASH_SEQ_STATUS status;
170  xl_invalid_page *hentry;
171 
172  if (invalid_page_tab == NULL)
173  return; /* nothing to do */
174 
175  hash_seq_init(&status, invalid_page_tab);
176
177  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
178  {
179  if (RelFileLocatorEquals(hentry->key.locator, locator) &&
180  hentry->key.forkno == forkno &&
181  hentry->key.blkno >= minblkno)
182  {
183  if (message_level_is_interesting(DEBUG2))
184  {
185  char *path = relpathperm(hentry->key.locator, forkno);
186 
187  elog(DEBUG2, "page %u of relation %s has been dropped",
188  hentry->key.blkno, path);
189  pfree(path);
190  }
191 
192  if (hash_search(invalid_page_tab,
193  &hentry->key,
194  HASH_REMOVE, NULL) == NULL)
195  elog(ERROR, "hash table corrupted");
196  }
197  }
198 }
199 
200 /* Forget any invalid pages in a whole database */
201 static void
202 forget_invalid_pages_db(Oid dbid)
203 {
204  HASH_SEQ_STATUS status;
205  xl_invalid_page *hentry;
206 
207  if (invalid_page_tab == NULL)
208  return; /* nothing to do */
209 
210  hash_seq_init(&status, invalid_page_tab);
211
212  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
213  {
214  if (hentry->key.locator.dbOid == dbid)
215  {
216  if (message_level_is_interesting(DEBUG2))
217  {
218  char *path = relpathperm(hentry->key.locator, hentry->key.forkno);
219 
220  elog(DEBUG2, "page %u of relation %s has been dropped",
221  hentry->key.blkno, path);
222  pfree(path);
223  }
224 
225  if (hash_search(invalid_page_tab,
226  &hentry->key,
227  HASH_REMOVE, NULL) == NULL)
228  elog(ERROR, "hash table corrupted");
229  }
230  }
231 }
232 
233 /* Are there any unresolved references to invalid pages? */
234 bool
235 XLogHaveInvalidPages(void)
236 {
237  if (invalid_page_tab != NULL &&
238  hash_get_num_entries(invalid_page_tab) > 0)
239  return true;
240  return false;
241 }
242 
243 /* Complain about any remaining invalid-page entries */
244 void
245 XLogCheckInvalidPages(void)
246 {
247  HASH_SEQ_STATUS status;
248  xl_invalid_page *hentry;
249  bool foundone = false;
250 
251  if (invalid_page_tab == NULL)
252  return; /* nothing to do */
253 
254  hash_seq_init(&status, invalid_page_tab);
255
256  /*
257  * Our strategy is to emit WARNING messages for all remaining entries and
258  * only PANIC after we've dumped all the available info.
259  */
260  while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
261  {
262  report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
263  hentry->key.blkno, hentry->present);
264  foundone = true;
265  }
266 
267  if (foundone)
268  elog(ignore_invalid_pages ? WARNING : PANIC,
269  "WAL contains references to invalid pages");
270 
271  hash_destroy(invalid_page_tab);
272  invalid_page_tab = NULL;
273 }
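
The invalid-page API above is driven entirely from WAL replay. The hypothetical helper below is an illustrative sketch, not part of xlogutils.c: the function name and the block number 100 are made up, but the entry points it calls are the real ones defined in this file.

/* Sketch only: the life cycle of the invalid-page table during replay. */
static void
example_invalid_page_lifecycle(RelFileLocator rlocator)
{
	/*
	 * (1) While replaying, a redo routine that reads a missing or zeroed
	 * page in RBM_NORMAL mode causes log_invalid_page() to remember the
	 * reference (see XLogReadBufferExtended below).
	 *
	 * (2) A later drop or truncate record resolves such references:
	 */
	XLogDropRelation(rlocator, MAIN_FORKNUM);			/* forget all blocks */
	XLogTruncateRelation(rlocator, MAIN_FORKNUM, 100);	/* forget blocks >= 100 */

	/*
	 * (3) Any entry still present once consistency is reached, or when
	 * XLogCheckInvalidPages() runs at the end of recovery, is fatal unless
	 * ignore_invalid_pages is set.
	 */
	if (XLogHaveInvalidPages())
		elog(WARNING, "unresolved references to invalid pages remain");
}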
274 
275 
276 /*
277  * XLogReadBufferForRedo
278  * Read a page during XLOG replay
279  *
280  * Reads a block referenced by a WAL record into shared buffer cache, and
281  * determines what needs to be done to redo the changes to it. If the WAL
282  * record includes a full-page image of the page, it is restored.
283  *
284  * 'record.EndRecPtr' is compared to the page's LSN to determine if the record
285  * has already been replayed. 'block_id' is the ID number the block was
286  * registered with, when the WAL record was created.
287  *
288  * Returns one of the following:
289  *
290  * BLK_NEEDS_REDO - changes from the WAL record need to be applied
291  * BLK_DONE - block doesn't need replaying
292  * BLK_RESTORED - block was restored from a full-page image included in
293  * the record
294  * BLK_NOTFOUND - block was not found (because it was truncated away by
295  * an operation later in the WAL stream)
296  *
297  * On return, the buffer is locked in exclusive-mode, and returned in *buf.
298  * Note that the buffer is locked and returned even if it doesn't need
299  * replaying. (Getting the buffer lock is not really necessary during
300  * single-process crash recovery, but some subroutines such as MarkBufferDirty
301  * will complain if we don't have the lock. In hot standby mode it's
302  * definitely necessary.)
303  *
304  * Note: when a backup block is available in XLOG with the BKPIMAGE_APPLY flag
305  * set, we restore it, even if the page in the database appears newer. This
306  * is to protect ourselves against database pages that were partially or
307  * incorrectly written during a crash. We assume that the XLOG data must be
308  * good because it has passed a CRC check, while the database page might not
309  * be. This will force us to replay all subsequent modifications of the page
310  * that appear in XLOG, rather than possibly ignoring them as already
311  * applied, but that's not a huge drawback.
312  */
313 XLogRedoAction
314 XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id,
315  Buffer *buf)
316 {
317  return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
318  false, buf);
319 }
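
As a usage illustration (not part of this file), a typical redo routine drives XLogReadBufferForRedo as in the hypothetical sketch below; the function name and the "apply the change" step are placeholders, while the action handling follows the contract documented above.

/* Sketch only: canonical redo-routine pattern around XLogReadBufferForRedo. */
static void
example_redo_change(XLogReaderState *record)
{
	Buffer		buffer;
	XLogRedoAction action;

	action = XLogReadBufferForRedo(record, 0, &buffer);
	if (action == BLK_NEEDS_REDO)
	{
		Page		page = BufferGetPage(buffer);

		/* ... apply the logged change to the page here ... */

		PageSetLSN(page, record->EndRecPtr);
		MarkBufferDirty(buffer);
	}
	/* BLK_RESTORED and BLK_DONE need no work; BLK_NOTFOUND returns no buffer. */
	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}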
320 
321 /*
322  * Pin and lock a buffer referenced by a WAL record, for the purpose of
323  * re-initializing it.
324  */
325 Buffer
326 XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
327 {
328  Buffer buf;
329 
330  XLogReadBufferForRedoExtended(record, block_id, RBM_ZERO_AND_LOCK, false,
331  &buf);
332  return buf;
333 }
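
For a block that was registered with the WILL_INIT flag, the matching pattern looks like the sketch below. It is illustrative only and not part of this file; the function name is hypothetical.

/* Sketch only: redo of a record that re-initializes its target page. */
static void
example_redo_init_page(XLogReaderState *record)
{
	Buffer		buffer = XLogInitBufferForRedo(record, 0);
	Page		page = BufferGetPage(buffer);

	/* The buffer comes back zeroed and exclusively locked; rebuild it. */
	PageInit(page, BufferGetPageSize(buffer), 0);
	PageSetLSN(page, record->EndRecPtr);
	MarkBufferDirty(buffer);
	UnlockReleaseBuffer(buffer);
}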
334 
335 /*
336  * XLogReadBufferForRedoExtended
337  * Like XLogReadBufferForRedo, but with extra options.
338  *
339  * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
340  * with all-zeroes pages up to the referenced block number. In
341  * RBM_ZERO_AND_LOCK and RBM_ZERO_AND_CLEANUP_LOCK modes, the return value
342  * is always BLK_NEEDS_REDO.
343  *
344  * (The RBM_ZERO_AND_CLEANUP_LOCK mode is redundant with the get_cleanup_lock
345  * parameter. Do not use an inconsistent combination!)
346  *
347  * If 'get_cleanup_lock' is true, a "cleanup lock" is acquired on the buffer
348  * using LockBufferForCleanup(), instead of a regular exclusive lock.
349  */
350 XLogRedoAction
351 XLogReadBufferForRedoExtended(XLogReaderState *record,
352  uint8 block_id,
353  ReadBufferMode mode, bool get_cleanup_lock,
354  Buffer *buf)
355 {
356  XLogRecPtr lsn = record->EndRecPtr;
357  RelFileLocator rlocator;
358  ForkNumber forknum;
359  BlockNumber blkno;
360  Buffer prefetch_buffer;
361  Page page;
362  bool zeromode;
363  bool willinit;
364 
365  if (!XLogRecGetBlockTagExtended(record, block_id, &rlocator, &forknum, &blkno,
366  &prefetch_buffer))
367  {
368  /* Caller specified a bogus block_id */
369  elog(PANIC, "failed to locate backup block with ID %d in WAL record",
370  block_id);
371  }
372 
373  /*
374  * Make sure that if the block is marked with WILL_INIT, the caller is
375  * going to initialize it. And vice versa.
376  */
377  zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
378  willinit = (XLogRecGetBlock(record, block_id)->flags & BKPBLOCK_WILL_INIT) != 0;
379  if (willinit && !zeromode)
380  elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
381  if (!willinit && zeromode)
382  elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");
383 
384  /* If it has a full-page image and it should be restored, do it. */
385  if (XLogRecBlockImageApply(record, block_id))
386  {
387  Assert(XLogRecHasBlockImage(record, block_id));
388  *buf = XLogReadBufferExtended(rlocator, forknum, blkno,
389  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK,
390  prefetch_buffer);
391  page = BufferGetPage(*buf);
392  if (!RestoreBlockImage(record, block_id, page))
393  ereport(ERROR,
394  (errcode(ERRCODE_INTERNAL_ERROR),
395  errmsg_internal("%s", record->errormsg_buf)));
396 
397  /*
398  * The page may be uninitialized. If so, we can't set the LSN because
399  * that would corrupt the page.
400  */
401  if (!PageIsNew(page))
402  {
403  PageSetLSN(page, lsn);
404  }
405 
406  MarkBufferDirty(*buf);
407
408  /*
409  * At the end of crash recovery the init forks of unlogged relations
410  * are copied, without going through shared buffers. So we need to
411  * force the on-disk state of init forks to always be in sync with the
412  * state in shared buffers.
413  */
414  if (forknum == INIT_FORKNUM)
415  FlushOneBuffer(*buf);
416
417  return BLK_RESTORED;
418  }
419  else
420  {
421  *buf = XLogReadBufferExtended(rlocator, forknum, blkno, mode, prefetch_buffer);
422  if (BufferIsValid(*buf))
423  {
424  if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
425  {
426  if (get_cleanup_lock)
427  LockBufferForCleanup(*buf);
428  else
429  LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
430  }
431  if (lsn <= PageGetLSN(BufferGetPage(*buf)))
432  return BLK_DONE;
433  else
434  return BLK_NEEDS_REDO;
435  }
436  else
437  return BLK_NOTFOUND;
438  }
439 }
440 
441 /*
442  * XLogReadBufferExtended
443  * Read a page during XLOG replay
444  *
445  * This is functionally comparable to ReadBufferExtended. There are some
446  * differences in behavior with respect to the "mode" argument:
447  *
448  * In RBM_NORMAL mode, if the page doesn't exist, or contains all-zeroes, we
449  * return InvalidBuffer. In this case the caller should silently skip the
450  * update on this page. (In this situation, we expect that the page was later
451  * dropped or truncated. If we don't see evidence of that later in the WAL
452  * sequence, we'll complain at the end of WAL replay.)
453  *
454  * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
455  * with all-zeroes pages up to the given block number.
456  *
457  * In RBM_NORMAL_NO_LOG mode, we return InvalidBuffer if the page doesn't
458  * exist, and we don't check for all-zeroes. Thus, no log entry is made
459  * to imply that the page should be dropped or truncated later.
460  *
461  * Optionally, recent_buffer can be used to provide a hint about the location
462  * of the page in the buffer pool; it does not have to be correct, but avoids
463  * a buffer mapping table probe if it is.
464  *
465  * NB: A redo function should normally not call this directly. To get a page
466  * to modify, use XLogReadBufferForRedoExtended instead. It is important that
467  * all pages modified by a WAL record are registered in the WAL records, or
468  * they will be invisible to tools that need to know which pages are modified.
469  */
470 Buffer
471 XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum,
472  BlockNumber blkno, ReadBufferMode mode,
473  Buffer recent_buffer)
474 {
475  BlockNumber lastblock;
476  Buffer buffer;
477  SMgrRelation smgr;
478 
479  Assert(blkno != P_NEW);
480 
481  /* Do we have a clue where the buffer might be already? */
482  if (BufferIsValid(recent_buffer) &&
483  mode == RBM_NORMAL &&
484  ReadRecentBuffer(rlocator, forknum, blkno, recent_buffer))
485  {
486  buffer = recent_buffer;
487  goto recent_buffer_fast_path;
488  }
489 
490  /* Open the relation at smgr level */
491  smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
492 
493  /*
494  * Create the target file if it doesn't already exist. This lets us cope
495  * if the replay sequence contains writes to a relation that is later
496  * deleted. (The original coding of this routine would instead suppress
497  * the writes, but that seems like it risks losing valuable data if the
498  * filesystem loses an inode during a crash. Better to write the data
499  * until we are actually told to delete the file.)
500  */
501  smgrcreate(smgr, forknum, true);
502 
503  lastblock = smgrnblocks(smgr, forknum);
504 
505  if (blkno < lastblock)
506  {
507  /* page exists in file */
508  buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
509  mode, NULL, true);
510  }
511  else
512  {
513  /* hm, page doesn't exist in file */
514  if (mode == RBM_NORMAL)
515  {
516  log_invalid_page(rlocator, forknum, blkno, false);
517  return InvalidBuffer;
518  }
519  if (mode == RBM_NORMAL_NO_LOG)
520  return InvalidBuffer;
521  /* OK to extend the file */
522  /* we do this in recovery only - no rel-extension lock needed */
523  Assert(InRecovery);
524  buffer = ExtendBufferedRelTo(BMR_SMGR(smgr, RELPERSISTENCE_PERMANENT),
525  forknum,
526  NULL,
527  EB_PERFORMING_RECOVERY |
528  EB_SKIP_EXTENSION_LOCK,
529  blkno + 1,
530  mode);
531  }
532 
533 recent_buffer_fast_path:
534  if (mode == RBM_NORMAL)
535  {
536  /* check that page has been initialized */
537  Page page = (Page) BufferGetPage(buffer);
538 
539  /*
540  * We assume that PageIsNew is safe without a lock. During recovery,
541  * there should be no other backends that could modify the buffer at
542  * the same time.
543  */
544  if (PageIsNew(page))
545  {
546  ReleaseBuffer(buffer);
547  log_invalid_page(rlocator, forknum, blkno, true);
548  return InvalidBuffer;
549  }
550  }
551 
552  return buffer;
553 }
554 
555 /*
556  * Struct actually returned by CreateFakeRelcacheEntry, though the declared
557  * return type is Relation.
558  */
559 typedef struct
560 {
561  RelationData reldata; /* Note: this must be first */
562  FormData_pg_class pgc;
563 } FakeRelCacheEntryData;
564
565 typedef FakeRelCacheEntryData *FakeRelCacheEntry;
566
567 /*
568  * Create a fake relation cache entry for a physical relation
569  *
570  * It's often convenient to use the same functions in XLOG replay as in the
571  * main codepath, but those functions typically work with a relcache entry.
572  * We don't have a working relation cache during XLOG replay, but this
573  * function can be used to create a fake relcache entry instead. Only the
574  * fields related to physical storage, like rd_rel, are initialized, so the
575  * fake entry is only usable in low-level operations like ReadBuffer().
576  *
577  * This is also used for syncing WAL-skipped files.
578  *
579  * Caller must free the returned entry with FreeFakeRelcacheEntry().
580  */
581 Relation
582 CreateFakeRelcacheEntry(RelFileLocator rlocator)
583 {
584  FakeRelCacheEntry fakeentry;
585  Relation rel;
586 
587  /* Allocate the Relation struct and all related space in one block. */
588  fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
589  rel = (Relation) fakeentry;
590 
591  rel->rd_rel = &fakeentry->pgc;
592  rel->rd_locator = rlocator;
593 
594  /*
595  * We will never be working with temp rels during recovery or while
596  * syncing WAL-skipped files.
597  */
598  rel->rd_backend = INVALID_PROC_NUMBER;
599
600  /* It must be a permanent table here */
601  rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;
602 
603  /* We don't know the name of the relation; use relfilenumber instead */
604  sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);
605 
606  /*
607  * We set up the lockRelId in case anything tries to lock the dummy
608  * relation. Note that this is fairly bogus since relNumber may be
609  * different from the relation's OID. It shouldn't really matter though.
610  * In recovery, we are running by ourselves and can't have any lock
611  * conflicts. While syncing, we already hold AccessExclusiveLock.
612  */
613  rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
614  rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;
615 
616  /*
617  * Set up a non-pinned SMgrRelation reference, so that we don't need to
618  * worry about unpinning it on error.
619  */
620  rel->rd_smgr = smgropen(rlocator, INVALID_PROC_NUMBER);
621 
622  return rel;
623 }
624 
625 /*
626  * Free a fake relation cache entry.
627  */
628 void
629 FreeFakeRelcacheEntry(Relation fakerel)
630 {
631  pfree(fakerel);
632 }
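
The expected create-use-free pattern is sketched below for illustration; it is not part of this file, the function name is hypothetical, and the block number is assumed to exist. Only storage-level operations are legal on the fake entry.

/* Sketch only: using a fake relcache entry for a buffer-level operation. */
static void
example_with_fake_relcache(RelFileLocator rlocator, BlockNumber blkno)
{
	Relation	reln = CreateFakeRelcacheEntry(rlocator);
	Buffer		buf;

	/* Low-level access only; anything needing catalog data would fail. */
	buf = ReadBufferExtended(reln, MAIN_FORKNUM, blkno, RBM_NORMAL, NULL);
	ReleaseBuffer(buf);

	FreeFakeRelcacheEntry(reln);
}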
633 
634 /*
635  * Drop a relation during XLOG replay
636  *
637  * This is called when the relation is about to be deleted; we need to remove
638  * any open "invalid-page" records for the relation.
639  */
640 void
641 XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
642 {
643  forget_invalid_pages(rlocator, forknum, 0);
644 }
645 
646 /*
647  * Drop a whole database during XLOG replay
648  *
649  * As above, but for DROP DATABASE instead of dropping a single rel
650  */
651 void
652 XLogDropDatabase(Oid dbid)
653 {
654  /*
655  * This is unnecessarily heavy-handed, as it will close SMgrRelation
656  * objects for other databases as well. DROP DATABASE occurs seldom enough
657  * that it's not worth introducing a variant of smgrdestroy for just this
658  * purpose.
659  */
660  smgrdestroyall();
661 
662  forget_invalid_pages_db(dbid);
663 }
664 
665 /*
666  * Truncate a relation during XLOG replay
667  *
668  * We need to clean up any open "invalid-page" records for the dropped pages.
669  */
670 void
671 XLogTruncateRelation(RelFileLocator rlocator, ForkNumber forkNum,
672  BlockNumber nblocks)
673 {
674  forget_invalid_pages(rlocator, forkNum, nblocks);
675 }
676 
677 /*
678  * Determine which timeline to read an xlog page from and set the
679  * XLogReaderState's currTLI to that timeline ID.
680  *
681  * We care about timelines in xlogreader when we might be reading xlog
682  * generated prior to a promotion, either if we're currently a standby in
683  * recovery or if we're a promoted primary reading xlogs generated by the old
684  * primary before our promotion.
685  *
686  * wantPage must be set to the start address of the page to read and
687  * wantLength to the amount of the page that will be read, up to
688  * XLOG_BLCKSZ. If the amount to be read isn't known, pass XLOG_BLCKSZ.
689  *
690  * The currTLI argument should be the system-wide current timeline.
691  * Note that this may be different from state->currTLI, which is the timeline
692  * from which the caller is currently reading previous xlog records.
693  *
694  * We switch to an xlog segment from the new timeline eagerly when on a
695  * historical timeline, as soon as we reach the start of the xlog segment
696  * containing the timeline switch. The server copied the segment to the new
697  * timeline so all the data up to the switch point is the same, but there's no
698  * guarantee the old segment will still exist. It may have been deleted or
699  * renamed with a .partial suffix so we can't necessarily keep reading from
700  * the old TLI even though tliSwitchPoint says it's OK.
701  *
702  * We can't just check the timeline when we read a page on a different segment
703  * to the last page. We could've received a timeline switch from a cascading
704  * upstream, so the current segment ends abruptly (possibly getting renamed to
705  * .partial) and we have to switch to a new one. Even in the middle of reading
706  * a page we could have to dump the cached page and switch to a new TLI.
707  *
708  * Because of this, callers MAY NOT assume that currTLI is the timeline that
709  * will be in a page's xlp_tli; the page may begin on an older timeline or we
710  * might be reading from historical timeline data on a segment that's been
711  * copied to a new timeline.
712  *
713  * The caller must also make sure it doesn't read past the current replay
714  * position (using GetXLogReplayRecPtr) if executing in recovery, so it
715  * doesn't fail to notice that the current timeline became historical.
716  */
717 void
718 XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage,
719  uint32 wantLength, TimeLineID currTLI)
720 {
721  const XLogRecPtr lastReadPage = (state->seg.ws_segno *
722  state->segcxt.ws_segsize + state->segoff);
723 
724  Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
725  Assert(wantLength <= XLOG_BLCKSZ);
726  Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
727  Assert(currTLI != 0);
728 
729  /*
730  * If the desired page is currently read in and valid, we have nothing to
731  * do.
732  *
733  * The caller should've ensured that it didn't previously advance readOff
734  * past the valid limit of this timeline, so it doesn't matter if the
735  * current TLI has since become historical.
736  */
737  if (lastReadPage == wantPage &&
738  state->readLen != 0 &&
739  lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
740  return;
741 
742  /*
743  * If we're reading from the current timeline, it hasn't become historical
744  * and the page we're reading is after the last page read, we can again
745  * just carry on. (Seeking backwards requires a check to make sure the
746  * older page isn't on a prior timeline).
747  *
748  * currTLI might've become historical since the caller obtained the value,
749  * but the caller is required not to read past the flush limit it saw at
750  * the time it looked up the timeline. There's nothing we can do about it
751  * if StartupXLOG() renames it to .partial concurrently.
752  */
753  if (state->currTLI == currTLI && wantPage >= lastReadPage)
754  {
755  Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
756  return;
757  }
758 
759  /*
760  * If we're just reading pages from a previously validated historical
761  * timeline and the timeline we're reading from is valid until the end of
762  * the current segment we can just keep reading.
763  */
764  if (state->currTLIValidUntil != InvalidXLogRecPtr &&
765  state->currTLI != currTLI &&
766  state->currTLI != 0 &&
767  ((wantPage + wantLength) / state->segcxt.ws_segsize) <
768  (state->currTLIValidUntil / state->segcxt.ws_segsize))
769  return;
770 
771  /*
772  * If we reach this point we're either looking up a page for random
773  * access, the current timeline just became historical, or we're reading
774  * from a new segment containing a timeline switch. In all cases we need
775  * to determine the newest timeline on the segment.
776  *
777  * If it's the current timeline we can just keep reading from here unless
778  * we detect a timeline switch that makes the current timeline historical.
779  * If it's a historical timeline we can read all the segment on the newest
780  * timeline because it contains all the old timelines' data too. So only
781  * one switch check is required.
782  */
783  {
784  /*
785  * We need to re-read the timeline history in case it's been changed
786  * by a promotion or replay from a cascaded replica.
787  */
788  List *timelineHistory = readTimeLineHistory(currTLI);
789  XLogRecPtr endOfSegment;
790 
791  endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
792  state->segcxt.ws_segsize - 1;
793  Assert(wantPage / state->segcxt.ws_segsize ==
794  endOfSegment / state->segcxt.ws_segsize);
795 
796  /*
797  * Find the timeline of the last LSN on the segment containing
798  * wantPage.
799  */
800  state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
801  state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
802  &state->nextTLI);
803 
804  Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
805  wantPage + wantLength < state->currTLIValidUntil);
806 
807  list_free_deep(timelineHistory);
808 
809  elog(DEBUG3, "switched to timeline %u valid until %X/%X",
810  state->currTLI,
811  LSN_FORMAT_ARGS(state->currTLIValidUntil));
812  }
813 }
814 
815 /* XLogReaderRoutine->segment_open callback for local pg_wal files */
816 void
817 wal_segment_open(XLogReaderState *state, XLogSegNo nextSegNo,
818  TimeLineID *tli_p)
819 {
820  TimeLineID tli = *tli_p;
821  char path[MAXPGPATH];
822 
823  XLogFilePath(path, tli, nextSegNo, state->segcxt.ws_segsize);
824  state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
825  if (state->seg.ws_file >= 0)
826  return;
827 
828  if (errno == ENOENT)
829  ereport(ERROR,
830  (errcode_for_file_access(),
831  errmsg("requested WAL segment %s has already been removed",
832  path)));
833  else
834  ereport(ERROR,
835  (errcode_for_file_access(),
836  errmsg("could not open file \"%s\": %m",
837  path)));
838 }
839 
840 /* stock XLogReaderRoutine->segment_close callback */
841 void
842 wal_segment_close(XLogReaderState *state)
843 {
844  close(state->seg.ws_file);
845  /* need to check errno? */
846  state->seg.ws_file = -1;
847 }
848 
849 /*
850  * XLogReaderRoutine->page_read callback for reading local xlog files
851  *
852  * Public because it would likely be very helpful for someone writing another
853  * output method outside walsender, e.g. in a bgworker.
854  *
855  * TODO: The walsender has its own version of this, but it relies on the
856  * walsender's latch being set whenever WAL is flushed. No such infrastructure
857  * exists for normal backends, so we have to do a check/sleep/repeat style of
858  * loop for now.
859  */
860 int
861 read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
862  int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
863 {
864  return read_local_xlog_page_guts(state, targetPagePtr, reqLen,
865  targetRecPtr, cur_page, true);
866 }
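
For a backend-side consumer such as a bgworker, the callbacks in this file plug into xlogreader roughly as in the sketch below. It is illustrative only and not part of this file; the function name, start_lsn, and the per-record handling are placeholders.

/* Sketch only: reading WAL in a backend using the callbacks from this file. */
static void
example_walk_wal(XLogRecPtr start_lsn)
{
	XLogReaderState *reader;
	XLogRecord *record;
	char	   *errormsg;

	reader = XLogReaderAllocate(wal_segment_size, NULL,
								XL_ROUTINE(.page_read = &read_local_xlog_page,
										   .segment_open = &wal_segment_open,
										   .segment_close = &wal_segment_close),
								NULL);
	if (reader == NULL)
		elog(ERROR, "out of memory");

	XLogBeginRead(reader, start_lsn);
	while ((record = XLogReadRecord(reader, &errormsg)) != NULL)
	{
		/* ... inspect or decode the record here ... */
	}
	if (errormsg)
		elog(ERROR, "could not read WAL: %s", errormsg);

	XLogReaderFree(reader);
}

Note that read_local_xlog_page blocks waiting for future WAL; a caller that must not block would use read_local_xlog_page_no_wait instead and supply a ReadLocalXLogPageNoWaitPrivate structure as private_data so it can detect end-of-WAL.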
867 
868 /*
869  * Same as read_local_xlog_page except that it doesn't wait for future WAL
870  * to be available.
871  */
872 int
873 read_local_xlog_page_no_wait(XLogReaderState *state, XLogRecPtr targetPagePtr,
874  int reqLen, XLogRecPtr targetRecPtr,
875  char *cur_page)
876 {
877  return read_local_xlog_page_guts(state, targetPagePtr, reqLen,
878  targetRecPtr, cur_page, false);
879 }
880 
881 /*
882  * Implementation of read_local_xlog_page and its no wait version.
883  */
884 static int
885 read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
886  int reqLen, XLogRecPtr targetRecPtr,
887  char *cur_page, bool wait_for_wal)
888 {
889  XLogRecPtr read_upto,
890  loc;
891  TimeLineID tli;
892  int count;
893  WALReadError errinfo;
894  TimeLineID currTLI;
895 
896  loc = targetPagePtr + reqLen;
897 
898  /* Loop waiting for xlog to be available if necessary */
899  while (1)
900  {
901  /*
902  * Determine the limit of xlog we can currently read to, and what the
903  * most recent timeline is.
904  */
905  if (!RecoveryInProgress())
906  read_upto = GetFlushRecPtr(&currTLI);
907  else
908  read_upto = GetXLogReplayRecPtr(&currTLI);
909  tli = currTLI;
910 
911  /*
912  * Check which timeline to get the record from.
913  *
914  * We have to do it each time through the loop because if we're in
915  * recovery as a cascading standby, the current timeline might've
916  * become historical. We can't rely on RecoveryInProgress() because in
917  * a standby configuration like
918  *
919  * A => B => C
920  *
921  * if we're a logical decoding session on C, and B gets promoted, our
922  * timeline will change while we remain in recovery.
923  *
924  * We can't just keep reading from the old timeline as the last WAL
925  * archive in the timeline will get renamed to .partial by
926  * StartupXLOG().
927  *
928  * If that happens after our caller determined the TLI but before we
929  * actually read the xlog page, we might still try to read from the
930  * old (now renamed) segment and fail. There's not much we can do
931  * about this, but it can only happen when we're a leaf of a cascading
932  * standby whose primary gets promoted while we're decoding, so a
933  * one-off ERROR isn't too bad.
934  */
935  XLogReadDetermineTimeline(state, targetPagePtr, reqLen, tli);
936 
937  if (state->currTLI == currTLI)
938  {
939 
940  if (loc <= read_upto)
941  break;
942 
943  /* If asked, let's not wait for future WAL. */
944  if (!wait_for_wal)
945  {
946  ReadLocalXLogPageNoWaitPrivate *private_data;
947 
948  /*
949  * Inform the caller of read_local_xlog_page_no_wait that the
950  * end of WAL has been reached.
951  */
952  private_data = (ReadLocalXLogPageNoWaitPrivate *)
953  state->private_data;
954  private_data->end_of_wal = true;
955  break;
956  }
957 
958  CHECK_FOR_INTERRUPTS();
959  pg_usleep(1000L);
960  }
961  else
962  {
963  /*
964  * We're on a historical timeline, so limit reading to the switch
965  * point where we moved to the next timeline.
966  *
967  * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
968  * about the new timeline, so we must've received past the end of
969  * it.
970  */
971  read_upto = state->currTLIValidUntil;
972 
973  /*
974  * Setting tli to our wanted record's TLI is slightly wrong; the
975  * page might begin on an older timeline if it contains a timeline
976  * switch, since its xlog segment will have been copied from the
977  * prior timeline. This is pretty harmless though, as nothing
978  * cares so long as the timeline doesn't go backwards. We should
979  * read the page header instead; FIXME someday.
980  */
981  tli = state->currTLI;
982 
983  /* No need to wait on a historical timeline */
984  break;
985  }
986  }
987 
988  if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
989  {
990  /*
991  * more than one block available; read only that block, have caller
992  * come back if they need more.
993  */
994  count = XLOG_BLCKSZ;
995  }
996  else if (targetPagePtr + reqLen > read_upto)
997  {
998  /* not enough data there */
999  return -1;
1000  }
1001  else
1002  {
1003  /* enough bytes available to satisfy the request */
1004  count = read_upto - targetPagePtr;
1005  }
1006 
1007  if (!WALRead(state, cur_page, targetPagePtr, count, tli,
1008  &errinfo))
1009  WALReadRaiseError(&errinfo);
1010 
1011  /* number of valid bytes in the buffer */
1012  return count;
1013 }
1014 
1015 /*
1016  * Backend-specific convenience code to handle read errors encountered by
1017  * WALRead().
1018  */
1019 void
1020 WALReadRaiseError(WALReadError *errinfo)
1021 {
1022  WALOpenSegment *seg = &errinfo->wre_seg;
1023  char fname[MAXFNAMELEN];
1024 
1025  XLogFileName(fname, seg->ws_tli, seg->ws_segno, wal_segment_size);
1026 
1027  if (errinfo->wre_read < 0)
1028  {
1029  errno = errinfo->wre_errno;
1030  ereport(ERROR,
1031  (errcode_for_file_access(),
1032  errmsg("could not read from WAL segment %s, offset %d: %m",
1033  fname, errinfo->wre_off)));
1034  }
1035  else if (errinfo->wre_read == 0)
1036  {
1037  ereport(ERROR,
1038  (errcode(ERRCODE_DATA_CORRUPTED),
1039  errmsg("could not read from WAL segment %s, offset %d: read %d of %d",
1040  fname, errinfo->wre_off, errinfo->wre_read,
1041  errinfo->wre_req)));
1042  }
1043 }