/*-------------------------------------------------------------------------
 *
 * xlogutils.c
 *
 * PostgreSQL write-ahead log manager utility routines
 *
 * This file contains support routines that are used by XLOG replay functions.
 * None of this code is used during normal system operation.
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/backend/access/transam/xlogutils.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <unistd.h>

#include "access/timeline.h"
#include "access/xlogrecovery.h"
#include "access/xlog_internal.h"
#include "access/xlogprefetcher.h"
#include "access/xlogutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/fd.h"
#include "storage/smgr.h"
#include "utils/guc.h"
#include "utils/hsearch.h"
#include "utils/rel.h"


/* GUC variable */
bool		ignore_invalid_pages = false;

/*
 * Are we doing recovery from XLOG?
 *
 * This is only ever true in the startup process; it should be read as meaning
 * "this process is replaying WAL records", rather than "the system is in
 * recovery mode". It should be examined primarily by functions that need
 * to act differently when called from a WAL redo function (e.g., to skip WAL
 * logging). To check whether the system is in recovery regardless of which
 * process you're running in, use RecoveryInProgress() but only after shared
 * memory startup and lock initialization.
 *
 * This is updated from xlog.c and xlogrecovery.c, but lives here because
 * it's mostly read by WAL redo functions.
 */
bool		InRecovery = false;

/* Are we in Hot Standby mode? Only valid in startup process, see xlogutils.h */
HotStandbyState standbyState = STANDBY_DISABLED;

/*
 * During XLOG replay, we may see XLOG records for incremental updates of
 * pages that no longer exist, because their relation was later dropped or
 * truncated. (Note: this is only possible when full_page_writes = OFF,
 * since when it's ON, the first reference we see to a page should always
 * be a full-page rewrite not an incremental update.) Rather than simply
 * ignoring such records, we make a note of the referenced page, and then
 * complain if we don't actually see a drop or truncate covering the page
 * later in replay.
 */
typedef struct xl_invalid_page_key
{
	RelFileLocator locator;		/* the relation */
	ForkNumber	forkno;			/* the fork number */
	BlockNumber blkno;			/* the page */
} xl_invalid_page_key;

typedef struct xl_invalid_page
{
	xl_invalid_page_key key;	/* hash key ... must be first */
	bool		present;		/* page existed but contained zeroes */
} xl_invalid_page;

static HTAB *invalid_page_tab = NULL;

static int	read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
									  int reqLen, XLogRecPtr targetRecPtr,
									  char *cur_page, bool wait_for_wal);

/* Report a reference to an invalid page */
static void
report_invalid_page(int elevel, RelFileLocator locator, ForkNumber forkno,
					BlockNumber blkno, bool present)
{
	char	   *path = relpathperm(locator, forkno);

	if (present)
		elog(elevel, "page %u of relation %s is uninitialized",
			 blkno, path);
	else
		elog(elevel, "page %u of relation %s does not exist",
			 blkno, path);
	pfree(path);
}

/* Log a reference to an invalid page */
static void
log_invalid_page(RelFileLocator locator, ForkNumber forkno, BlockNumber blkno,
				 bool present)
{
	xl_invalid_page_key key;
	xl_invalid_page *hentry;
	bool		found;

	/*
	 * Once recovery has reached a consistent state, the invalid-page table
	 * should be empty and remain so. If a reference to an invalid page is
	 * found after consistency is reached, PANIC immediately. This might seem
	 * aggressive, but it's better than letting the invalid reference linger
	 * in the hash table until the end of recovery and PANIC there, which
	 * might come only much later if this is a standby server.
	 */
	if (reachedConsistency)
	{
		report_invalid_page(WARNING, locator, forkno, blkno, present);
		elog(ignore_invalid_pages ? WARNING : PANIC,
			 "WAL contains references to invalid pages");
	}

	/*
	 * Log references to invalid pages at DEBUG1 level. This allows some
	 * tracing of the cause (note the elog context mechanism will tell us
	 * something about the XLOG record that generated the reference).
	 */
	if (message_level_is_interesting(DEBUG1))
		report_invalid_page(DEBUG1, locator, forkno, blkno, present);

	if (invalid_page_tab == NULL)
	{
		/* create hash table when first needed */
		HASHCTL		ctl;

		ctl.keysize = sizeof(xl_invalid_page_key);
		ctl.entrysize = sizeof(xl_invalid_page);

		invalid_page_tab = hash_create("XLOG invalid-page table",
									   100,
									   &ctl,
									   HASH_ELEM | HASH_BLOBS);
	}

	/* we currently assume xl_invalid_page_key contains no padding */
	key.locator = locator;
	key.forkno = forkno;
	key.blkno = blkno;
	hentry = (xl_invalid_page *)
		hash_search(invalid_page_tab, &key, HASH_ENTER, &found);

	if (!found)
	{
		/* hash_search already filled in the key */
		hentry->present = present;
	}
	else
	{
		/* repeat reference ... leave "present" as it was */
	}
}

/* Forget any invalid pages >= minblkno, because they've been dropped */
static void
forget_invalid_pages(RelFileLocator locator, ForkNumber forkno,
					 BlockNumber minblkno)
{
	HASH_SEQ_STATUS status;
	xl_invalid_page *hentry;

	if (invalid_page_tab == NULL)
		return;					/* nothing to do */

	hash_seq_init(&status, invalid_page_tab);

	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
	{
		if (RelFileLocatorEquals(hentry->key.locator, locator) &&
			hentry->key.forkno == forkno &&
			hentry->key.blkno >= minblkno)
		{
			if (message_level_is_interesting(DEBUG2))
			{
				char	   *path = relpathperm(hentry->key.locator, forkno);

				elog(DEBUG2, "page %u of relation %s has been dropped",
					 hentry->key.blkno, path);
				pfree(path);
			}

			if (hash_search(invalid_page_tab,
							&hentry->key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "hash table corrupted");
		}
	}
}

/* Forget any invalid pages in a whole database */
static void
forget_invalid_pages_db(Oid dbid)
{
	HASH_SEQ_STATUS status;
	xl_invalid_page *hentry;

	if (invalid_page_tab == NULL)
		return;					/* nothing to do */

	hash_seq_init(&status, invalid_page_tab);

	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
	{
		if (hentry->key.locator.dbOid == dbid)
		{
			if (message_level_is_interesting(DEBUG2))
			{
				char	   *path = relpathperm(hentry->key.locator, hentry->key.forkno);

				elog(DEBUG2, "page %u of relation %s has been dropped",
					 hentry->key.blkno, path);
				pfree(path);
			}

			if (hash_search(invalid_page_tab,
							&hentry->key,
							HASH_REMOVE, NULL) == NULL)
				elog(ERROR, "hash table corrupted");
		}
	}
}

/* Are there any unresolved references to invalid pages? */
bool
XLogHaveInvalidPages(void)
{
	if (invalid_page_tab != NULL &&
		hash_get_num_entries(invalid_page_tab) > 0)
		return true;
	return false;
}

/* Complain about any remaining invalid-page entries */
void
XLogCheckInvalidPages(void)
{
	HASH_SEQ_STATUS status;
	xl_invalid_page *hentry;
	bool		foundone = false;

	if (invalid_page_tab == NULL)
		return;					/* nothing to do */

	hash_seq_init(&status, invalid_page_tab);

	/*
	 * Our strategy is to emit WARNING messages for all remaining entries and
	 * only PANIC after we've dumped all the available info.
	 */
	while ((hentry = (xl_invalid_page *) hash_seq_search(&status)) != NULL)
	{
		report_invalid_page(WARNING, hentry->key.locator, hentry->key.forkno,
							hentry->key.blkno, hentry->present);
		foundone = true;
	}

	if (foundone)
		elog(ignore_invalid_pages ? WARNING : PANIC,
			 "WAL contains references to invalid pages");

	hash_destroy(invalid_page_tab);
	invalid_page_tab = NULL;
}


/*
 * XLogReadBufferForRedo
 *		Read a page during XLOG replay
 *
 * Reads a block referenced by a WAL record into shared buffer cache, and
 * determines what needs to be done to redo the changes to it. If the WAL
 * record includes a full-page image of the page, it is restored.
 *
 * 'record.EndRecPtr' is compared to the page's LSN to determine if the record
 * has already been replayed. 'block_id' is the ID number the block was
 * registered with, when the WAL record was created.
 *
 * Returns one of the following:
 *
 *	BLK_NEEDS_REDO - changes from the WAL record need to be applied
 *	BLK_DONE - block doesn't need replaying
 *	BLK_RESTORED - block was restored from a full-page image included in
 *		the record
 *	BLK_NOTFOUND - block was not found (because it was truncated away by
 *		an operation later in the WAL stream)
 *
 * On return, the buffer is locked in exclusive-mode, and returned in *buf.
 * Note that the buffer is locked and returned even if it doesn't need
 * replaying. (Getting the buffer lock is not really necessary during
 * single-process crash recovery, but some subroutines such as MarkBufferDirty
 * will complain if we don't have the lock. In hot standby mode it's
 * definitely necessary.)
 *
 * Note: when a backup block is available in XLOG with the BKPIMAGE_APPLY flag
 * set, we restore it, even if the page in the database appears newer. This
 * is to protect ourselves against database pages that were partially or
 * incorrectly written during a crash. We assume that the XLOG data must be
 * good because it has passed a CRC check, while the database page might not
 * be. This will force us to replay all subsequent modifications of the page
 * that appear in XLOG, rather than possibly ignoring them as already
 * applied, but that's not a huge drawback.
 */
XLogRedoAction
XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id,
					  Buffer *buf)
{
	return XLogReadBufferForRedoExtended(record, block_id, RBM_NORMAL,
										 false, buf);
}
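
/*
 * Illustrative sketch, not part of this file: how a resource manager's redo
 * routine typically consumes XLogReadBufferForRedo. The function name, the
 * guard macro, and the elided page modification are invented for
 * illustration; real examples live in the rmgr redo code (e.g. heapam).
 */
#ifdef XLOGUTILS_USAGE_EXAMPLE
static void
example_redo(XLogReaderState *record)
{
	Buffer		buffer;

	/* Apply the change only if the page hasn't already replayed this record */
	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
	{
		Page		page = BufferGetPage(buffer);

		/* ... modify the page using data carried in the WAL record ... */

		PageSetLSN(page, record->EndRecPtr);
		MarkBufferDirty(buffer);
	}

	/* BLK_RESTORED/BLK_DONE need no action; BLK_NOTFOUND returns no buffer */
	if (BufferIsValid(buffer))
		UnlockReleaseBuffer(buffer);
}
#endif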

/*
 * Pin and lock a buffer referenced by a WAL record, for the purpose of
 * re-initializing it.
 */
Buffer
XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
{
	Buffer		buf;

	XLogReadBufferForRedoExtended(record, block_id, RBM_ZERO_AND_LOCK, false,
								  &buf);
	return buf;
}

/*
 * XLogReadBufferForRedoExtended
 *		Like XLogReadBufferForRedo, but with extra options.
 *
 * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
 * with all-zeroes pages up to the referenced block number. In
 * RBM_ZERO_AND_LOCK and RBM_ZERO_AND_CLEANUP_LOCK modes, the return value
 * is always BLK_NEEDS_REDO.
 *
 * (The RBM_ZERO_AND_CLEANUP_LOCK mode is redundant with the get_cleanup_lock
 * parameter. Do not use an inconsistent combination!)
 *
 * If 'get_cleanup_lock' is true, a "cleanup lock" is acquired on the buffer
 * using LockBufferForCleanup(), instead of a regular exclusive lock.
 */
XLogRedoAction
XLogReadBufferForRedoExtended(XLogReaderState *record,
							  uint8 block_id,
							  ReadBufferMode mode, bool get_cleanup_lock,
							  Buffer *buf)
{
	XLogRecPtr	lsn = record->EndRecPtr;
	RelFileLocator rlocator;
	ForkNumber	forknum;
	BlockNumber blkno;
	Buffer		prefetch_buffer;
	Page		page;
	bool		zeromode;
	bool		willinit;

	if (!XLogRecGetBlockTagExtended(record, block_id, &rlocator, &forknum, &blkno,
									&prefetch_buffer))
	{
		/* Caller specified a bogus block_id */
		elog(PANIC, "failed to locate backup block with ID %d in WAL record",
			 block_id);
	}

	/*
	 * Make sure that if the block is marked with WILL_INIT, the caller is
	 * going to initialize it. And vice versa.
	 */
	zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK);
	willinit = (XLogRecGetBlock(record, block_id)->flags & BKPBLOCK_WILL_INIT) != 0;
	if (willinit && !zeromode)
		elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine");
	if (!willinit && zeromode)
		elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record");

	/* If it has a full-page image and it should be restored, do it. */
	if (XLogRecBlockImageApply(record, block_id))
	{
		Assert(XLogRecHasBlockImage(record, block_id));
		*buf = XLogReadBufferExtended(rlocator, forknum, blkno,
									  get_cleanup_lock ? RBM_ZERO_AND_CLEANUP_LOCK : RBM_ZERO_AND_LOCK,
									  prefetch_buffer);
		page = BufferGetPage(*buf);
		if (!RestoreBlockImage(record, block_id, page))
			ereport(ERROR,
					(errcode(ERRCODE_INTERNAL_ERROR),
					 errmsg_internal("%s", record->errormsg_buf)));

		/*
		 * The page may be uninitialized. If so, we can't set the LSN because
		 * that would corrupt the page.
		 */
		if (!PageIsNew(page))
		{
			PageSetLSN(page, lsn);
		}

		MarkBufferDirty(*buf);

		/*
		 * At the end of crash recovery the init forks of unlogged relations
		 * are copied, without going through shared buffers. So we need to
		 * force the on-disk state of init forks to always be in sync with the
		 * state in shared buffers.
		 */
		if (forknum == INIT_FORKNUM)
			FlushOneBuffer(*buf);

		return BLK_RESTORED;
	}
	else
	{
		*buf = XLogReadBufferExtended(rlocator, forknum, blkno, mode, prefetch_buffer);
		if (BufferIsValid(*buf))
		{
			if (mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK)
			{
				if (get_cleanup_lock)
					LockBufferForCleanup(*buf);
				else
					LockBuffer(*buf, BUFFER_LOCK_EXCLUSIVE);
			}
			if (lsn <= PageGetLSN(BufferGetPage(*buf)))
				return BLK_DONE;
			else
				return BLK_NEEDS_REDO;
		}
		else
			return BLK_NOTFOUND;
	}
}

/*
 * XLogReadBufferExtended
 *		Read a page during XLOG replay
 *
 * This is functionally comparable to ReadBufferExtended. There's some
 * differences in the behavior wrt. the "mode" argument:
 *
 * In RBM_NORMAL mode, if the page doesn't exist, or contains all-zeroes, we
 * return InvalidBuffer. In this case the caller should silently skip the
 * update on this page. (In this situation, we expect that the page was later
 * dropped or truncated. If we don't see evidence of that later in the WAL
 * sequence, we'll complain at the end of WAL replay.)
 *
 * In RBM_ZERO_* modes, if the page doesn't exist, the relation is extended
 * with all-zeroes pages up to the given block number.
 *
 * In RBM_NORMAL_NO_LOG mode, we return InvalidBuffer if the page doesn't
 * exist, and we don't check for all-zeroes. Thus, no log entry is made
 * to imply that the page should be dropped or truncated later.
 *
 * Optionally, recent_buffer can be used to provide a hint about the location
 * of the page in the buffer pool; it does not have to be correct, but avoids
 * a buffer mapping table probe if it is.
 *
 * NB: A redo function should normally not call this directly. To get a page
 * to modify, use XLogReadBufferForRedoExtended instead. It is important that
 * all pages modified by a WAL record are registered in the WAL records, or
 * they will be invisible to tools that need to know which pages are modified.
 */
Buffer
XLogReadBufferExtended(RelFileLocator rlocator, ForkNumber forknum,
					   BlockNumber blkno, ReadBufferMode mode,
					   Buffer recent_buffer)
{
	BlockNumber lastblock;
	Buffer		buffer;
	SMgrRelation smgr;

	Assert(blkno != P_NEW);

	/* Do we have a clue where the buffer might be already? */
	if (BufferIsValid(recent_buffer) &&
		mode == RBM_NORMAL &&
		ReadRecentBuffer(rlocator, forknum, blkno, recent_buffer))
	{
		buffer = recent_buffer;
		goto recent_buffer_fast_path;
	}

	/* Open the relation at smgr level */
	smgr = smgropen(rlocator, InvalidBackendId);

	/*
	 * Create the target file if it doesn't already exist. This lets us cope
	 * if the replay sequence contains writes to a relation that is later
	 * deleted. (The original coding of this routine would instead suppress
	 * the writes, but that seems like it risks losing valuable data if the
	 * filesystem loses an inode during a crash. Better to write the data
	 * until we are actually told to delete the file.)
	 */
	smgrcreate(smgr, forknum, true);

	lastblock = smgrnblocks(smgr, forknum);

	if (blkno < lastblock)
	{
		/* page exists in file */
		buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
										   mode, NULL, true);
	}
	else
	{
		/* hm, page doesn't exist in file */
		if (mode == RBM_NORMAL)
		{
			log_invalid_page(rlocator, forknum, blkno, false);
			return InvalidBuffer;
		}
		if (mode == RBM_NORMAL_NO_LOG)
			return InvalidBuffer;
		/* OK to extend the file */
		/* we do this in recovery only - no rel-extension lock needed */
		Assert(InRecovery);
		buffer = InvalidBuffer;
		do
		{
			if (buffer != InvalidBuffer)
			{
				if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
					LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
				ReleaseBuffer(buffer);
			}
			buffer = ReadBufferWithoutRelcache(rlocator, forknum,
											   P_NEW, mode, NULL, true);
		}
		while (BufferGetBlockNumber(buffer) < blkno);
		/* Handle the corner case that P_NEW returns non-consecutive pages */
		if (BufferGetBlockNumber(buffer) != blkno)
		{
			if (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK)
				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
			ReleaseBuffer(buffer);
			buffer = ReadBufferWithoutRelcache(rlocator, forknum, blkno,
											   mode, NULL, true);
		}
	}

recent_buffer_fast_path:
	if (mode == RBM_NORMAL)
	{
		/* check that page has been initialized */
		Page		page = (Page) BufferGetPage(buffer);

		/*
		 * We assume that PageIsNew is safe without a lock. During recovery,
		 * there should be no other backends that could modify the buffer at
		 * the same time.
		 */
		if (PageIsNew(page))
		{
			ReleaseBuffer(buffer);
			log_invalid_page(rlocator, forknum, blkno, true);
			return InvalidBuffer;
		}
	}

	return buffer;
}

/*
 * Struct actually returned by CreateFakeRelcacheEntry, though the declared
 * return type is Relation.
 */
typedef struct
{
	RelationData reldata;		/* Note: this must be first */
	FormData_pg_class pgc;
} FakeRelCacheEntryData;

typedef FakeRelCacheEntryData *FakeRelCacheEntry;

/*
 * Create a fake relation cache entry for a physical relation
 *
 * It's often convenient to use the same functions in XLOG replay as in the
 * main codepath, but those functions typically work with a relcache entry.
 * We don't have a working relation cache during XLOG replay, but this
 * function can be used to create a fake relcache entry instead. Only the
 * fields related to physical storage, like rd_rel, are initialized, so the
 * fake entry is only usable in low-level operations like ReadBuffer().
 *
 * This is also used for syncing WAL-skipped files.
 *
 * Caller must free the returned entry with FreeFakeRelcacheEntry().
 */
Relation
CreateFakeRelcacheEntry(RelFileLocator rlocator)
{
	FakeRelCacheEntry fakeentry;
	Relation	rel;

	/* Allocate the Relation struct and all related space in one block. */
	fakeentry = palloc0(sizeof(FakeRelCacheEntryData));
	rel = (Relation) fakeentry;

	rel->rd_rel = &fakeentry->pgc;
	rel->rd_locator = rlocator;

	/*
	 * We will never be working with temp rels during recovery or while
	 * syncing WAL-skipped files.
	 */
	rel->rd_backend = InvalidBackendId;

	/* It must be a permanent table here */
	rel->rd_rel->relpersistence = RELPERSISTENCE_PERMANENT;

	/* We don't know the name of the relation; use relfilenumber instead */
	sprintf(RelationGetRelationName(rel), "%u", rlocator.relNumber);

	/*
	 * We set up the lockRelId in case anything tries to lock the dummy
	 * relation. Note that this is fairly bogus since relNumber may be
	 * different from the relation's OID. It shouldn't really matter though.
	 * In recovery, we are running by ourselves and can't have any lock
	 * conflicts. While syncing, we already hold AccessExclusiveLock.
	 */
	rel->rd_lockInfo.lockRelId.dbId = rlocator.dbOid;
	rel->rd_lockInfo.lockRelId.relId = rlocator.relNumber;

	rel->rd_smgr = NULL;

	return rel;
}

/*
 * Free a fake relation cache entry.
 */
void
FreeFakeRelcacheEntry(Relation fakerel)
{
	/* make sure the fakerel is not referenced by the SmgrRelation anymore */
	if (fakerel->rd_smgr != NULL)
		smgrclearowner(&fakerel->rd_smgr, fakerel->rd_smgr);
	pfree(fakerel);
}
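
/*
 * Illustrative sketch, not part of this file: the typical lifecycle of a
 * fake relcache entry. The function name and guard macro are invented for
 * illustration; the point is that only physical-storage operations such as
 * ReadBuffer() are legal on the fake entry, and it must be freed with
 * FreeFakeRelcacheEntry() rather than plain pfree().
 */
#ifdef XLOGUTILS_USAGE_EXAMPLE
static void
example_use_fake_relcache(RelFileLocator rlocator, BlockNumber blkno)
{
	Relation	rel = CreateFakeRelcacheEntry(rlocator);
	Buffer		buffer;

	/* Low-level, physical-storage operations are fine on a fake entry... */
	buffer = ReadBuffer(rel, blkno);
	/* ... but anything needing catalog contents (rd_att etc.) is not. */
	ReleaseBuffer(buffer);

	FreeFakeRelcacheEntry(rel);
}
#endif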

/*
 * Drop a relation during XLOG replay
 *
 * This is called when the relation is about to be deleted; we need to remove
 * any open "invalid-page" records for the relation.
 */
void
XLogDropRelation(RelFileLocator rlocator, ForkNumber forknum)
{
	forget_invalid_pages(rlocator, forknum, 0);
}

/*
 * Drop a whole database during XLOG replay
 *
 * As above, but for DROP DATABASE instead of dropping a single rel
 */
void
XLogDropDatabase(Oid dbid)
{
	/*
	 * This is unnecessarily heavy-handed, as it will close SMgrRelation
	 * objects for other databases as well. DROP DATABASE occurs seldom enough
	 * that it's not worth introducing a variant of smgrclose for just this
	 * purpose. XXX: Or should we rather leave the smgr entries dangling?
	 */
	smgrcloseall();

	forget_invalid_pages_db(dbid);
}

/*
 * Truncate a relation during XLOG replay
 *
 * We need to clean up any open "invalid-page" records for the dropped pages.
 */
void
XLogTruncateRelation(RelFileLocator rlocator, ForkNumber forkNum,
					 BlockNumber nblocks)
{
	forget_invalid_pages(rlocator, forkNum, nblocks);
}

/*
 * Determine which timeline to read an xlog page from and set the
 * XLogReaderState's currTLI to that timeline ID.
 *
 * We care about timelines in xlogreader when we might be reading xlog
 * generated prior to a promotion, either if we're currently a standby in
 * recovery or if we're a promoted primary reading xlogs generated by the old
 * primary before our promotion.
 *
 * wantPage must be set to the start address of the page to read and
 * wantLength to the amount of the page that will be read, up to
 * XLOG_BLCKSZ. If the amount to be read isn't known, pass XLOG_BLCKSZ.
 *
 * The currTLI argument should be the system-wide current timeline.
 * Note that this may be different from state->currTLI, which is the timeline
 * from which the caller is currently reading previous xlog records.
 *
 * We switch to an xlog segment from the new timeline eagerly when on a
 * historical timeline, as soon as we reach the start of the xlog segment
 * containing the timeline switch. The server copied the segment to the new
 * timeline so all the data up to the switch point is the same, but there's no
 * guarantee the old segment will still exist. It may have been deleted or
 * renamed with a .partial suffix so we can't necessarily keep reading from
 * the old TLI even though tliSwitchPoint says it's OK.
 *
 * We can't just check the timeline when we read a page on a different segment
 * to the last page. We could've received a timeline switch from a cascading
 * upstream, so the current segment ends abruptly (possibly getting renamed to
 * .partial) and we have to switch to a new one. Even in the middle of reading
 * a page we could have to dump the cached page and switch to a new TLI.
 *
 * Because of this, callers MAY NOT assume that currTLI is the timeline that
 * will be in a page's xlp_tli; the page may begin on an older timeline or we
 * might be reading from historical timeline data on a segment that's been
 * copied to a new timeline.
 *
 * The caller must also make sure it doesn't read past the current replay
 * position (using GetXLogReplayRecPtr) if executing in recovery, so it
 * doesn't fail to notice that the current timeline became historical.
 */
void
XLogReadDetermineTimeline(XLogReaderState *state, XLogRecPtr wantPage,
						  uint32 wantLength, TimeLineID currTLI)
{
	const XLogRecPtr lastReadPage = (state->seg.ws_segno *
									 state->segcxt.ws_segsize + state->segoff);

	Assert(wantPage != InvalidXLogRecPtr && wantPage % XLOG_BLCKSZ == 0);
	Assert(wantLength <= XLOG_BLCKSZ);
	Assert(state->readLen == 0 || state->readLen <= XLOG_BLCKSZ);
	Assert(currTLI != 0);

	/*
	 * If the desired page is currently read in and valid, we have nothing to
	 * do.
	 *
	 * The caller should've ensured that it didn't previously advance readOff
	 * past the valid limit of this timeline, so it doesn't matter if the
	 * current TLI has since become historical.
	 */
	if (lastReadPage == wantPage &&
		state->readLen != 0 &&
		lastReadPage + state->readLen >= wantPage + Min(wantLength, XLOG_BLCKSZ - 1))
		return;

	/*
	 * If we're reading from the current timeline, it hasn't become historical
	 * and the page we're reading is after the last page read, we can again
	 * just carry on. (Seeking backwards requires a check to make sure the
	 * older page isn't on a prior timeline).
	 *
	 * currTLI might've become historical since the caller obtained the value,
	 * but the caller is required not to read past the flush limit it saw at
	 * the time it looked up the timeline. There's nothing we can do about it
	 * if StartupXLOG() renames it to .partial concurrently.
	 */
	if (state->currTLI == currTLI && wantPage >= lastReadPage)
	{
		Assert(state->currTLIValidUntil == InvalidXLogRecPtr);
		return;
	}

	/*
	 * If we're just reading pages from a previously validated historical
	 * timeline and the timeline we're reading from is valid until the end of
	 * the current segment we can just keep reading.
	 */
	if (state->currTLIValidUntil != InvalidXLogRecPtr &&
		state->currTLI != currTLI &&
		state->currTLI != 0 &&
		((wantPage + wantLength) / state->segcxt.ws_segsize) <
		(state->currTLIValidUntil / state->segcxt.ws_segsize))
		return;

	/*
	 * If we reach this point we're either looking up a page for random
	 * access, the current timeline just became historical, or we're reading
	 * from a new segment containing a timeline switch. In all cases we need
	 * to determine the newest timeline on the segment.
	 *
	 * If it's the current timeline we can just keep reading from here unless
	 * we detect a timeline switch that makes the current timeline historical.
	 * If it's a historical timeline we can read all the segment on the newest
	 * timeline because it contains all the old timelines' data too. So only
	 * one switch check is required.
	 */
	{
		/*
		 * We need to re-read the timeline history in case it's been changed
		 * by a promotion or replay from a cascaded replica.
		 */
		List	   *timelineHistory = readTimeLineHistory(currTLI);
		XLogRecPtr	endOfSegment;

		endOfSegment = ((wantPage / state->segcxt.ws_segsize) + 1) *
			state->segcxt.ws_segsize - 1;
		Assert(wantPage / state->segcxt.ws_segsize ==
			   endOfSegment / state->segcxt.ws_segsize);

		/*
		 * Find the timeline of the last LSN on the segment containing
		 * wantPage.
		 */
		state->currTLI = tliOfPointInHistory(endOfSegment, timelineHistory);
		state->currTLIValidUntil = tliSwitchPoint(state->currTLI, timelineHistory,
												  &state->nextTLI);

		Assert(state->currTLIValidUntil == InvalidXLogRecPtr ||
			   wantPage + wantLength < state->currTLIValidUntil);

		list_free_deep(timelineHistory);

		elog(DEBUG3, "switched to timeline %u valid until %X/%X",
			 state->currTLI,
			 LSN_FORMAT_ARGS(state->currTLIValidUntil));
	}
}

/* XLogReaderRoutine->segment_open callback for local pg_wal files */
void
wal_segment_open(XLogReaderState *state, XLogSegNo nextSegNo,
				 TimeLineID *tli_p)
{
	TimeLineID	tli = *tli_p;
	char		path[MAXPGPATH];

	XLogFilePath(path, tli, nextSegNo, state->segcxt.ws_segsize);
	state->seg.ws_file = BasicOpenFile(path, O_RDONLY | PG_BINARY);
	if (state->seg.ws_file >= 0)
		return;

	if (errno == ENOENT)
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("requested WAL segment %s has already been removed",
						path)));
	else
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not open file \"%s\": %m",
						path)));
}

/* stock XLogReaderRoutine->segment_close callback */
void
wal_segment_close(XLogReaderState *state)
{
	close(state->seg.ws_file);
	/* need to check errno? */
	state->seg.ws_file = -1;
}

/*
 * XLogReaderRoutine->page_read callback for reading local xlog files
 *
 * Public because it would likely be very helpful for someone writing another
 * output method outside walsender, e.g. in a bgworker.
 *
 * TODO: The walsender has its own version of this, but it relies on the
 * walsender's latch being set whenever WAL is flushed. No such infrastructure
 * exists for normal backends, so we have to do a check/sleep/repeat style of
 * loop for now.
 */
int
read_local_xlog_page(XLogReaderState *state, XLogRecPtr targetPagePtr,
					 int reqLen, XLogRecPtr targetRecPtr, char *cur_page)
{
	return read_local_xlog_page_guts(state, targetPagePtr, reqLen,
									 targetRecPtr, cur_page, true);
}
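
/*
 * Illustrative sketch, not part of this file: wiring these callbacks into an
 * xlogreader, e.g. from a bgworker. XLogReaderAllocate and XL_ROUTINE are the
 * real xlogreader API; the function name, guard macro, start LSN handling,
 * and error handling are simplified for illustration.
 */
#ifdef XLOGUTILS_USAGE_EXAMPLE
static void
example_read_wal_locally(XLogRecPtr start_lsn)
{
	XLogReaderState *reader;
	XLogRecord *record;
	char	   *errormsg;

	reader = XLogReaderAllocate(wal_segment_size, NULL,
								XL_ROUTINE(.page_read = &read_local_xlog_page,
										   .segment_open = &wal_segment_open,
										   .segment_close = &wal_segment_close),
								NULL);
	if (reader == NULL)
		elog(ERROR, "out of memory");

	XLogBeginRead(reader, start_lsn);
	while ((record = XLogReadRecord(reader, &errormsg)) != NULL)
	{
		/* ... inspect the decoded record via the reader state ... */
	}

	XLogReaderFree(reader);
}
#endif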

/*
 * Same as read_local_xlog_page except that it doesn't wait for future WAL
 * to be available.
 */
int
read_local_xlog_page_no_wait(XLogReaderState *state, XLogRecPtr targetPagePtr,
							 int reqLen, XLogRecPtr targetRecPtr,
							 char *cur_page)
{
	return read_local_xlog_page_guts(state, targetPagePtr, reqLen,
									 targetRecPtr, cur_page, false);
}

/*
 * Implementation of read_local_xlog_page and its no wait version.
 */
static int
read_local_xlog_page_guts(XLogReaderState *state, XLogRecPtr targetPagePtr,
						  int reqLen, XLogRecPtr targetRecPtr,
						  char *cur_page, bool wait_for_wal)
{
	XLogRecPtr	read_upto,
				loc;
	TimeLineID	tli;
	int			count;
	WALReadError errinfo;
	TimeLineID	currTLI;

	loc = targetPagePtr + reqLen;

	/* Loop waiting for xlog to be available if necessary */
	while (1)
	{
		/*
		 * Determine the limit of xlog we can currently read to, and what the
		 * most recent timeline is.
		 */
		if (!RecoveryInProgress())
			read_upto = GetFlushRecPtr(&currTLI);
		else
			read_upto = GetXLogReplayRecPtr(&currTLI);
		tli = currTLI;

		/*
		 * Check which timeline to get the record from.
		 *
		 * We have to do it each time through the loop because if we're in
		 * recovery as a cascading standby, the current timeline might've
		 * become historical. We can't rely on RecoveryInProgress() because in
		 * a standby configuration like
		 *
		 * A => B => C
		 *
		 * if we're a logical decoding session on C, and B gets promoted, our
		 * timeline will change while we remain in recovery.
		 *
		 * We can't just keep reading from the old timeline as the last WAL
		 * archive in the timeline will get renamed to .partial by
		 * StartupXLOG().
		 *
		 * If that happens after our caller determined the TLI but before we
		 * actually read the xlog page, we might still try to read from the
		 * old (now renamed) segment and fail. There's not much we can do
		 * about this, but it can only happen when we're a leaf of a cascading
		 * standby whose primary gets promoted while we're decoding, so a
		 * one-off ERROR isn't too bad.
		 */
		XLogReadDetermineTimeline(state, targetPagePtr, reqLen, tli);

		if (state->currTLI == currTLI)
		{
			if (loc <= read_upto)
				break;

			/* If asked, let's not wait for future WAL. */
			if (!wait_for_wal)
			{
				ReadLocalXLogPageNoWaitPrivate *private_data;

				/*
				 * Inform the caller of read_local_xlog_page_no_wait that the
				 * end of WAL has been reached.
				 */
				private_data = (ReadLocalXLogPageNoWaitPrivate *)
					state->private_data;
				private_data->end_of_wal = true;
				break;
			}

			CHECK_FOR_INTERRUPTS();
			pg_usleep(1000L);
		}
		else
		{
			/*
			 * We're on a historical timeline, so limit reading to the switch
			 * point where we moved to the next timeline.
			 *
			 * We don't need to GetFlushRecPtr or GetXLogReplayRecPtr. We know
			 * about the new timeline, so we must've received past the end of
			 * it.
			 */
			read_upto = state->currTLIValidUntil;

			/*
			 * Setting tli to our wanted record's TLI is slightly wrong; the
			 * page might begin on an older timeline if it contains a timeline
			 * switch, since its xlog segment will have been copied from the
			 * prior timeline. This is pretty harmless though, as nothing
			 * cares so long as the timeline doesn't go backwards. We should
			 * read the page header instead; FIXME someday.
			 */
			tli = state->currTLI;

			/* No need to wait on a historical timeline */
			break;
		}
	}

	if (targetPagePtr + XLOG_BLCKSZ <= read_upto)
	{
		/*
		 * more than one block available; read only that block, have caller
		 * come back if they need more.
		 */
		count = XLOG_BLCKSZ;
	}
	else if (targetPagePtr + reqLen > read_upto)
	{
		/* not enough data there */
		return -1;
	}
	else
	{
		/* enough bytes available to satisfy the request */
		count = read_upto - targetPagePtr;
	}

	/*
	 * Even though we just determined how much of the page can be validly read
	 * as 'count', read the whole page anyway. It's guaranteed to be
	 * zero-padded up to the page boundary if it's incomplete.
	 */
	if (!WALRead(state, cur_page, targetPagePtr, XLOG_BLCKSZ, tli,
				 &errinfo))
		WALReadRaiseError(&errinfo);

	/* number of valid bytes in the buffer */
	return count;
}

/*
 * Backend-specific convenience code to handle read errors encountered by
 * WALRead().
 */
void
WALReadRaiseError(WALReadError *errinfo)
{
	WALOpenSegment *seg = &errinfo->wre_seg;
	char		fname[MAXFNAMELEN];

	XLogFileName(fname, seg->ws_tli, seg->ws_segno, wal_segment_size);

	if (errinfo->wre_read < 0)
	{
		errno = errinfo->wre_errno;
		ereport(ERROR,
				(errcode_for_file_access(),
				 errmsg("could not read from WAL segment %s, offset %d: %m",
						fname, errinfo->wre_off)));
	}
	else if (errinfo->wre_read == 0)
	{
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg("could not read from WAL segment %s, offset %d: read %d of %d",
						fname, errinfo->wre_off, errinfo->wre_read,
						errinfo->wre_req)));
	}
}