/*-------------------------------------------------------------------------
 *
 * nbtxlog.c
 *    WAL replay logic for btrees.
 *
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *    src/backend/access/nbtree/nbtxlog.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/bufmask.h"
#include "access/nbtree.h"
#include "access/nbtxlog.h"
#include "access/transam.h"
#include "access/xlogutils.h"
#include "storage/standby.h"
#include "utils/memutils.h"

static MemoryContext opCtx;     /* working memory for operations */

/*
 * _bt_restore_page -- re-enter all the index tuples on a page
 *
 * The page is freshly init'd, and *from (length len) is a copy of what had
 * been its upper part (pd_upper to pd_special).  We assume that the tuples
 * had been added to the page in item-number order, and therefore the one
 * with highest item number appears first (lowest on the page).
 */
static void
_bt_restore_page(Page page, char *from, int len)
{
    IndexTupleData itupdata;
    Size        itemsz;
    char       *end = from + len;
    Item        items[MaxIndexTuplesPerPage];
    uint16      itemsizes[MaxIndexTuplesPerPage];
    int         i;
    int         nitems;

    /*
     * To get the items back in the original order, we add them to the page
     * in reverse.  To figure out where one tuple ends and another begins,
     * we have to scan them in forward order first.
     */
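    /*
     * For example (a hypothetical three-tuple page): the copied upper part
     * holds item 3 at the lowest address, then item 2, then item 1.  The
     * forward scan below records each tuple's start and MAXALIGN'd size;
     * the reverse loop that follows then re-adds item 1 at offset 1, item 2
     * at offset 2, and so on, reproducing the original item order.
     */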
    i = 0;
    while (from < end)
    {
        /*
         * As we step through the items, 'from' won't always be properly
         * aligned, so we need to use memcpy().  Further, we use Item (which
         * is just a char*) here for our items array for the same reason;
         * wouldn't want the compiler or anyone thinking that an item is
         * aligned when it isn't.
         */
        memcpy(&itupdata, from, sizeof(IndexTupleData));
        itemsz = IndexTupleSize(&itupdata);
        itemsz = MAXALIGN(itemsz);

        items[i] = (Item) from;
        itemsizes[i] = itemsz;
        i++;

        from += itemsz;
    }
    nitems = i;

    for (i = nitems - 1; i >= 0; i--)
    {
        if (PageAddItem(page, items[i], itemsizes[i], nitems - i,
                        false, false) == InvalidOffsetNumber)
            elog(PANIC, "_bt_restore_page: cannot add item to page");
    }
}

static void
_bt_restore_meta(XLogReaderState *record, uint8 block_id)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      metabuf;
    Page        metapg;
    BTMetaPageData *md;
    BTPageOpaque pageop;
    xl_btree_metadata *xlrec;
    char       *ptr;
    Size        len;

    metabuf = XLogInitBufferForRedo(record, block_id);
    ptr = XLogRecGetBlockData(record, block_id, &len);

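    /*
     * The metapage is rebuilt wholesale from the xl_btree_metadata payload
     * carried in the WAL record; none of the old page's contents is reused.
     */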
    Assert(len == sizeof(xl_btree_metadata));
    Assert(BufferGetBlockNumber(metabuf) == BTREE_METAPAGE);
    xlrec = (xl_btree_metadata *) ptr;
    metapg = BufferGetPage(metabuf);

    _bt_pageinit(metapg, BufferGetPageSize(metabuf));

    md = BTPageGetMeta(metapg);
    md->btm_magic = BTREE_MAGIC;
    md->btm_version = xlrec->version;
    md->btm_root = xlrec->root;
    md->btm_level = xlrec->level;
    md->btm_fastroot = xlrec->fastroot;
    md->btm_fastlevel = xlrec->fastlevel;
    /* Cannot log BTREE_MIN_VERSION index metapage without upgrade */
    Assert(md->btm_version >= BTREE_NOVAC_VERSION);
    md->btm_last_cleanup_num_delpages = xlrec->last_cleanup_num_delpages;
    md->btm_last_cleanup_num_heap_tuples = -1.0;
    md->btm_allequalimage = xlrec->allequalimage;

    pageop = BTPageGetOpaque(metapg);
    pageop->btpo_flags = BTP_META;

    /*
     * Set pd_lower just past the end of the metadata.  This is essential,
     * because without doing so, metadata will be lost if xlog.c compresses
     * the page.
     */
    ((PageHeader) metapg)->pd_lower =
        ((char *) md + sizeof(BTMetaPageData)) - (char *) metapg;

    PageSetLSN(metapg, lsn);
    MarkBufferDirty(metabuf);
    UnlockReleaseBuffer(metabuf);
}

/*
 * _bt_clear_incomplete_split -- clear INCOMPLETE_SPLIT flag on a page
 *
 * This is a common subroutine of the redo functions of all the WAL record
 * types that can insert a downlink: insert, split, and newroot.
 */
static void
_bt_clear_incomplete_split(XLogReaderState *record, uint8 block_id)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    Buffer      buf;

    if (XLogReadBufferForRedo(record, block_id, &buf) == BLK_NEEDS_REDO)
    {
        Page        page = (Page) BufferGetPage(buf);
        BTPageOpaque pageop = BTPageGetOpaque(page);

        Assert(P_INCOMPLETE_SPLIT(pageop));
        pageop->btpo_flags &= ~BTP_INCOMPLETE_SPLIT;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buf);
    }
    if (BufferIsValid(buf))
        UnlockReleaseBuffer(buf);
}

static void
btree_xlog_insert(bool isleaf, bool ismeta, bool posting,
                  XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_insert *xlrec = (xl_btree_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;

    /*
     * Insertion to an internal page finishes an incomplete split at the
     * child level.  Clear the incomplete-split flag in the child.  Note:
     * during normal operation, the child and parent pages are locked at the
     * same time (the locks are coupled), so that clearing the flag and
     * inserting the downlink appear atomic to other backends.  We don't
     * bother with that during replay, because readers don't care about the
     * incomplete-split flag and there cannot be updates happening.
     */
    if (!isleaf)
        _bt_clear_incomplete_split(record, 1);

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        Size        datalen;
        char       *datapos = XLogRecGetBlockData(record, 0, &datalen);

        page = BufferGetPage(buffer);

        if (!posting)
        {
            /* Simple retail insertion */
            if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
                            false, false) == InvalidOffsetNumber)
                elog(PANIC, "failed to add new item");
        }
        else
        {
            ItemId      itemid;
            IndexTuple  oposting,
                        newitem,
                        nposting;
            uint16      postingoff;

            /*
             * A posting list split occurred during leaf page insertion.
             * The WAL record data starts with an offset number giving the
             * point in the existing posting list at which the split
             * occurred.
             *
             * Use _bt_swap_posting() to repeat the posting list split steps
             * from the primary.  Note that newitem from the WAL record is
             * 'orignewitem', not the final version of newitem that is
             * actually inserted on the page.
             */
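            /*
             * Block 0 data layout for a posting split is therefore: a
             * uint16 postingoff, immediately followed by orignewitem.
             */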
            postingoff = *((uint16 *) datapos);
            datapos += sizeof(uint16);
            datalen -= sizeof(uint16);

            itemid = PageGetItemId(page, OffsetNumberPrev(xlrec->offnum));
            oposting = (IndexTuple) PageGetItem(page, itemid);

            /* Use mutable, aligned newitem copy in _bt_swap_posting() */
            Assert(isleaf && postingoff > 0);
            newitem = CopyIndexTuple((IndexTuple) datapos);
            nposting = _bt_swap_posting(newitem, oposting, postingoff);

            /* Replace existing posting list with post-split version */
            memcpy(oposting, nposting, MAXALIGN(IndexTupleSize(nposting)));

            /* Insert "final" new item (not orignewitem from WAL stream) */
            Assert(IndexTupleSize(newitem) == datalen);
            if (PageAddItem(page, (Item) newitem, datalen, xlrec->offnum,
                            false, false) == InvalidOffsetNumber)
                elog(PANIC, "failed to add posting split new item");
        }

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /*
     * Note: in normal operation, we'd update the metapage while still
     * holding lock on the page we inserted into.  But during replay it's
     * not necessary to hold that lock, since no other index updates can be
     * happening concurrently, and readers will cope fine with following an
     * obsolete link from the metapage.
     */
    if (ismeta)
        _bt_restore_meta(record, 2);
}

static void
btree_xlog_split(bool newitemonleft, XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
    bool        isleaf = (xlrec->level == 0);
    Buffer      buf;
    Buffer      rbuf;
    Page        rpage;
    BTPageOpaque ropaque;
    char       *datapos;
    Size        datalen;
    BlockNumber origpagenumber;
    BlockNumber rightpagenumber;
    BlockNumber spagenumber;

    XLogRecGetBlockTag(record, 0, NULL, NULL, &origpagenumber);
    XLogRecGetBlockTag(record, 1, NULL, NULL, &rightpagenumber);
    if (!XLogRecGetBlockTagExtended(record, 2, NULL, NULL, &spagenumber, NULL))
        spagenumber = P_NONE;

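    /*
     * Block references in a split record: 0 is the original (left) page,
     * 1 is the new right page, 2 is the original page's old right sibling
     * (absent when the split page was rightmost), and 3 is the child page
     * whose incomplete split this split finishes (internal splits only).
     */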
    /*
     * Clear the incomplete split flag on the appropriate child page one
     * level down when origpage/buf is an internal page (there must have
     * been cascading page splits during original execution in the event of
     * an internal page split).  This is like the corresponding
     * btree_xlog_insert call for internal pages.  We're not clearing the
     * incomplete split flag for the current page split here (you can think
     * of this as part of the insert of newitem that the page split action
     * needs to perform in passing).
     *
     * Like in btree_xlog_insert, this can be done before locking other
     * pages.  We never need to couple cross-level locks in REDO routines.
     */
    if (!isleaf)
        _bt_clear_incomplete_split(record, 3);

    /* Reconstruct right (new) sibling page from scratch */
    rbuf = XLogInitBufferForRedo(record, 1);
    datapos = XLogRecGetBlockData(record, 1, &datalen);
    rpage = (Page) BufferGetPage(rbuf);

    _bt_pageinit(rpage, BufferGetPageSize(rbuf));
    ropaque = BTPageGetOpaque(rpage);

    ropaque->btpo_prev = origpagenumber;
    ropaque->btpo_next = spagenumber;
    ropaque->btpo_level = xlrec->level;
    ropaque->btpo_flags = isleaf ? BTP_LEAF : 0;
    ropaque->btpo_cycleid = 0;

    _bt_restore_page(rpage, datapos, datalen);

    PageSetLSN(rpage, lsn);
    MarkBufferDirty(rbuf);

    /* Now reconstruct original page (left half of split) */
    if (XLogReadBufferForRedo(record, 0, &buf) == BLK_NEEDS_REDO)
    {
        /*
         * To retain the tuples' original physical order, we initialize a
         * temporary empty page for the left page and add all the items to
         * that in item number order.  This mirrors how _bt_split() works.
         * Retaining the same physical order makes WAL consistency checking
         * possible.  See also _bt_restore_page(), which does the same for
         * the right page.
         */
        Page        origpage = (Page) BufferGetPage(buf);
        BTPageOpaque oopaque = BTPageGetOpaque(origpage);
        OffsetNumber off;
        IndexTuple  newitem = NULL,
                    left_hikey = NULL,
                    nposting = NULL;
        Size        newitemsz = 0,
                    left_hikeysz = 0;
        Page        leftpage;
        OffsetNumber leftoff,
                    replacepostingoff = InvalidOffsetNumber;

        datapos = XLogRecGetBlockData(record, 0, &datalen);

        if (newitemonleft || xlrec->postingoff != 0)
        {
            newitem = (IndexTuple) datapos;
            newitemsz = MAXALIGN(IndexTupleSize(newitem));
            datapos += newitemsz;
            datalen -= newitemsz;

            if (xlrec->postingoff != 0)
            {
                ItemId      itemid;
                IndexTuple  oposting;

                /* Posting list must be at offset number before new item's */
                replacepostingoff = OffsetNumberPrev(xlrec->newitemoff);

                /* Use mutable, aligned newitem copy in _bt_swap_posting() */
                newitem = CopyIndexTuple(newitem);
                itemid = PageGetItemId(origpage, replacepostingoff);
                oposting = (IndexTuple) PageGetItem(origpage, itemid);
                nposting = _bt_swap_posting(newitem, oposting,
                                            xlrec->postingoff);
            }
        }

        /*
         * Extract left hikey and its size.  We assume that 16-bit alignment
         * is enough to apply IndexTupleSize (since it's fetching from a
         * uint16 field).
         */
        left_hikey = (IndexTuple) datapos;
        left_hikeysz = MAXALIGN(IndexTupleSize(left_hikey));
        datapos += left_hikeysz;
        datalen -= left_hikeysz;

        Assert(datalen == 0);

        leftpage = PageGetTempPageCopySpecial(origpage);

        /* Add high key tuple from WAL record to temp page */
        leftoff = P_HIKEY;
        if (PageAddItem(leftpage, (Item) left_hikey, left_hikeysz, P_HIKEY,
                        false, false) == InvalidOffsetNumber)
            elog(ERROR, "failed to add high key to left page after split");
        leftoff = OffsetNumberNext(leftoff);

        for (off = P_FIRSTDATAKEY(oopaque); off < xlrec->firstrightoff; off++)
        {
            ItemId      itemid;
            Size        itemsz;
            IndexTuple  item;

            /* Add replacement posting list when required */
            if (off == replacepostingoff)
            {
                Assert(newitemonleft ||
                       xlrec->firstrightoff == xlrec->newitemoff);
                if (PageAddItem(leftpage, (Item) nposting,
                                MAXALIGN(IndexTupleSize(nposting)), leftoff,
                                false, false) == InvalidOffsetNumber)
                    elog(ERROR, "failed to add new posting list item to left page after split");
                leftoff = OffsetNumberNext(leftoff);
                continue;       /* don't insert oposting */
            }

            /* add the new item if it was inserted on left page */
            else if (newitemonleft && off == xlrec->newitemoff)
            {
                if (PageAddItem(leftpage, (Item) newitem, newitemsz, leftoff,
                                false, false) == InvalidOffsetNumber)
                    elog(ERROR, "failed to add new item to left page after split");
                leftoff = OffsetNumberNext(leftoff);
            }

            itemid = PageGetItemId(origpage, off);
            itemsz = ItemIdGetLength(itemid);
            item = (IndexTuple) PageGetItem(origpage, itemid);
            if (PageAddItem(leftpage, (Item) item, itemsz, leftoff,
                            false, false) == InvalidOffsetNumber)
                elog(ERROR, "failed to add old item to left page after split");
            leftoff = OffsetNumberNext(leftoff);
        }

        /* cope with possibility that newitem goes at the end */
        if (newitemonleft && off == xlrec->newitemoff)
        {
            if (PageAddItem(leftpage, (Item) newitem, newitemsz, leftoff,
                            false, false) == InvalidOffsetNumber)
                elog(ERROR, "failed to add new item to left page after split");
            leftoff = OffsetNumberNext(leftoff);
        }

        PageRestoreTempPage(leftpage, origpage);

        /* Fix opaque fields */
        oopaque->btpo_flags = BTP_INCOMPLETE_SPLIT;
        if (isleaf)
            oopaque->btpo_flags |= BTP_LEAF;
        oopaque->btpo_next = rightpagenumber;
        oopaque->btpo_cycleid = 0;

        PageSetLSN(origpage, lsn);
        MarkBufferDirty(buf);
    }

    /* Fix left-link of the page to the right of the new right sibling */
    if (spagenumber != P_NONE)
    {
        Buffer      sbuf;

        if (XLogReadBufferForRedo(record, 2, &sbuf) == BLK_NEEDS_REDO)
        {
            Page        spage = (Page) BufferGetPage(sbuf);
            BTPageOpaque spageop = BTPageGetOpaque(spage);

            spageop->btpo_prev = rightpagenumber;

            PageSetLSN(spage, lsn);
            MarkBufferDirty(sbuf);
        }
        if (BufferIsValid(sbuf))
            UnlockReleaseBuffer(sbuf);
    }

    /*
     * Finally, release the remaining buffers.  sbuf, rbuf, and buf must be
     * released together, so that readers cannot observe inconsistencies.
     */
    UnlockReleaseBuffer(rbuf);
    if (BufferIsValid(buf))
        UnlockReleaseBuffer(buf);
}

static void
btree_xlog_dedup(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_dedup *xlrec = (xl_btree_dedup *) XLogRecGetData(record);
    Buffer      buf;

    if (XLogReadBufferForRedo(record, 0, &buf) == BLK_NEEDS_REDO)
    {
        char       *ptr = XLogRecGetBlockData(record, 0, NULL);
        Page        page = (Page) BufferGetPage(buf);
        BTPageOpaque opaque = BTPageGetOpaque(page);
        OffsetNumber offnum,
                    minoff,
                    maxoff;
        BTDedupState state;
        BTDedupInterval *intervals;
        Page        newpage;

        state = (BTDedupState) palloc(sizeof(BTDedupStateData));
        state->deduplicate = true;  /* unused */
        state->nmaxitems = 0;   /* unused */
        /* Conservatively use larger maxpostingsize than primary */
        state->maxpostingsize = BTMaxItemSize(page);
        state->base = NULL;
        state->baseoff = InvalidOffsetNumber;
        state->basetupsize = 0;
        state->htids = palloc(state->maxpostingsize);
        state->nhtids = 0;
        state->nitems = 0;
        state->phystupsize = 0;
        state->nintervals = 0;

        minoff = P_FIRSTDATAKEY(opaque);
        maxoff = PageGetMaxOffsetNumber(page);
        newpage = PageGetTempPageCopySpecial(page);

        if (!P_RIGHTMOST(opaque))
        {
            ItemId      itemid = PageGetItemId(page, P_HIKEY);
            Size        itemsz = ItemIdGetLength(itemid);
            IndexTuple  item = (IndexTuple) PageGetItem(page, itemid);

            if (PageAddItem(newpage, (Item) item, itemsz, P_HIKEY,
                            false, false) == InvalidOffsetNumber)
                elog(ERROR, "deduplication failed to add highkey");
        }

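        /*
         * Walk the page's tuples in offset order, replaying the primary's
         * deduplication pass: the intervals array from the WAL record says
         * which groups of items the primary merged into posting lists, so
         * we merge exactly the same groups here.
         */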
        intervals = (BTDedupInterval *) ptr;
        for (offnum = minoff;
             offnum <= maxoff;
             offnum = OffsetNumberNext(offnum))
        {
            ItemId      itemid = PageGetItemId(page, offnum);
            IndexTuple  itup = (IndexTuple) PageGetItem(page, itemid);

            if (offnum == minoff)
                _bt_dedup_start_pending(state, itup, offnum);
            else if (state->nintervals < xlrec->nintervals &&
                     state->baseoff == intervals[state->nintervals].baseoff &&
                     state->nitems < intervals[state->nintervals].nitems)
            {
                if (!_bt_dedup_save_htid(state, itup))
                    elog(ERROR, "deduplication failed to add heap tid to pending posting list");
            }
            else
            {
                _bt_dedup_finish_pending(newpage, state);
                _bt_dedup_start_pending(state, itup, offnum);
            }
        }

        _bt_dedup_finish_pending(newpage, state);
        Assert(state->nintervals == xlrec->nintervals);
        Assert(memcmp(state->intervals, intervals,
                      state->nintervals * sizeof(BTDedupInterval)) == 0);

        if (P_HAS_GARBAGE(opaque))
        {
            BTPageOpaque nopaque = BTPageGetOpaque(newpage);

            nopaque->btpo_flags &= ~BTP_HAS_GARBAGE;
        }

        PageRestoreTempPage(newpage, page);
        PageSetLSN(page, lsn);
        MarkBufferDirty(buf);
    }

    if (BufferIsValid(buf))
        UnlockReleaseBuffer(buf);
}

static void
btree_xlog_updates(Page page, OffsetNumber *updatedoffsets,
                   xl_btree_update *updates, int nupdated)
{
    BTVacuumPosting vacposting;
    IndexTuple  origtuple;
    ItemId      itemid;
    Size        itemsz;

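    /*
     * Each xl_btree_update entry in the WAL data is variable-length: a
     * fixed-size header followed by ndeletedtids uint16 offsets into the
     * posting list of the tuple being updated.  That is why the loop below
     * advances through the array by SizeOfBtreeUpdate plus the TID array
     * size.
     */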
    for (int i = 0; i < nupdated; i++)
    {
        itemid = PageGetItemId(page, updatedoffsets[i]);
        origtuple = (IndexTuple) PageGetItem(page, itemid);

        vacposting = palloc(offsetof(BTVacuumPostingData, deletetids) +
                            updates->ndeletedtids * sizeof(uint16));
        vacposting->updatedoffset = updatedoffsets[i];
        vacposting->itup = origtuple;
        vacposting->ndeletedtids = updates->ndeletedtids;
        memcpy(vacposting->deletetids,
               (char *) updates + SizeOfBtreeUpdate,
               updates->ndeletedtids * sizeof(uint16));

        _bt_update_posting(vacposting);

        /* Overwrite updated version of tuple */
        itemsz = MAXALIGN(IndexTupleSize(vacposting->itup));
        if (!PageIndexTupleOverwrite(page, updatedoffsets[i],
                                     (Item) vacposting->itup, itemsz))
            elog(PANIC, "failed to update partially dead item");

        pfree(vacposting->itup);
        pfree(vacposting);

        /* advance to next xl_btree_update from array */
        updates = (xl_btree_update *)
            ((char *) updates + SizeOfBtreeUpdate +
             updates->ndeletedtids * sizeof(uint16));
    }
}

static void
btree_xlog_vacuum(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    BTPageOpaque opaque;

    /*
     * We need to take a cleanup lock here, just like btvacuumpage().
     * However, it isn't necessary to exhaustively get a cleanup lock on
     * every block in the index during recovery (just getting a cleanup lock
     * on pages with items to kill suffices).  See nbtree/README for
     * details.
     */
    if (XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &buffer)
        == BLK_NEEDS_REDO)
    {
        char       *ptr = XLogRecGetBlockData(record, 0, NULL);

        page = (Page) BufferGetPage(buffer);

        if (xlrec->nupdated > 0)
        {
            OffsetNumber *updatedoffsets;
            xl_btree_update *updates;

            updatedoffsets = (OffsetNumber *)
                (ptr + xlrec->ndeleted * sizeof(OffsetNumber));
            updates = (xl_btree_update *) ((char *) updatedoffsets +
                                           xlrec->nupdated *
                                           sizeof(OffsetNumber));

            btree_xlog_updates(page, updatedoffsets, updates, xlrec->nupdated);
        }

        if (xlrec->ndeleted > 0)
            PageIndexMultiDelete(page, (OffsetNumber *) ptr, xlrec->ndeleted);

        /*
         * Mark the page as not containing any LP_DEAD items --- see comments
         * in _bt_delitems_vacuum().
         */
        opaque = BTPageGetOpaque(page);
        opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

static void
btree_xlog_delete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_delete *xlrec = (xl_btree_delete *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    BTPageOpaque opaque;

    /*
     * If we have any conflict processing to do, it must happen before we
     * update the page.
     */
    if (InHotStandby)
    {
        RelFileLocator rlocator;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, NULL);

        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
                                            xlrec->isCatalogRel,
                                            rlocator);
    }

    /*
     * We don't need to take a cleanup lock to apply these changes.  See
     * nbtree/README for details.
     */
    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        char       *ptr = XLogRecGetBlockData(record, 0, NULL);

        page = (Page) BufferGetPage(buffer);

        if (xlrec->nupdated > 0)
        {
            OffsetNumber *updatedoffsets;
            xl_btree_update *updates;

            updatedoffsets = (OffsetNumber *)
                (ptr + xlrec->ndeleted * sizeof(OffsetNumber));
            updates = (xl_btree_update *) ((char *) updatedoffsets +
                                           xlrec->nupdated *
                                           sizeof(OffsetNumber));

            btree_xlog_updates(page, updatedoffsets, updates, xlrec->nupdated);
        }

        if (xlrec->ndeleted > 0)
            PageIndexMultiDelete(page, (OffsetNumber *) ptr, xlrec->ndeleted);

        /* Mark the page as not containing any LP_DEAD items */
        opaque = BTPageGetOpaque(page);
        opaque->btpo_flags &= ~BTP_HAS_GARBAGE;

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

static void
btree_xlog_mark_page_halfdead(uint8 info, XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_mark_page_halfdead *xlrec = (xl_btree_mark_page_halfdead *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    BTPageOpaque pageop;
    IndexTupleData trunctuple;

    /*
     * In normal operation, we would lock all the pages this WAL record
     * touches before changing any of them.  In WAL replay, it should be
     * okay to lock just one page at a time, since no concurrent index
     * updates can be happening, and readers should not care whether they
     * arrive at the target page or not (since it's surely empty).
     */

    /* to-be-deleted subtree's parent page */
    if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO)
    {
        OffsetNumber poffset;
        ItemId      itemid;
        IndexTuple  itup;
        OffsetNumber nextoffset;
        BlockNumber rightsib;

        page = (Page) BufferGetPage(buffer);
        pageop = BTPageGetOpaque(page);

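        /*
         * Remove the downlink to the target subtree from the parent: fetch
         * the right sibling's block number from the pivot tuple after
         * poffset, store it as the downlink of the pivot at poffset, then
         * delete the now-redundant pivot to its right.  This mirrors the
         * corresponding step in _bt_mark_page_halfdead() during original
         * execution.
         */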
        poffset = xlrec->poffset;

        nextoffset = OffsetNumberNext(poffset);
        itemid = PageGetItemId(page, nextoffset);
        itup = (IndexTuple) PageGetItem(page, itemid);
        rightsib = BTreeTupleGetDownLink(itup);

        itemid = PageGetItemId(page, poffset);
        itup = (IndexTuple) PageGetItem(page, itemid);
        BTreeTupleSetDownLink(itup, rightsib);
        nextoffset = OffsetNumberNext(poffset);
        PageIndexTupleDelete(page, nextoffset);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }

    /*
     * Don't need to couple cross-level locks in REDO routines, so release
     * lock on internal page immediately.
     */
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /* Rewrite the leaf page as a halfdead page */
    buffer = XLogInitBufferForRedo(record, 0);
    page = (Page) BufferGetPage(buffer);

    _bt_pageinit(page, BufferGetPageSize(buffer));
    pageop = BTPageGetOpaque(page);

    pageop->btpo_prev = xlrec->leftblk;
    pageop->btpo_next = xlrec->rightblk;
    pageop->btpo_level = 0;
    pageop->btpo_flags = BTP_HALF_DEAD | BTP_LEAF;
    pageop->btpo_cycleid = 0;

    /*
     * Construct a dummy high key item that points to the top parent page
     * (value is InvalidBlockNumber when the top parent page is the leaf
     * page itself).
     */
    MemSet(&trunctuple, 0, sizeof(IndexTupleData));
    trunctuple.t_info = sizeof(IndexTupleData);
    BTreeTupleSetTopParent(&trunctuple, xlrec->topparent);

    if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY,
                    false, false) == InvalidOffsetNumber)
        elog(ERROR, "could not add dummy high key to half-dead page");

    PageSetLSN(page, lsn);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);
}


static void
btree_xlog_unlink_page(uint8 info, XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_unlink_page *xlrec = (xl_btree_unlink_page *) XLogRecGetData(record);
    BlockNumber leftsib;
    BlockNumber rightsib;
    uint32      level;
    bool        isleaf;
    FullTransactionId safexid;
    Buffer      leftbuf;
    Buffer      target;
    Buffer      rightbuf;
    Page        page;
    BTPageOpaque pageop;

    leftsib = xlrec->leftsib;
    rightsib = xlrec->rightsib;
    level = xlrec->level;
    isleaf = (level == 0);
    safexid = xlrec->safexid;

    /* No leaftopparent for level 0 (leaf page) or level 1 target */
    Assert(!BlockNumberIsValid(xlrec->leaftopparent) || level > 1);

    /*
     * In normal operation, we would lock all the pages this WAL record
     * touches before changing any of them.  In WAL replay, we at least lock
     * the pages in the same standard left-to-right order (leftsib, target,
     * rightsib), and don't release the sibling locks until the target is
     * marked deleted.
     */

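    /*
     * Block references in an unlink record: 0 is the target page, 1 is its
     * left sibling (if any), 2 is its right sibling, 3 is the leaf page to
     * rewrite when the target is an internal page, and 4 is the metapage
     * (XLOG_BTREE_UNLINK_PAGE_META only).
     */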
    /* Fix right-link of left sibling, if any */
    if (leftsib != P_NONE)
    {
        if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO)
        {
            page = (Page) BufferGetPage(leftbuf);
            pageop = BTPageGetOpaque(page);
            pageop->btpo_next = rightsib;

            PageSetLSN(page, lsn);
            MarkBufferDirty(leftbuf);
        }
    }
    else
        leftbuf = InvalidBuffer;

    /* Rewrite target page as empty deleted page */
    target = XLogInitBufferForRedo(record, 0);
    page = (Page) BufferGetPage(target);

    _bt_pageinit(page, BufferGetPageSize(target));
    pageop = BTPageGetOpaque(page);

    pageop->btpo_prev = leftsib;
    pageop->btpo_next = rightsib;
    pageop->btpo_level = level;
    BTPageSetDeleted(page, safexid);
    if (isleaf)
        pageop->btpo_flags |= BTP_LEAF;
    pageop->btpo_cycleid = 0;

    PageSetLSN(page, lsn);
    MarkBufferDirty(target);

    /* Fix left-link of right sibling */
    if (XLogReadBufferForRedo(record, 2, &rightbuf) == BLK_NEEDS_REDO)
    {
        page = (Page) BufferGetPage(rightbuf);
        pageop = BTPageGetOpaque(page);
        pageop->btpo_prev = leftsib;

        PageSetLSN(page, lsn);
        MarkBufferDirty(rightbuf);
    }

    /* Release siblings */
    if (BufferIsValid(leftbuf))
        UnlockReleaseBuffer(leftbuf);
    if (BufferIsValid(rightbuf))
        UnlockReleaseBuffer(rightbuf);

    /* Release target */
    UnlockReleaseBuffer(target);

    /*
     * If we deleted a parent of the targeted leaf page, instead of the leaf
     * itself, update the leaf to point to the next remaining child in the
     * to-be-deleted subtree.
     */
    if (XLogRecHasBlockRef(record, 3))
    {
        /*
         * There is no real data on the page, so we just re-create it from
         * scratch using the information from the WAL record.
         *
         * Note that we don't end up here when the target page is also the
         * leafbuf page.  There is no need to add a dummy hikey item with a
         * top parent link when deleting leafbuf because it's the last page
         * we'll delete in the subtree undergoing deletion.
         */
        Buffer      leafbuf;
        IndexTupleData trunctuple;

        Assert(!isleaf);

        leafbuf = XLogInitBufferForRedo(record, 3);
        page = (Page) BufferGetPage(leafbuf);

        _bt_pageinit(page, BufferGetPageSize(leafbuf));
        pageop = BTPageGetOpaque(page);

        pageop->btpo_flags = BTP_HALF_DEAD | BTP_LEAF;
        pageop->btpo_prev = xlrec->leafleftsib;
        pageop->btpo_next = xlrec->leafrightsib;
        pageop->btpo_level = 0;
        pageop->btpo_cycleid = 0;

        /* Add a dummy hikey item */
        MemSet(&trunctuple, 0, sizeof(IndexTupleData));
        trunctuple.t_info = sizeof(IndexTupleData);
        BTreeTupleSetTopParent(&trunctuple, xlrec->leaftopparent);

        if (PageAddItem(page, (Item) &trunctuple, sizeof(IndexTupleData), P_HIKEY,
                        false, false) == InvalidOffsetNumber)
            elog(ERROR, "could not add dummy high key to half-dead page");

        PageSetLSN(page, lsn);
        MarkBufferDirty(leafbuf);
        UnlockReleaseBuffer(leafbuf);
    }

    /* Update metapage if needed */
    if (info == XLOG_BTREE_UNLINK_PAGE_META)
        _bt_restore_meta(record, 4);
}

static void
btree_xlog_newroot(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_btree_newroot *xlrec = (xl_btree_newroot *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    BTPageOpaque pageop;
    char       *ptr;
    Size        len;

    buffer = XLogInitBufferForRedo(record, 0);
    page = (Page) BufferGetPage(buffer);

    _bt_pageinit(page, BufferGetPageSize(buffer));
    pageop = BTPageGetOpaque(page);

    pageop->btpo_flags = BTP_ROOT;
    pageop->btpo_prev = pageop->btpo_next = P_NONE;
    pageop->btpo_level = xlrec->level;
    if (xlrec->level == 0)
        pageop->btpo_flags |= BTP_LEAF;
    pageop->btpo_cycleid = 0;

    if (xlrec->level > 0)
    {
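        /*
         * A non-leaf newroot record comes from a root split; its block 0
         * data holds the pivot tuples (downlinks) for the new root, which
         * we restore just like a split's right page.
         */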
        ptr = XLogRecGetBlockData(record, 0, &len);
        _bt_restore_page(page, ptr, len);

        /* Clear the incomplete-split flag in left child */
        _bt_clear_incomplete_split(record, 1);
    }

    PageSetLSN(page, lsn);
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);

    _bt_restore_meta(record, 2);
}

/*
 * In general VACUUM must defer recycling as a way of avoiding certain race
 * conditions.  Deleted pages contain a safexid value that is used by VACUUM
 * to determine whether or not it's safe to place a page that was deleted by
 * VACUUM earlier into the FSM now.  See nbtree/README.
 *
 * As far as any backend operating during original execution is concerned,
 * the FSM is a cache of recycle-safe pages; the mere presence of the page
 * in the FSM indicates that the page must already be safe to recycle
 * (actually, _bt_allocbuf() verifies it's safe using BTPageIsRecyclable(),
 * but that's just because it would be unwise to completely trust the FSM,
 * given its current limitations).
 *
 * This isn't sufficient to prevent similar concurrent recycling race
 * conditions during Hot Standby, though.  For that we need to log a
 * xl_btree_reuse_page record at the point that a page is actually recycled
 * and reused for an entirely unrelated page inside _bt_split().  These
 * records include the same safexid value from the original deleted page,
 * stored in the record's snapshotConflictHorizon field.
 *
 * The GlobalVisCheckRemovableFullXid() test in BTPageIsRecyclable() is used
 * to determine if it's safe to recycle a page.  This mirrors our own test:
 * the PGPROC->xmin > limitXmin test inside GetConflictingVirtualXIDs().
 * Consequently, one XID value achieves the same exclusion effect on primary
 * and standby.
 */
static void
btree_xlog_reuse_page(XLogReaderState *record)
{
    xl_btree_reuse_page *xlrec = (xl_btree_reuse_page *) XLogRecGetData(record);

    if (InHotStandby)
        ResolveRecoveryConflictWithSnapshotFullXid(xlrec->snapshotConflictHorizon,
                                                   xlrec->isCatalogRel,
                                                   xlrec->locator);
}

void
btree_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
    MemoryContext oldCtx;

    oldCtx = MemoryContextSwitchTo(opCtx);
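    /*
     * All allocations the per-record redo routines make below land in
     * opCtx, which is reset once the record has been replayed.
     */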
    switch (info)
    {
        case XLOG_BTREE_INSERT_LEAF:
            btree_xlog_insert(true, false, false, record);
            break;
        case XLOG_BTREE_INSERT_UPPER:
            btree_xlog_insert(false, false, false, record);
            break;
        case XLOG_BTREE_INSERT_META:
            btree_xlog_insert(false, true, false, record);
            break;
        case XLOG_BTREE_SPLIT_L:
            btree_xlog_split(true, record);
            break;
        case XLOG_BTREE_SPLIT_R:
            btree_xlog_split(false, record);
            break;
        case XLOG_BTREE_INSERT_POST:
            btree_xlog_insert(true, false, true, record);
            break;
        case XLOG_BTREE_DEDUP:
            btree_xlog_dedup(record);
            break;
        case XLOG_BTREE_VACUUM:
            btree_xlog_vacuum(record);
            break;
        case XLOG_BTREE_DELETE:
            btree_xlog_delete(record);
            break;
        case XLOG_BTREE_MARK_PAGE_HALFDEAD:
            btree_xlog_mark_page_halfdead(info, record);
            break;
        case XLOG_BTREE_UNLINK_PAGE:
        case XLOG_BTREE_UNLINK_PAGE_META:
            btree_xlog_unlink_page(info, record);
            break;
        case XLOG_BTREE_NEWROOT:
            btree_xlog_newroot(record);
            break;
        case XLOG_BTREE_REUSE_PAGE:
            btree_xlog_reuse_page(record);
            break;
        case XLOG_BTREE_META_CLEANUP:
            _bt_restore_meta(record, 0);
            break;
        default:
            elog(PANIC, "btree_redo: unknown op code %u", info);
    }
    MemoryContextSwitchTo(oldCtx);
    MemoryContextReset(opCtx);
}

void
btree_xlog_startup(void)
{
    opCtx = AllocSetContextCreate(CurrentMemoryContext,
                                  "Btree recovery temporary context",
                                  ALLOCSET_DEFAULT_SIZES);
}

void
btree_xlog_cleanup(void)
{
    MemoryContextDelete(opCtx);
    opCtx = NULL;
}

/*
 * Mask a btree page before performing consistency checks on it.
 */
void
btree_mask(char *pagedata, BlockNumber blkno)
{
    Page        page = (Page) pagedata;
    BTPageOpaque maskopaq;

    mask_page_lsn_and_checksum(page);

    mask_page_hint_bits(page);
    mask_unused_space(page);

    maskopaq = BTPageGetOpaque(page);

    if (P_ISLEAF(maskopaq))
    {
        /*
         * In btree leaf pages, it is possible to modify the LP_FLAGS without
         * emitting any WAL record.  Hence, mask the line pointer flags.  See
         * _bt_killitems(), _bt_check_unique() for details.
         */
        mask_lp_flags(page);
    }

    /*
     * BTP_HAS_GARBAGE is just an un-logged hint bit.  So, mask it.  See
     * _bt_delete_or_dedup_one_page(), _bt_killitems(), and
     * _bt_check_unique() for details.
     */
    maskopaq->btpo_flags &= ~BTP_HAS_GARBAGE;

    /*
     * During replay of a btree page split, we don't set the BTP_SPLIT_END
     * flag of the right sibling and initialize the cycle_id to 0 for the
     * same page.  See btree_xlog_split() for details.
     */
    maskopaq->btpo_flags &= ~BTP_SPLIT_END;
    maskopaq->btpo_cycleid = 0;
}