heapam_xlog.c — PostgreSQL source (doxygen extraction; embedded line numbers and elided lines are artifacts of the extraction, not of the original file)
1/*-------------------------------------------------------------------------
2 *
3 * heapam_xlog.c
4 * WAL replay logic for heap access method.
5 *
6 * Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/heap/heapam_xlog.c
12 *
13 *-------------------------------------------------------------------------
14 */
15#include "postgres.h"
16
17#include "access/bufmask.h"
18#include "access/heapam.h"
20#include "access/xlog.h"
21#include "access/xlogutils.h"
22#include "storage/freespace.h"
23#include "storage/standby.h"
24
25
26/*
27 * Replay XLOG_HEAP2_PRUNE_* records.
28 */
29static void
/*
 * NOTE(review): the function signature line and several interior lines were
 * elided by this extraction (the embedded line numbering skips).  Presumably
 * this is heap_xlog_prune_freeze(XLogReaderState *record) per the comment
 * above and the dispatch in heap2_redo() -- confirm against the full source.
 * Code below is kept byte-identical.
 */
31{
32 XLogRecPtr lsn = record->EndRecPtr;
33 char *maindataptr = XLogRecGetData(record);
35 Buffer buffer;
36 RelFileLocator rlocator;
37 BlockNumber blkno;
38 Buffer vmbuffer = InvalidBuffer;
39 uint8 vmflags = 0;
40 Size freespace = 0;
41
 /* Identify the heap block this record targets (block reference 0). */
42 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
45
46 /*
47 * We will take an ordinary exclusive lock or a cleanup lock depending on
48 * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
49 * lock, we better not be doing anything that requires moving existing
50 * tuple data.
51 */
52 Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
54
 /* Translate the record's VM flags into bits to set later (vmflags). */
55 if (xlrec.flags & XLHP_VM_ALL_VISIBLE)
56 {
58 if (xlrec.flags & XLHP_VM_ALL_FROZEN)
60 }
61
62 /*
63 * After xl_heap_prune is the optional snapshot conflict horizon.
64 *
65 * In Hot Standby mode, we must ensure that there are no running queries
66 * which would conflict with the changes in this record. That means we
67 * can't replay this record if it removes tuples that are still visible to
68 * transactions on the standby, freeze tuples with xids that are still
69 * considered running on the standby, or set a page as all-visible in the
70 * VM if it isn't all-visible to all transactions on the standby.
71 */
72 if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
73 {
75
76 /* memcpy() because snapshot_conflict_horizon is stored unaligned */
78 maindataptr += sizeof(TransactionId);
79
80 if (InHotStandby)
82 (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
83 rlocator);
84 }
85
86 /*
87 * If we have a full-page image of the heap block, restore it and we're
88 * done with the heap block.
89 */
91 (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
92 &buffer) == BLK_NEEDS_REDO)
93 {
94 Page page = BufferGetPage(buffer);
95 OffsetNumber *redirected;
96 OffsetNumber *nowdead;
97 OffsetNumber *nowunused;
98 int nredirected;
99 int ndead;
100 int nunused;
101 int nplans;
102 Size datalen;
103 xlhp_freeze_plan *plans;
105 char *dataptr = XLogRecGetBlockData(record, 0, &datalen);
106 bool do_prune;
107
 /* Deserialize the prune/freeze payload from the block data. */
109 &nplans, &plans, &frz_offsets,
110 &nredirected, &redirected,
111 &ndead, &nowdead,
112 &nunused, &nowunused);
113
114 do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
115
116 /* Ensure the record does something */
118
119 /*
120 * Update all line pointers per the record, and repair fragmentation
121 * if needed.
122 */
123 if (do_prune)
125 (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
126 redirected, nredirected,
127 nowdead, ndead,
128 nowunused, nunused);
129
130 /* Freeze tuples */
131 for (int p = 0; p < nplans; p++)
132 {
134
135 /*
136 * Convert freeze plan representation from WAL record into
137 * per-tuple format used by heap_execute_freeze_tuple
138 */
139 frz.xmax = plans[p].xmax;
140 frz.t_infomask2 = plans[p].t_infomask2;
141 frz.t_infomask = plans[p].t_infomask;
142 frz.frzflags = plans[p].frzflags;
143 frz.offset = InvalidOffsetNumber; /* unused, but be tidy */
144
145 for (int i = 0; i < plans[p].ntuples; i++)
146 {
147 OffsetNumber offset = *(frz_offsets++);
148 ItemId lp;
149 HeapTupleHeader tuple;
150
151 lp = PageGetItemId(page, offset);
152 tuple = (HeapTupleHeader) PageGetItem(page, lp);
154 }
155 }
156
157 /* There should be no more data */
158 Assert((char *) frz_offsets == dataptr + datalen);
159
160 /*
161 * The critical integrity requirement here is that we must never end
162 * up with the visibility map bit set and the page-level
163 * PD_ALL_VISIBLE bit unset. If that were to occur, a subsequent page
164 * modification would fail to clear the visibility map bit.
165 */
167 {
168 PageSetAllVisible(page);
169 PageClearPrunable(page);
170 }
171
172 MarkBufferDirty(buffer);
173
174 /*
175 * See log_heap_prune_and_freeze() for commentary on when we set the
176 * heap page LSN.
177 */
178 if (do_prune || nplans > 0 ||
180 PageSetLSN(page, lsn);
181
182 /*
183 * Note: we don't worry about updating the page's prunability hints.
184 * At worst this will cause an extra prune cycle to occur soon.
185 */
186 }
187
188 /*
189 * If we 1) released any space or line pointers or 2) set PD_ALL_VISIBLE
190 * or the VM, update the freespace map.
191 *
192 * Even when no actual space is freed (when only marking the page
193 * all-visible or frozen), we still update the FSM. Because the FSM is
194 * unlogged and maintained heuristically, it often becomes stale on
195 * standbys. If such a standby is later promoted and runs VACUUM, it will
196 * skip recalculating free space for pages that were marked
197 * all-visible/all-frozen. FreeSpaceMapVacuum() can then propagate overly
198 * optimistic free space values upward, causing future insertions to
199 * select pages that turn out to be unusable. In bulk, this can lead to
200 * long stalls.
201 *
202 * To prevent this, always update the FSM even when only marking a page
203 * all-visible/all-frozen.
204 *
205 * Do this regardless of whether a full-page image is logged, since FSM
206 * data is not part of the page itself.
207 */
208 if (BufferIsValid(buffer))
209 {
210 if ((xlrec.flags & (XLHP_HAS_REDIRECTIONS |
214 freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));
215
216 /*
217 * We want to avoid holding an exclusive lock on the heap buffer while
218 * doing IO (either of the FSM or the VM), so we'll release it now.
219 */
220 UnlockReleaseBuffer(buffer);
221 }
222
223 /*
224 * Now read and update the VM block.
225 *
226 * We must redo changes to the VM even if the heap page was skipped due to
227 * LSN interlock. See comment in heap_xlog_multi_insert() for more details
228 * on replaying changes to the VM.
229 */
233 false,
234 &vmbuffer) == BLK_NEEDS_REDO)
235 {
236 Page vmpage = BufferGetPage(vmbuffer);
237
238 /* initialize the page if it was read as zeros */
239 if (PageIsNew(vmpage))
241
242 visibilitymap_set_vmbits(blkno, vmbuffer, vmflags, rlocator);
243
244 Assert(BufferIsDirty(vmbuffer));
245 PageSetLSN(vmpage, lsn);
246 }
247
248 if (BufferIsValid(vmbuffer))
249 UnlockReleaseBuffer(vmbuffer);
250
 /* FSM update happens last, after all buffer locks are released. */
251 if (freespace > 0)
252 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
253}
254
255/*
256 * Replay XLOG_HEAP2_VISIBLE records.
257 *
258 * The critical integrity requirement here is that we must never end up with
259 * a situation where the visibility map bit is set, and the page-level
260 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
261 * page modification would fail to clear the visibility map bit.
262 */
263static void
/*
 * NOTE(review): the signature line and the xl_heap_visible record-cast
 * declaration were elided by this extraction; presumably
 * heap_xlog_visible(XLogReaderState *record) per heap2_redo().  Code below is
 * kept byte-identical.
 */
265{
266 XLogRecPtr lsn = record->EndRecPtr;
268 Buffer vmbuffer = InvalidBuffer;
269 Buffer buffer;
270 Page page;
271 RelFileLocator rlocator;
272 BlockNumber blkno;
273 XLogRedoAction action;
274
275 Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);
276
 /* Heap page is block reference 1 here (the VM page is reference 0). */
277 XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);
278
279 /*
280 * If there are any Hot Standby transactions running that have an xmin
281 * horizon old enough that this page isn't all-visible for them, they
282 * might incorrectly decide that an index-only scan can skip a heap fetch.
283 *
284 * NB: It might be better to throw some kind of "soft" conflict here that
285 * forces any index-only scan that is in flight to perform heap fetches,
286 * rather than killing the transaction outright.
287 */
288 if (InHotStandby)
289 ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
291 rlocator);
292
293 /*
294 * Read the heap page, if it still exists. If the heap file has dropped or
295 * truncated later in recovery, we don't need to update the page, but we'd
296 * better still update the visibility map.
297 */
298 action = XLogReadBufferForRedo(record, 1, &buffer);
299 if (action == BLK_NEEDS_REDO)
300 {
301 /*
302 * We don't bump the LSN of the heap page when setting the visibility
303 * map bit (unless checksums or wal_hint_bits is enabled, in which
304 * case we must). This exposes us to torn page hazards, but since
305 * we're not inspecting the existing page contents in any way, we
306 * don't care.
307 */
308 page = BufferGetPage(buffer);
309
310 PageSetAllVisible(page);
311 PageClearPrunable(page);
312
 /* Condition line (presumably the checksums/wal_log_hints test) elided. */
314 PageSetLSN(page, lsn);
315
316 MarkBufferDirty(buffer);
317 }
318 else if (action == BLK_RESTORED)
319 {
320 /*
321 * If heap block was backed up, we already restored it and there's
322 * nothing more to do. (This can only happen with checksums or
323 * wal_log_hints enabled.)
324 */
325 }
326
327 if (BufferIsValid(buffer))
328 {
329 Size space = PageGetFreeSpace(BufferGetPage(buffer));
330
331 UnlockReleaseBuffer(buffer);
332
333 /*
334 * Since FSM is not WAL-logged and only updated heuristically, it
335 * easily becomes stale in standbys. If the standby is later promoted
336 * and runs VACUUM, it will skip updating individual free space
337 * figures for pages that became all-visible (or all-frozen, depending
338 * on the vacuum mode,) which is troublesome when FreeSpaceMapVacuum
339 * propagates too optimistic free space values to upper FSM layers;
340 * later inserters try to use such pages only to find out that they
341 * are unusable. This can cause long stalls when there are many such
342 * pages.
343 *
344 * Forestall those problems by updating FSM's idea about a page that
345 * is becoming all-visible or all-frozen.
346 *
347 * Do this regardless of a full-page image being applied, since the
348 * FSM data is not in the page anyway.
349 */
350 if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
351 XLogRecordPageWithFreeSpace(rlocator, blkno, space);
352 }
353
354 /*
355 * Even if we skipped the heap page update due to the LSN interlock, it's
356 * still safe to update the visibility map. Any WAL record that clears
357 * the visibility map bit does so before checking the page LSN, so any
358 * bits that need to be cleared will still be cleared.
359 */
361 &vmbuffer) == BLK_NEEDS_REDO)
362 {
363 Page vmpage = BufferGetPage(vmbuffer);
366
367 /* initialize the page if it was read as zeros */
368 if (PageIsNew(vmpage))
370
371 /* remove VISIBILITYMAP_XLOG_* */
373
374 /*
375 * XLogReadBufferForRedoExtended locked the buffer. But
376 * visibilitymap_set will handle locking itself.
377 */
379
380 reln = CreateFakeRelcacheEntry(rlocator);
381
382 visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
383 xlrec->snapshotConflictHorizon, vmbits);
384
 /* visibilitymap_set released the lock; only the pin remains to drop. */
385 ReleaseBuffer(vmbuffer);
387 }
388 else if (BufferIsValid(vmbuffer))
389 UnlockReleaseBuffer(vmbuffer);
390}
391
392/*
393 * Given an "infobits" field from an XLog record, set the correct bits in the
394 * given infomask and infomask2 for the tuple touched by the record.
395 *
396 * (This is the reverse of compute_infobits).
397 */
398static void
/*
 * NOTE(review): the entire signature and body of fix_infomask_from_infobits
 * (original lines 399-417) were elided by this extraction; only the storage
 * class above remains.  Per the preceding comment it maps an XLog "infobits"
 * field back onto a tuple's infomask/infomask2 (inverse of compute_infobits).
 * Recover the body from the full source before editing.
 */
418
419/*
420 * Replay XLOG_HEAP_DELETE records.
421 */
422static void
/*
 * NOTE(review): signature line and several interior lines (xlrec cast,
 * VM-clear flag test, infomask reset, xmin-invalidation branch, HOT-update
 * ctid branch) were elided by this extraction.  Code below is kept
 * byte-identical.
 */
424{
425 XLogRecPtr lsn = record->EndRecPtr;
427 Buffer buffer;
428 Page page;
429 ItemId lp;
430 HeapTupleHeader htup;
431 BlockNumber blkno;
432 RelFileLocator target_locator;
433 ItemPointerData target_tid;
434
 /* Build the TID of the deleted tuple from the block tag + record offnum. */
435 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
436 ItemPointerSetBlockNumber(&target_tid, blkno);
437 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
438
439 /*
440 * The visibility map may need to be fixed even if the heap page is
441 * already up-to-date.
442 */
444 {
445 Relation reln = CreateFakeRelcacheEntry(target_locator);
446 Buffer vmbuffer = InvalidBuffer;
447
448 visibilitymap_pin(reln, blkno, &vmbuffer);
450 ReleaseBuffer(vmbuffer);
452 }
453
 /* Replay on the heap page unless an FPI already restored it. */
454 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
455 {
456 page = BufferGetPage(buffer);
457
458 if (xlrec->offnum < 1 || xlrec->offnum > PageGetMaxOffsetNumber(page))
459 elog(PANIC, "offnum out of range");
460 lp = PageGetItemId(page, xlrec->offnum);
461 if (!ItemIdIsNormal(lp))
462 elog(PANIC, "invalid lp")
463
464 htup = (HeapTupleHeader) PageGetItem(page, lp);
465
469 fix_infomask_from_infobits(xlrec->infobits_set,
470 &htup->t_infomask, &htup->t_infomask2);
471 if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
472 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
473 else
476
477 /* Mark the page as a candidate for pruning */
478 PageSetPrunable(page, XLogRecGetXid(record));
479
482
483 /* Make sure t_ctid is set correctly */
486 else
487 htup->t_ctid = target_tid;
488 PageSetLSN(page, lsn);
489 MarkBufferDirty(buffer);
490 }
491 if (BufferIsValid(buffer))
492 UnlockReleaseBuffer(buffer);
493}
494
495/*
496 * Replay XLOG_HEAP_INSERT records.
497 */
498static void
/*
 * NOTE(review): signature line and several interior lines (xlrec cast, tbuf
 * union members, xlhdr declaration, VM-clear flag test, init-page test,
 * header-copy/xmin lines) were elided by this extraction.  Code below is
 * kept byte-identical.
 */
500{
501 XLogRecPtr lsn = record->EndRecPtr;
503 Buffer buffer;
504 Page page;
505 union
506 {
509 } tbuf;
510 HeapTupleHeader htup;
513 Size freespace = 0;
514 RelFileLocator target_locator;
515 BlockNumber blkno;
516 ItemPointerData target_tid;
517 XLogRedoAction action;
518
519 XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
520 ItemPointerSetBlockNumber(&target_tid, blkno);
521 ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);
522
523 /* No freezing in the heap_insert() code path */
525
526 /*
527 * The visibility map may need to be fixed even if the heap page is
528 * already up-to-date.
529 */
531 {
532 Relation reln = CreateFakeRelcacheEntry(target_locator);
533 Buffer vmbuffer = InvalidBuffer;
534
535 visibilitymap_pin(reln, blkno, &vmbuffer);
537 ReleaseBuffer(vmbuffer);
539 }
540
541 /*
542 * If we inserted the first and only tuple on the page, re-initialize the
543 * page from scratch.
544 */
546 {
547 buffer = XLogInitBufferForRedo(record, 0);
548 page = BufferGetPage(buffer);
549 PageInit(page, BufferGetPageSize(buffer), 0);
550 action = BLK_NEEDS_REDO;
551 }
552 else
553 action = XLogReadBufferForRedo(record, 0, &buffer);
554 if (action == BLK_NEEDS_REDO)
555 {
556 Size datalen;
557 char *data;
558
559 page = BufferGetPage(buffer);
560
561 if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
562 elog(PANIC, "invalid max offset number");
563
564 data = XLogRecGetBlockData(record, 0, &datalen);
565
 /* Tuple data follows the xl_heap_header in the block data. */
566 newlen = datalen - SizeOfHeapHeader;
570
571 htup = &tbuf.hdr;
573 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
574 memcpy((char *) htup + SizeofHeapTupleHeader,
575 data,
576 newlen);
578 htup->t_infomask2 = xlhdr.t_infomask2;
579 htup->t_infomask = xlhdr.t_infomask;
580 htup->t_hoff = xlhdr.t_hoff;
583 htup->t_ctid = target_tid;
584
585 if (PageAddItem(page, htup, newlen, xlrec->offnum, true, true) == InvalidOffsetNumber)
586 elog(PANIC, "failed to add tuple");
587
588 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
589
590 PageSetLSN(page, lsn);
591
594
595 MarkBufferDirty(buffer);
596 }
597 if (BufferIsValid(buffer))
598 UnlockReleaseBuffer(buffer);
599
600 /*
601 * If the page is running low on free space, update the FSM as well.
602 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
603 * better than that without knowing the fill-factor for the table.
604 *
605 * XXX: Don't do this if the page was restored from full page image. We
606 * don't bother to update the FSM in that case, it doesn't need to be
607 * totally accurate anyway.
608 */
609 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
610 XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
611}
612
613/*
614 * Replay XLOG_HEAP2_MULTI_INSERT records.
615 */
616static void
/*
 * NOTE(review): signature line and several interior lines (xlrec cast, tbuf
 * union members, per-tuple xlhdr declaration/cast, header-copy and xmin
 * lines, VM flag tests) were elided by this extraction.  Code below is kept
 * byte-identical.
 */
618{
619 XLogRecPtr lsn = record->EndRecPtr;
621 RelFileLocator rlocator;
622 BlockNumber blkno;
623 Buffer buffer;
624 Page page;
625 union
626 {
629 } tbuf;
630 HeapTupleHeader htup;
632 Size freespace = 0;
633 int i;
634 bool isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
635 XLogRedoAction action;
636 Buffer vmbuffer = InvalidBuffer;
637
638 /*
639 * Insertion doesn't overwrite MVCC data, so no conflict processing is
640 * required.
641 */
643
644 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
645
646 /* check that the mutually exclusive flags are not both set */
648 (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));
649
650 /*
651 * The visibility map may need to be fixed even if the heap page is
652 * already up-to-date.
653 */
655 {
657
658 visibilitymap_pin(reln, blkno, &vmbuffer);
660 ReleaseBuffer(vmbuffer);
661 vmbuffer = InvalidBuffer;
663 }
664
665 if (isinit)
666 {
667 buffer = XLogInitBufferForRedo(record, 0);
668 page = BufferGetPage(buffer);
669 PageInit(page, BufferGetPageSize(buffer), 0);
670 action = BLK_NEEDS_REDO;
671 }
672 else
673 action = XLogReadBufferForRedo(record, 0, &buffer);
674 if (action == BLK_NEEDS_REDO)
675 {
676 char *tupdata;
677 char *endptr;
678 Size len;
679
680 /* Tuples are stored as block data */
681 tupdata = XLogRecGetBlockData(record, 0, &len);
682 endptr = tupdata + len;
683
684 page = BufferGetPage(buffer);
685
 /* Reconstruct and add each tuple in record order. */
686 for (i = 0; i < xlrec->ntuples; i++)
687 {
688 OffsetNumber offnum;
690
691 /*
692 * If we're reinitializing the page, the tuples are stored in
693 * order from FirstOffsetNumber. Otherwise there's an array of
694 * offsets in the WAL record, and the tuples come after that.
695 */
696 if (isinit)
697 offnum = FirstOffsetNumber + i;
698 else
699 offnum = xlrec->offsets[i];
700 if (PageGetMaxOffsetNumber(page) + 1 < offnum)
701 elog(PANIC, "invalid max offset number");
702
704 tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;
705
706 newlen = xlhdr->datalen;
708 htup = &tbuf.hdr;
710 /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
711 memcpy((char *) htup + SizeofHeapTupleHeader,
712 tupdata,
713 newlen);
714 tupdata += newlen;
715
717 htup->t_infomask2 = xlhdr->t_infomask2;
718 htup->t_infomask = xlhdr->t_infomask;
719 htup->t_hoff = xlhdr->t_hoff;
722 ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
723 ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);
724
725 offnum = PageAddItem(page, htup, newlen, offnum, true, true);
726 if (offnum == InvalidOffsetNumber)
727 elog(PANIC, "failed to add tuple");
728 }
 /* Cross-check that we consumed exactly the recorded tuple data. */
729 if (tupdata != endptr)
730 elog(PANIC, "total tuple length mismatch");
731
732 freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */
733
734 PageSetLSN(page, lsn);
735
738
739 /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
740 if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
741 {
742 PageSetAllVisible(page);
743 PageClearPrunable(page);
744 }
745
746 MarkBufferDirty(buffer);
747 }
748 if (BufferIsValid(buffer))
749 UnlockReleaseBuffer(buffer);
750
751 buffer = InvalidBuffer;
752
753 /*
754 * Read and update the visibility map (VM) block.
755 *
756 * We must always redo VM changes, even if the corresponding heap page
757 * update was skipped due to the LSN interlock. Each VM block covers
758 * multiple heap pages, so later WAL records may update other bits in the
759 * same block. If this record includes an FPI (full-page image),
760 * subsequent WAL records may depend on it to guard against torn pages.
761 *
762 * Heap page changes are replayed first to preserve the invariant:
763 * PD_ALL_VISIBLE must be set on the heap page if the VM bit is set.
764 *
765 * Note that we released the heap page lock above. During normal
766 * operation, this would be unsafe — a concurrent modification could
767 * clear PD_ALL_VISIBLE while the VM bit remained set, violating the
768 * invariant.
769 *
770 * During recovery, however, no concurrent writers exist. Therefore,
771 * updating the VM without holding the heap page lock is safe enough. This
772 * same approach is taken when replaying xl_heap_visible records (see
773 * heap_xlog_visible()).
774 */
775 if ((xlrec->flags & XLH_INSERT_ALL_FROZEN_SET) &&
777 &vmbuffer) == BLK_NEEDS_REDO)
778 {
779 Page vmpage = BufferGetPage(vmbuffer);
780
781 /* initialize the page if it was read as zeros */
782 if (PageIsNew(vmpage))
784
786 vmbuffer,
789 rlocator);
790
791 Assert(BufferIsDirty(vmbuffer));
792 PageSetLSN(vmpage, lsn);
793 }
794
795 if (BufferIsValid(vmbuffer))
796 UnlockReleaseBuffer(vmbuffer);
797
798 /*
799 * If the page is running low on free space, update the FSM as well.
800 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
801 * better than that without knowing the fill-factor for the table.
802 *
803 * XXX: Don't do this if the page was restored from full page image. We
804 * don't bother to update the FSM in that case, it doesn't need to be
805 * totally accurate anyway.
806 */
807 if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
808 XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
809}
810
811/*
812 * Replay XLOG_HEAP_UPDATE and XLOG_HEAP_HOT_UPDATE records.
813 */
814static void
/*
 * NOTE(review): signature line (presumably heap_xlog_update(XLogReaderState
 * *record, bool hot_update), given the uses of "hot_update" below and the
 * calls from heap_redo()) and many interior lines were elided by this
 * extraction.  Code below is kept byte-identical.
 */
816{
817 XLogRecPtr lsn = record->EndRecPtr;
819 RelFileLocator rlocator;
824 nbuffer;
825 Page opage,
826 npage;
827 OffsetNumber offnum;
828 ItemId lp;
830 HeapTupleHeader htup;
831 uint16 prefixlen = 0,
832 suffixlen = 0;
833 char *newp;
834 union
835 {
838 } tbuf;
841 Size freespace = 0;
844
845 /* initialize to keep the compiler quiet */
846 oldtup.t_data = NULL;
847 oldtup.t_len = 0;
848
849 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
850 if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
851 {
852 /* HOT updates are never done across pages */
854 }
855 else
856 oldblk = newblk;
857
858 ItemPointerSet(&newtid, newblk, xlrec->new_offnum);
859
860 /*
861 * The visibility map may need to be fixed even if the heap page is
862 * already up-to-date.
863 */
865 {
867 Buffer vmbuffer = InvalidBuffer;
868
869 visibilitymap_pin(reln, oldblk, &vmbuffer);
871 ReleaseBuffer(vmbuffer);
873 }
874
875 /*
876 * In normal operation, it is important to lock the two pages in
877 * page-number order, to avoid possible deadlocks against other update
878 * operations going the other way. However, during WAL replay there can
879 * be no other update happening, so we don't need to worry about that. But
880 * we *do* need to worry that we don't expose an inconsistent state to Hot
881 * Standby queries --- so the original page can't be unlocked before we've
882 * added the new tuple to the new page.
883 */
884
885 /* Deal with old tuple version */
886 oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
887 &obuffer);
889 {
891 offnum = xlrec->old_offnum;
893 elog(PANIC, "offnum out of range");
894 lp = PageGetItemId(opage, offnum);
895 if (!ItemIdIsNormal(lp))
896 elog(PANIC, "invalid lp");
897
899
900 oldtup.t_data = htup;
901 oldtup.t_len = ItemIdGetLength(lp);
902
905 if (hot_update)
907 else
909 fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
910 &htup->t_infomask2);
911 HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
913 /* Set forward chain link in t_ctid */
914 htup->t_ctid = newtid;
915
916 /* Mark the page as a candidate for pruning */
918
921
922 PageSetLSN(opage, lsn);
924 }
925
926 /*
927 * Read the page the new tuple goes into, if different from old.
928 */
929 if (oldblk == newblk)
930 {
933 }
934 else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
935 {
936 nbuffer = XLogInitBufferForRedo(record, 0);
937 npage = BufferGetPage(nbuffer);
940 }
941 else
943
944 /*
945 * The visibility map may need to be fixed even if the heap page is
946 * already up-to-date.
947 */
949 {
951 Buffer vmbuffer = InvalidBuffer;
952
953 visibilitymap_pin(reln, newblk, &vmbuffer);
955 ReleaseBuffer(vmbuffer);
957 }
958
959 /* Deal with new tuple */
961 {
962 char *recdata;
963 char *recdata_end;
964 Size datalen;
965 Size tuplen;
966
967 recdata = XLogRecGetBlockData(record, 0, &datalen);
968 recdata_end = recdata + datalen;
969
970 npage = BufferGetPage(nbuffer);
971
972 offnum = xlrec->new_offnum;
973 if (PageGetMaxOffsetNumber(npage) + 1 < offnum)
974 elog(PANIC, "invalid max offset number");
975
 /* Optional prefix/suffix lengths precede the tuple data (same-page only). */
977 {
978 Assert(newblk == oldblk);
979 memcpy(&prefixlen, recdata, sizeof(uint16));
980 recdata += sizeof(uint16);
981 }
983 {
984 Assert(newblk == oldblk);
985 memcpy(&suffixlen, recdata, sizeof(uint16));
986 recdata += sizeof(uint16);
987 }
988
991
992 tuplen = recdata_end - recdata;
993 Assert(tuplen <= MaxHeapTupleSize);
994
995 htup = &tbuf.hdr;
997
998 /*
999 * Reconstruct the new tuple using the prefix and/or suffix from the
1000 * old tuple, and the data stored in the WAL record.
1001 */
1002 newp = (char *) htup + SizeofHeapTupleHeader;
1003 if (prefixlen > 0)
1004 {
1005 int len;
1006
1007 /* copy bitmap [+ padding] [+ oid] from WAL record */
1010 recdata += len;
1011 newp += len;
1012
1013 /* copy prefix from old tuple */
1014 memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
1015 newp += prefixlen;
1016
1017 /* copy new tuple data from WAL record */
1018 len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
1020 recdata += len;
1021 newp += len;
1022 }
1023 else
1024 {
1025 /*
1026 * copy bitmap [+ padding] [+ oid] + data from record, all in one
1027 * go
1028 */
1029 memcpy(newp, recdata, tuplen);
1030 recdata += tuplen;
1031 newp += tuplen;
1032 }
1034
1035 /* copy suffix from old tuple */
1036 if (suffixlen > 0)
1037 memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);
1038
1040 htup->t_infomask2 = xlhdr.t_infomask2;
1041 htup->t_infomask = xlhdr.t_infomask;
1042 htup->t_hoff = xlhdr.t_hoff;
1043
1046 HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
1047 /* Make sure there is no forward chain link in t_ctid */
1048 htup->t_ctid = newtid;
1049
1050 offnum = PageAddItem(npage, htup, newlen, offnum, true, true);
1051 if (offnum == InvalidOffsetNumber)
1052 elog(PANIC, "failed to add tuple");
1053
1055 PageClearAllVisible(npage);
1056
1057 /* needed to update FSM below */
1058 freespace = PageGetHeapFreeSpace(npage);
1059
1060 PageSetLSN(npage, lsn);
1062 }
1063
 /* Buffer release lines (original 1064-1067) elided by extraction. */
1068
1069 /*
1070 * If the new page is running low on free space, update the FSM as well.
1071 * Arbitrarily, our definition of "low" is less than 20%. We can't do much
1072 * better than that without knowing the fill-factor for the table.
1073 *
1074 * However, don't update the FSM on HOT updates, because after crash
1075 * recovery, either the old or the new tuple will certainly be dead and
1076 * prunable. After pruning, the page will have roughly as much free space
1077 * as it did before the update, assuming the new tuple is about the same
1078 * size as the old one.
1079 *
1080 * XXX: Don't do this if the page was restored from full page image. We
1081 * don't bother to update the FSM in that case, it doesn't need to be
1082 * totally accurate anyway.
1083 */
1084 if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
1085 XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
1086}
1087
1088/*
1089 * Replay XLOG_HEAP_CONFIRM records.
1090 */
1091static void
/*
 * NOTE(review): the signature line and the xl_heap_confirm record-cast
 * declaration were elided by this extraction; presumably
 * heap_xlog_confirm(XLogReaderState *record) per heap_redo().  Code below is
 * kept byte-identical.
 */
1093{
1094 XLogRecPtr lsn = record->EndRecPtr;
1096 Buffer buffer;
1097 Page page;
1098 OffsetNumber offnum;
1099 ItemId lp;
1100 HeapTupleHeader htup;
1101
1102 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1103 {
1104 page = BufferGetPage(buffer);
1105
 /* Sanity-check the target line pointer before touching the tuple. */
1106 offnum = xlrec->offnum;
1108 elog(PANIC, "offnum out of range");
1109 lp = PageGetItemId(page, offnum);
1110 if (!ItemIdIsNormal(lp))
1111 elog(PANIC, "invalid lp");
1112
1113 htup = (HeapTupleHeader) PageGetItem(page, lp);
1114
1115 /*
1116 * Confirm tuple as actually inserted
1117 */
1118 ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);
1119
1120 PageSetLSN(page, lsn);
1121 MarkBufferDirty(buffer);
1122 }
1123 if (BufferIsValid(buffer))
1124 UnlockReleaseBuffer(buffer);
1125}
1126
1127/*
1128 * Replay XLOG_HEAP_LOCK records.
1129 */
1130static void
/*
 * NOTE(review): signature line and several interior lines (xlrec cast,
 * VM-clear flag test, offnum range check, LOCK_ONLY infomask line, the
 * "no update" condition, and the hint-bit/frozen handling) were elided by
 * this extraction.  Code below is kept byte-identical.
 */
1132{
1133 XLogRecPtr lsn = record->EndRecPtr;
1135 Buffer buffer;
1136 Page page;
1137 OffsetNumber offnum;
1138 ItemId lp;
1139 HeapTupleHeader htup;
1140
1141 /*
1142 * The visibility map may need to be fixed even if the heap page is
1143 * already up-to-date.
1144 */
1146 {
1147 RelFileLocator rlocator;
1148 Buffer vmbuffer = InvalidBuffer;
1149 BlockNumber block;
1150 Relation reln;
1151
1152 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1153 reln = CreateFakeRelcacheEntry(rlocator);
1154
1155 visibilitymap_pin(reln, block, &vmbuffer);
1157
1158 ReleaseBuffer(vmbuffer);
1160 }
1161
1162 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1163 {
1164 page = BufferGetPage(buffer);
1165
1166 offnum = xlrec->offnum;
1168 elog(PANIC, "offnum out of range");
1169 lp = PageGetItemId(page, offnum);
1170 if (!ItemIdIsNormal(lp))
1171 elog(PANIC, "invalid lp");
1172
1173 htup = (HeapTupleHeader) PageGetItem(page, lp);
1174
 /* Reset lock-related infomask bits, then apply the recorded ones. */
1175 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1177 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1178 &htup->t_infomask2);
1179
1180 /*
1181 * Clear relevant update flags, but only if the modified infomask says
1182 * there's no update.
1183 */
1185 {
1187 /* Make sure there is no forward chain link in t_ctid */
1188 ItemPointerSet(&htup->t_ctid,
1189 BufferGetBlockNumber(buffer),
1190 offnum);
1191 }
1192 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1194 PageSetLSN(page, lsn);
1195 MarkBufferDirty(buffer);
1196 }
1197 if (BufferIsValid(buffer))
1198 UnlockReleaseBuffer(buffer);
1199}
1200
1201/*
1202 * Replay XLOG_HEAP2_LOCK_UPDATED records.
1203 */
1204static void
/*
 * NOTE(review): signature line and a few interior lines (xlrec cast,
 * VM-clear flag test, infomask application line) were elided by this
 * extraction.  Code below is kept byte-identical.
 */
1206{
1207 XLogRecPtr lsn = record->EndRecPtr;
1209 Buffer buffer;
1210 Page page;
1211 OffsetNumber offnum;
1212 ItemId lp;
1213 HeapTupleHeader htup;
1214
1216
1217 /*
1218 * The visibility map may need to be fixed even if the heap page is
1219 * already up-to-date.
1220 */
1222 {
1223 RelFileLocator rlocator;
1224 Buffer vmbuffer = InvalidBuffer;
1225 BlockNumber block;
1226 Relation reln;
1227
1228 XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
1229 reln = CreateFakeRelcacheEntry(rlocator);
1230
1231 visibilitymap_pin(reln, block, &vmbuffer);
1233
1234 ReleaseBuffer(vmbuffer);
1236 }
1237
1238 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1239 {
1240 page = BufferGetPage(buffer);
1241
1242 offnum = xlrec->offnum;
1244 elog(PANIC, "offnum out of range");
1245 lp = PageGetItemId(page, offnum);
1246 if (!ItemIdIsNormal(lp))
1247 elog(PANIC, "invalid lp");
1248
1249 htup = (HeapTupleHeader) PageGetItem(page, lp);
1250
 /* Reset lock-related infomask bits, then apply the recorded ones. */
1251 htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
1253 fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
1254 &htup->t_infomask2);
1255 HeapTupleHeaderSetXmax(htup, xlrec->xmax);
1256
1257 PageSetLSN(page, lsn);
1258 MarkBufferDirty(buffer);
1259 }
1260 if (BufferIsValid(buffer))
1261 UnlockReleaseBuffer(buffer);
1262}
1263
1264/*
1265 * Replay XLOG_HEAP_INPLACE records.
1266 */
1267static void
/*
 * NOTE(review): signature line and the xl_heap_inplace record-cast
 * declaration were elided by this extraction; presumably
 * heap_xlog_inplace(XLogReaderState *record) per heap_redo().  Code below is
 * kept byte-identical.
 */
1269{
1270 XLogRecPtr lsn = record->EndRecPtr;
1272 Buffer buffer;
1273 Page page;
1274 OffsetNumber offnum;
1275 ItemId lp;
1276 HeapTupleHeader htup;
1277 uint32 oldlen;
1278 Size newlen;
1279
1280 if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
1281 {
1282 char *newtup = XLogRecGetBlockData(record, 0, &newlen);
1283
1284 page = BufferGetPage(buffer);
1285
1286 offnum = xlrec->offnum;
1288 elog(PANIC, "offnum out of range");
1289 lp = PageGetItemId(page, offnum);
1290 if (!ItemIdIsNormal(lp))
1291 elog(PANIC, "invalid lp");
1292
1293 htup = (HeapTupleHeader) PageGetItem(page, lp);
1294
 /* In-place update: new data must exactly fit the old tuple body. */
1295 oldlen = ItemIdGetLength(lp) - htup->t_hoff;
1296 if (oldlen != newlen)
1297 elog(PANIC, "wrong tuple length");
1298
1299 memcpy((char *) htup + htup->t_hoff, newtup, newlen);
1300
1301 PageSetLSN(page, lsn);
1302 MarkBufferDirty(buffer);
1303 }
1304 if (BufferIsValid(buffer))
1305 UnlockReleaseBuffer(buffer);
1306
 /* Replay any invalidation messages carried by the record (call-site line
  * for the function name was elided; args suggest Process...Invalidation). */
1308 xlrec->nmsgs,
1309 xlrec->relcacheInitFileInval,
1310 xlrec->dbId,
1311 xlrec->tsId);
1312}
1313
1314void
1316{
1317 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1318
1319 /*
1320 * These operations don't overwrite MVCC data so no conflict processing is
1321 * required. The ones in heap2 rmgr do.
1322 */
1323
1324 switch (info & XLOG_HEAP_OPMASK)
1325 {
1326 case XLOG_HEAP_INSERT:
1327 heap_xlog_insert(record);
1328 break;
1329 case XLOG_HEAP_DELETE:
1330 heap_xlog_delete(record);
1331 break;
1332 case XLOG_HEAP_UPDATE:
1333 heap_xlog_update(record, false);
1334 break;
1335 case XLOG_HEAP_TRUNCATE:
1336
1337 /*
1338 * TRUNCATE is a no-op because the actions are already logged as
1339 * SMGR WAL records. TRUNCATE WAL record only exists for logical
1340 * decoding.
1341 */
1342 break;
1344 heap_xlog_update(record, true);
1345 break;
1346 case XLOG_HEAP_CONFIRM:
1347 heap_xlog_confirm(record);
1348 break;
1349 case XLOG_HEAP_LOCK:
1350 heap_xlog_lock(record);
1351 break;
1352 case XLOG_HEAP_INPLACE:
1353 heap_xlog_inplace(record);
1354 break;
1355 default:
1356 elog(PANIC, "heap_redo: unknown op code %u", info);
1357 }
1358}
1359
1360void
1362{
1363 uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
1364
1365 switch (info & XLOG_HEAP_OPMASK)
1366 {
1370 heap_xlog_prune_freeze(record);
1371 break;
1372 case XLOG_HEAP2_VISIBLE:
1373 heap_xlog_visible(record);
1374 break;
1376 heap_xlog_multi_insert(record);
1377 break;
1379 heap_xlog_lock_updated(record);
1380 break;
1381 case XLOG_HEAP2_NEW_CID:
1382
1383 /*
1384 * Nothing to do on a real replay, only used during logical
1385 * decoding.
1386 */
1387 break;
1388 case XLOG_HEAP2_REWRITE:
1390 break;
1391 default:
1392 elog(PANIC, "heap2_redo: unknown op code %u", info);
1393 }
1394}
1395
1396/*
1397 * Mask a heap page before performing consistency checks on it.
1398 */
1399void
1401{
1402 Page page = (Page) pagedata;
1403 OffsetNumber off;
1404
1406
1407 mask_page_hint_bits(page);
1408 mask_unused_space(page);
1409
1410 for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
1411 {
1412 ItemId iid = PageGetItemId(page, off);
1413 char *page_item;
1414
1415 page_item = (char *) (page + ItemIdGetOffset(iid));
1416
1417 if (ItemIdIsNormal(iid))
1418 {
1420
1421 /*
1422 * If xmin of a tuple is not yet frozen, we should ignore
1423 * differences in hint bits, since they can be set without
1424 * emitting WAL.
1425 */
1428 else
1429 {
1430 /* Still we need to mask xmax hint bits. */
1431 page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
1432 page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
1433 }
1434
1435 /*
1436 * During replay, we set Command Id to FirstCommandId. Hence, mask
1437 * it. See heap_xlog_insert() for details.
1438 */
1439 page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;
1440
1441 /*
1442 * For a speculative tuple, heap_insert() does not set ctid in the
1443 * caller-passed heap tuple itself, leaving the ctid field to
1444 * contain a speculative token value - a per-backend monotonically
1445 * increasing identifier. Besides, it does not WAL-log ctid under
1446 * any circumstances.
1447 *
1448 * During redo, heap_xlog_insert() sets t_ctid to current block
1449 * number and self offset number. It doesn't care about any
1450 * speculative insertions on the primary. Hence, we set t_ctid to
1451 * current block number and self offset number to ignore any
1452 * inconsistency.
1453 */
1455 ItemPointerSet(&page_htup->t_ctid, blkno, off);
1456
1457 /*
1458 * NB: Not ignoring ctid changes due to the tuple having moved
1459 * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
1460 * important information that needs to be in-sync between primary
1461 * and standby, and thus is WAL logged.
1462 */
1463 }
1464
1465 /*
1466 * Ignore any padding bytes after the tuple, when the length of the
1467 * item is not MAXALIGNed.
1468 */
1469 if (ItemIdHasStorage(iid))
1470 {
1471 int len = ItemIdGetLength(iid);
1472 int padlen = MAXALIGN(len) - len;
1473
1474 if (padlen > 0)
1476 }
1477 }
1478}
uint32 BlockNumber
Definition block.h:31
int Buffer
Definition buf.h:23
#define InvalidBuffer
Definition buf.h:25
void mask_page_lsn_and_checksum(Page page)
Definition bufmask.c:31
void mask_unused_space(Page page)
Definition bufmask.c:71
void mask_page_hint_bits(Page page)
Definition bufmask.c:46
#define MASK_MARKER
Definition bufmask.h:24
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition bufmgr.c:4357
bool BufferIsDirty(Buffer buffer)
Definition bufmgr.c:3030
void ReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5505
void UnlockReleaseBuffer(Buffer buffer)
Definition bufmgr.c:5522
void MarkBufferDirty(Buffer buffer)
Definition bufmgr.c:3063
static Page BufferGetPage(Buffer buffer)
Definition bufmgr.h:470
@ BUFFER_LOCK_UNLOCK
Definition bufmgr.h:205
static Size BufferGetPageSize(Buffer buffer)
Definition bufmgr.h:459
static void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition bufmgr.h:332
@ RBM_ZERO_ON_ERROR
Definition bufmgr.h:51
@ RBM_NORMAL
Definition bufmgr.h:46
static bool BufferIsValid(Buffer bufnum)
Definition bufmgr.h:421
Size PageGetFreeSpace(const PageData *page)
Definition bufpage.c:906
Size PageGetHeapFreeSpace(const PageData *page)
Definition bufpage.c:990
void PageInit(Page page, Size pageSize, Size specialSize)
Definition bufpage.c:42
static void PageClearAllVisible(Page page)
Definition bufpage.h:465
static bool PageIsNew(const PageData *page)
Definition bufpage.h:259
static void PageSetAllVisible(Page page)
Definition bufpage.h:460
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition bufpage.h:269
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition bufpage.h:379
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition bufpage.h:417
PageData * Page
Definition bufpage.h:81
#define PageClearPrunable(page)
Definition bufpage.h:486
#define PageSetPrunable(page, xid)
Definition bufpage.h:479
#define PageAddItem(page, item, size, offsetNumber, overwrite, is_heap)
Definition bufpage.h:504
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition bufpage.h:397
#define MAXALIGN(LEN)
Definition c.h:898
uint8_t uint8
Definition c.h:616
#define Assert(condition)
Definition c.h:945
#define FirstCommandId
Definition c.h:754
#define SHORTALIGN(LEN)
Definition c.h:894
uint16_t uint16
Definition c.h:617
uint32_t uint32
Definition c.h:618
#define MemSet(start, val, len)
Definition c.h:1109
uint32 TransactionId
Definition c.h:738
size_t Size
Definition c.h:691
#define PANIC
Definition elog.h:42
#define elog(elevel,...)
Definition elog.h:226
void XLogRecordPageWithFreeSpace(RelFileLocator rlocator, BlockNumber heapBlk, Size spaceAvail)
Definition freespace.c:211
static void heap_execute_freeze_tuple(HeapTupleHeader tuple, HeapTupleFreeze *frz)
Definition heapam.h:518
void heap_redo(XLogReaderState *record)
static void heap_xlog_prune_freeze(XLogReaderState *record)
Definition heapam_xlog.c:30
void heap_mask(char *pagedata, BlockNumber blkno)
static void heap_xlog_insert(XLogReaderState *record)
static void fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
static void heap_xlog_update(XLogReaderState *record, bool hot_update)
static void heap_xlog_delete(XLogReaderState *record)
static void heap_xlog_lock_updated(XLogReaderState *record)
static void heap_xlog_lock(XLogReaderState *record)
static void heap_xlog_multi_insert(XLogReaderState *record)
static void heap_xlog_visible(XLogReaderState *record)
static void heap_xlog_inplace(XLogReaderState *record)
static void heap_xlog_confirm(XLogReaderState *record)
void heap2_redo(XLogReaderState *record)
#define XLOG_HEAP2_MULTI_INSERT
Definition heapam_xlog.h:64
#define XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:87
#define XLOG_HEAP_HOT_UPDATE
Definition heapam_xlog.h:37
#define XLOG_HEAP_DELETE
Definition heapam_xlog.h:34
#define XLHP_HAS_CONFLICT_HORIZON
#define XLOG_HEAP2_REWRITE
Definition heapam_xlog.h:59
#define XLH_LOCK_ALL_FROZEN_CLEARED
#define XLOG_HEAP_TRUNCATE
Definition heapam_xlog.h:36
#define XLHP_VM_ALL_VISIBLE
#define XLH_INSERT_ALL_FROZEN_SET
Definition heapam_xlog.h:79
#define XLOG_HEAP_OPMASK
Definition heapam_xlog.h:42
#define XLOG_HEAP_UPDATE
Definition heapam_xlog.h:35
#define SizeOfHeapPrune
#define XLHL_XMAX_KEYSHR_LOCK
#define XLH_DELETE_ALL_VISIBLE_CLEARED
#define XLHP_HAS_NOW_UNUSED_ITEMS
#define XLHL_XMAX_IS_MULTI
#define XLHP_VM_ALL_FROZEN
#define XLHP_HAS_REDIRECTIONS
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:72
#define SizeOfHeapHeader
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition heapam_xlog.h:61
#define XLH_DELETE_IS_PARTITION_MOVE
#define XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED
Definition heapam_xlog.h:85
#define XLHL_XMAX_LOCK_ONLY
#define XLOG_HEAP_INPLACE
Definition heapam_xlog.h:40
#define XLOG_HEAP2_LOCK_UPDATED
Definition heapam_xlog.h:65
#define XLH_UPDATE_SUFFIX_FROM_OLD
Definition heapam_xlog.h:92
#define XLH_UPDATE_PREFIX_FROM_OLD
Definition heapam_xlog.h:91
#define SizeOfMultiInsertTuple
#define XLHL_XMAX_EXCL_LOCK
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition heapam_xlog.h:60
#define XLOG_HEAP2_NEW_CID
Definition heapam_xlog.h:66
#define XLHP_CLEANUP_LOCK
#define XLHP_HAS_DEAD_ITEMS
#define XLOG_HEAP_LOCK
Definition heapam_xlog.h:39
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition heapam_xlog.h:62
#define XLOG_HEAP_INSERT
Definition heapam_xlog.h:33
#define XLH_DELETE_IS_SUPER
#define XLHL_KEYS_UPDATED
#define XLOG_HEAP2_VISIBLE
Definition heapam_xlog.h:63
#define XLHP_IS_CATALOG_REL
#define XLOG_HEAP_INIT_PAGE
Definition heapam_xlog.h:47
#define XLOG_HEAP_CONFIRM
Definition heapam_xlog.h:38
void heap_xlog_deserialize_prune_and_freeze(char *cursor, uint16 flags, int *nplans, xlhp_freeze_plan **plans, OffsetNumber **frz_offsets, int *nredirected, OffsetNumber **redirected, int *ndead, OffsetNumber **nowdead, int *nunused, OffsetNumber **nowunused)
Definition heapdesc.c:106
HeapTupleHeaderData * HeapTupleHeader
Definition htup.h:23
static bool HeapTupleHeaderXminFrozen(const HeapTupleHeaderData *tup)
#define SizeofHeapTupleHeader
#define HEAP_KEYS_UPDATED
static bool HEAP_XMAX_IS_LOCKED_ONLY(uint16 infomask)
static void HeapTupleHeaderSetCmax(HeapTupleHeaderData *tup, CommandId cid, bool iscombo)
#define HEAP_XMAX_LOCK_ONLY
static void HeapTupleHeaderClearHotUpdated(HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetCmin(HeapTupleHeaderData *tup, CommandId cid)
#define HEAP_XMAX_BITS
#define HEAP_MOVED
#define HEAP_XMAX_IS_MULTI
#define HEAP_XACT_MASK
#define HEAP_XMAX_EXCL_LOCK
static bool HeapTupleHeaderIsSpeculative(const HeapTupleHeaderData *tup)
static void HeapTupleHeaderSetXmin(HeapTupleHeaderData *tup, TransactionId xid)
#define HEAP_XMAX_KEYSHR_LOCK
static void HeapTupleHeaderSetMovedPartitions(HeapTupleHeaderData *tup)
#define MaxHeapTupleSize
static void HeapTupleHeaderSetXmax(HeapTupleHeaderData *tup, TransactionId xid)
static void HeapTupleHeaderSetHotUpdated(HeapTupleHeaderData *tup)
void ProcessCommittedInvalidationMessages(SharedInvalidationMessage *msgs, int nmsgs, bool RelcacheInitFileInval, Oid dbid, Oid tsid)
Definition inval.c:1135
int i
Definition isn.c:77
#define ItemIdGetLength(itemId)
Definition itemid.h:59
#define ItemIdIsNormal(itemId)
Definition itemid.h:99
#define ItemIdGetOffset(itemId)
Definition itemid.h:65
#define ItemIdHasStorage(itemId)
Definition itemid.h:120
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition itemptr.h:135
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition itemptr.h:158
static void ItemPointerSetBlockNumber(ItemPointerData *pointer, BlockNumber blockNumber)
Definition itemptr.h:147
#define InvalidOffsetNumber
Definition off.h:26
uint16 OffsetNumber
Definition off.h:24
#define FirstOffsetNumber
Definition off.h:27
const void size_t len
const void * data
static int fb(int x)
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition pruneheap.c:1666
void heap_xlog_logical_rewrite(XLogReaderState *r)
void ResolveRecoveryConflictWithSnapshot(TransactionId snapshotConflictHorizon, bool isCatalogRel, RelFileLocator locator)
Definition standby.c:470
TransactionId xmax
Definition heapam.h:156
ItemPointerData t_ctid
XLogRecPtr EndRecPtr
Definition xlogreader.h:206
TransactionId xmax
#define InvalidTransactionId
Definition transam.h:31
void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf, XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid, uint8 flags)
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer vmbuf, uint8 flags)
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *vmbuf)
void visibilitymap_set_vmbits(BlockNumber heapBlk, Buffer vmBuf, uint8 flags, const RelFileLocator rlocator)
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_XLOG_VALID_BITS
#define VISIBILITYMAP_XLOG_CATALOG_REL
#define VISIBILITYMAP_ALL_VISIBLE
#define XLogHintBitIsNeeded()
Definition xlog.h:122
uint64 XLogRecPtr
Definition xlogdefs.h:21
bool XLogRecGetBlockTagExtended(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum, Buffer *prefetch_buffer)
char * XLogRecGetBlockData(XLogReaderState *record, uint8 block_id, Size *len)
void XLogRecGetBlockTag(XLogReaderState *record, uint8 block_id, RelFileLocator *rlocator, ForkNumber *forknum, BlockNumber *blknum)
#define XLogRecGetInfo(decoder)
Definition xlogreader.h:409
#define XLogRecGetData(decoder)
Definition xlogreader.h:414
#define XLogRecGetXid(decoder)
Definition xlogreader.h:411
void FreeFakeRelcacheEntry(Relation fakerel)
Definition xlogutils.c:618
XLogRedoAction XLogReadBufferForRedo(XLogReaderState *record, uint8 block_id, Buffer *buf)
Definition xlogutils.c:303
Buffer XLogInitBufferForRedo(XLogReaderState *record, uint8 block_id)
Definition xlogutils.c:315
Relation CreateFakeRelcacheEntry(RelFileLocator rlocator)
Definition xlogutils.c:571
XLogRedoAction XLogReadBufferForRedoExtended(XLogReaderState *record, uint8 block_id, ReadBufferMode mode, bool get_cleanup_lock, Buffer *buf)
Definition xlogutils.c:340
#define InHotStandby
Definition xlogutils.h:60
XLogRedoAction
Definition xlogutils.h:73
@ BLK_RESTORED
Definition xlogutils.h:76
@ BLK_NEEDS_REDO
Definition xlogutils.h:74