/*-------------------------------------------------------------------------
 *
 * heapam_xlog.c
 *    WAL replay logic for heap access method.
 *
 * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *    src/backend/access/heap/heapam_xlog.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/visibilitymap.h"
#include "access/xlog.h"
#include "access/xlogutils.h"
#include "storage/freespace.h"
#include "storage/standby.h"

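/*
 * Note on the common pattern used below: each heap_xlog_* routine reads or
 * restores the buffer(s) named in the record, re-applies the page change
 * only when the xlogutils helpers report BLK_NEEDS_REDO, then sets the page
 * LSN and marks the buffer dirty.
 */
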
/*
 * Replay XLOG_HEAP2_PRUNE_* records.
 */
static void
heap_xlog_prune_freeze(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    char       *maindataptr = XLogRecGetData(record);
    xl_heap_prune xlrec;
    Buffer      buffer;
    RelFileLocator rlocator;
    BlockNumber blkno;
    XLogRedoAction action;

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);
    memcpy(&xlrec, maindataptr, SizeOfHeapPrune);
    maindataptr += SizeOfHeapPrune;

    /*
     * We will take an ordinary exclusive lock or a cleanup lock depending on
     * whether the XLHP_CLEANUP_LOCK flag is set. With an ordinary exclusive
     * lock, we better not be doing anything that requires moving existing
     * tuple data.
     */
    Assert((xlrec.flags & XLHP_CLEANUP_LOCK) != 0 ||
           (xlrec.flags & (XLHP_HAS_REDIRECTIONS | XLHP_HAS_DEAD_ITEMS)) == 0);

    /*
     * We are about to remove and/or freeze tuples. In Hot Standby mode,
     * ensure that there are no queries running for which the removed tuples
     * are still visible or which still consider the frozen xids as running.
     * The conflict horizon XID comes after xl_heap_prune.
     */
    if ((xlrec.flags & XLHP_HAS_CONFLICT_HORIZON) != 0)
    {
        TransactionId snapshot_conflict_horizon;

        /* memcpy() because snapshot_conflict_horizon is stored unaligned */
        memcpy(&snapshot_conflict_horizon, maindataptr, sizeof(TransactionId));
        maindataptr += sizeof(TransactionId);

        if (InHotStandby)
            ResolveRecoveryConflictWithSnapshot(snapshot_conflict_horizon,
                                                (xlrec.flags & XLHP_IS_CATALOG_REL) != 0,
                                                rlocator);
    }

    /*
     * If we have a full-page image, restore it and we're done.
     */
    action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL,
                                           (xlrec.flags & XLHP_CLEANUP_LOCK) != 0,
                                           &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        Page        page = (Page) BufferGetPage(buffer);
        OffsetNumber *redirected;
        OffsetNumber *nowdead;
        OffsetNumber *nowunused;
        int         nredirected;
        int         ndead;
        int         nunused;
        int         nplans;
        Size        datalen;
        xlhp_freeze_plan *plans;
        OffsetNumber *frz_offsets;
        char       *dataptr = XLogRecGetBlockData(record, 0, &datalen);

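        /*
         * Deserialize the freeze plans and the arrays of redirected, dead,
         * and now-unused item offsets from the block 0 data.
         */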
        heap_xlog_deserialize_prune_and_freeze(dataptr, xlrec.flags,
                                               &nplans, &plans, &frz_offsets,
                                               &nredirected, &redirected,
                                               &ndead, &nowdead,
                                               &nunused, &nowunused);

        /*
         * Update all line pointers per the record, and repair fragmentation
         * if needed.
         */
        if (nredirected > 0 || ndead > 0 || nunused > 0)
            heap_page_prune_execute(buffer,
                                    (xlrec.flags & XLHP_CLEANUP_LOCK) == 0,
                                    redirected, nredirected,
                                    nowdead, ndead,
                                    nowunused, nunused);

        /* Freeze tuples */
        for (int p = 0; p < nplans; p++)
        {
            HeapTupleFreeze frz;

            /*
             * Convert freeze plan representation from WAL record into
             * per-tuple format used by heap_execute_freeze_tuple
             */
            frz.xmax = plans[p].xmax;
            frz.t_infomask2 = plans[p].t_infomask2;
            frz.t_infomask = plans[p].t_infomask;
            frz.frzflags = plans[p].frzflags;
            frz.offset = InvalidOffsetNumber;   /* unused, but be tidy */

            for (int i = 0; i < plans[p].ntuples; i++)
            {
                OffsetNumber offset = *(frz_offsets++);
                ItemId      lp;
                HeapTupleHeader tuple;

                lp = PageGetItemId(page, offset);
                tuple = (HeapTupleHeader) PageGetItem(page, lp);
                heap_execute_freeze_tuple(tuple, &frz);
            }
        }

        /* There should be no more data */
        Assert((char *) frz_offsets == dataptr + datalen);

        /*
         * Note: we don't worry about updating the page's prunability hints.
         * At worst this will cause an extra prune cycle to occur soon.
         */

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }

    /*
     * If we released any space or line pointers, update the free space map.
     *
     * Do this regardless of a full-page image being applied, since the FSM
     * data is not in the page anyway.
     */
    if (BufferIsValid(buffer))
    {
        if (xlrec.flags & (XLHP_HAS_REDIRECTIONS |
                           XLHP_HAS_DEAD_ITEMS |
                           XLHP_HAS_NOW_UNUSED_ITEMS))
        {
            Size        freespace = PageGetHeapFreeSpace(BufferGetPage(buffer));

            UnlockReleaseBuffer(buffer);

            XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
        }
        else
            UnlockReleaseBuffer(buffer);
    }
}

/*
 * Replay XLOG_HEAP2_VISIBLE records.
 *
 * The critical integrity requirement here is that we must never end up with
 * a situation where the visibility map bit is set, and the page-level
 * PD_ALL_VISIBLE bit is clear. If that were to occur, then a subsequent
 * page modification would fail to clear the visibility map bit.
 */
static void
heap_xlog_visible(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_visible *xlrec = (xl_heap_visible *) XLogRecGetData(record);
    Buffer      vmbuffer = InvalidBuffer;
    Buffer      buffer;
    Page        page;
    RelFileLocator rlocator;
    BlockNumber blkno;
    XLogRedoAction action;

    Assert((xlrec->flags & VISIBILITYMAP_XLOG_VALID_BITS) == xlrec->flags);

    XLogRecGetBlockTag(record, 1, &rlocator, NULL, &blkno);

    /*
     * If there are any Hot Standby transactions running that have an xmin
     * horizon old enough that this page isn't all-visible for them, they
     * might incorrectly decide that an index-only scan can skip a heap fetch.
     *
     * NB: It might be better to throw some kind of "soft" conflict here that
     * forces any index-only scan that is in flight to perform heap fetches,
     * rather than killing the transaction outright.
     */
    if (InHotStandby)
        ResolveRecoveryConflictWithSnapshot(xlrec->snapshotConflictHorizon,
                                            xlrec->flags & VISIBILITYMAP_XLOG_CATALOG_REL,
                                            rlocator);

    /*
     * Read the heap page, if it still exists. If the heap file has been
     * dropped or truncated later in recovery, we don't need to update the
     * page, but we'd better still update the visibility map.
     */
    action = XLogReadBufferForRedo(record, 1, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        /*
         * We don't bump the LSN of the heap page when setting the visibility
         * map bit (unless checksums or wal_log_hints is enabled, in which
         * case we must). This exposes us to torn page hazards, but since
         * we're not inspecting the existing page contents in any way, we
         * don't care.
         */
        page = BufferGetPage(buffer);

        PageSetAllVisible(page);

        if (XLogHintBitIsNeeded())
            PageSetLSN(page, lsn);

        MarkBufferDirty(buffer);
    }
    else if (action == BLK_RESTORED)
    {
        /*
         * If heap block was backed up, we already restored it and there's
         * nothing more to do. (This can only happen with checksums or
         * wal_log_hints enabled.)
         */
    }

    if (BufferIsValid(buffer))
    {
        Size        space = PageGetFreeSpace(BufferGetPage(buffer));

        UnlockReleaseBuffer(buffer);

        /*
         * Since FSM is not WAL-logged and only updated heuristically, it
         * easily becomes stale in standbys. If the standby is later promoted
         * and runs VACUUM, it will skip updating individual free space
         * figures for pages that became all-visible (or all-frozen,
         * depending on the vacuum mode), which is troublesome when
         * FreeSpaceMapVacuum propagates too optimistic free space values to
         * upper FSM layers; later inserters try to use such pages only to
         * find out that they are unusable. This can cause long stalls when
         * there are many such pages.
         *
         * Forestall those problems by updating FSM's idea about a page that
         * is becoming all-visible or all-frozen.
         *
         * Do this regardless of a full-page image being applied, since the
         * FSM data is not in the page anyway.
         */
        if (xlrec->flags & VISIBILITYMAP_VALID_BITS)
            XLogRecordPageWithFreeSpace(rlocator, blkno, space);
    }

    /*
     * Even if we skipped the heap page update due to the LSN interlock, it's
     * still safe to update the visibility map. Any WAL record that clears
     * the visibility map bit does so before checking the page LSN, so any
     * bits that need to be cleared will still be cleared.
     */
    if (XLogReadBufferForRedoExtended(record, 0, RBM_ZERO_ON_ERROR, false,
                                      &vmbuffer) == BLK_NEEDS_REDO)
    {
        Page        vmpage = BufferGetPage(vmbuffer);
        Relation    reln;
        uint8       vmbits;

        /* initialize the page if it was read as zeros */
        if (PageIsNew(vmpage))
            PageInit(vmpage, BLCKSZ, 0);

        /* remove VISIBILITYMAP_XLOG_* */
        vmbits = xlrec->flags & VISIBILITYMAP_VALID_BITS;

        /*
         * XLogReadBufferForRedoExtended locked the buffer. But
         * visibilitymap_set will handle locking itself.
         */
        LockBuffer(vmbuffer, BUFFER_LOCK_UNLOCK);

        reln = CreateFakeRelcacheEntry(rlocator);
        visibilitymap_pin(reln, blkno, &vmbuffer);

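        /*
         * Note that we pass InvalidBuffer for the heap buffer here: the heap
         * page (block 1 of the record) was already dealt with above, so only
         * the VM bit itself remains to be set.
         */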
        visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
                          xlrec->snapshotConflictHorizon, vmbits);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }
    else if (BufferIsValid(vmbuffer))
        UnlockReleaseBuffer(vmbuffer);
}

/*
 * Given an "infobits" field from an XLog record, set the correct bits in the
 * given infomask and infomask2 for the tuple touched by the record.
 *
 * (This is the reverse of compute_infobits).
 */
static void
fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
{
    *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
                   HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    *infomask2 &= ~HEAP_KEYS_UPDATED;

    if (infobits & XLHL_XMAX_IS_MULTI)
        *infomask |= HEAP_XMAX_IS_MULTI;
    if (infobits & XLHL_XMAX_LOCK_ONLY)
        *infomask |= HEAP_XMAX_LOCK_ONLY;
    if (infobits & XLHL_XMAX_EXCL_LOCK)
        *infomask |= HEAP_XMAX_EXCL_LOCK;
    /* note HEAP_XMAX_SHR_LOCK isn't considered here */
    if (infobits & XLHL_XMAX_KEYSHR_LOCK)
        *infomask |= HEAP_XMAX_KEYSHR_LOCK;

    if (infobits & XLHL_KEYS_UPDATED)
        *infomask2 |= HEAP_KEYS_UPDATED;
}
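
/*
 * For orientation: compute_infobits() in heapam.c is the forward direction,
 * packing these same HEAP_XMAX_* and HEAP_KEYS_UPDATED flags into the
 * compact XLHL_* bits when the WAL record is built; the function above must
 * clear and restore exactly the bits that one covers.
 */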

/*
 * Replay XLOG_HEAP_DELETE records.
 */
static void
heap_xlog_delete(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_delete *xlrec = (xl_heap_delete *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    BlockNumber blkno;
    RelFileLocator target_locator;
    ItemPointerData target_tid;

    XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    ItemPointerSetBlockNumber(&target_tid, blkno);
    ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(target_locator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        if (PageGetMaxOffsetNumber(page) >= xlrec->offnum)
            lp = PageGetItemId(page, xlrec->offnum);

        if (PageGetMaxOffsetNumber(page) < xlrec->offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        HeapTupleHeaderClearHotUpdated(htup);
        fix_infomask_from_infobits(xlrec->infobits_set,
                                   &htup->t_infomask, &htup->t_infomask2);
        if (!(xlrec->flags & XLH_DELETE_IS_SUPER))
            HeapTupleHeaderSetXmax(htup, xlrec->xmax);
        else
            HeapTupleHeaderSetXmin(htup, InvalidTransactionId);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);

        /* Mark the page as a candidate for pruning */
        PageSetPrunable(page, XLogRecGetXid(record));

        if (xlrec->flags & XLH_DELETE_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        /* Make sure t_ctid is set correctly */
        if (xlrec->flags & XLH_DELETE_IS_PARTITION_MOVE)
            HeapTupleHeaderSetMovedPartitions(htup);
        else
            htup->t_ctid = target_tid;
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP_INSERT records.
 */
static void
heap_xlog_insert(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_insert *xlrec = (xl_heap_insert *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
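    /*
     * tbuf is an aligned scratch buffer in which the tuple is reassembled
     * before PageAddItem() copies it onto the page: the HeapTupleHeaderData
     * union member provides the alignment, MaxHeapTupleSize the capacity.
     */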
    HeapTupleHeader htup;
    xl_heap_header xlhdr;
    uint32      newlen;
    Size        freespace = 0;
    RelFileLocator target_locator;
    BlockNumber blkno;
    ItemPointerData target_tid;
    XLogRedoAction action;

    XLogRecGetBlockTag(record, 0, &target_locator, NULL, &blkno);
    ItemPointerSetBlockNumber(&target_tid, blkno);
    ItemPointerSetOffsetNumber(&target_tid, xlrec->offnum);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(target_locator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /*
     * If we inserted the first and only tuple on the page, re-initialize the
     * page from scratch.
     */
    if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    {
        buffer = XLogInitBufferForRedo(record, 0);
        page = BufferGetPage(buffer);
        PageInit(page, BufferGetPageSize(buffer), 0);
        action = BLK_NEEDS_REDO;
    }
    else
        action = XLogReadBufferForRedo(record, 0, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        Size        datalen;
        char       *data;

        page = BufferGetPage(buffer);

        if (PageGetMaxOffsetNumber(page) + 1 < xlrec->offnum)
            elog(PANIC, "invalid max offset number");

        data = XLogRecGetBlockData(record, 0, &datalen);

        newlen = datalen - SizeOfHeapHeader;
        Assert(datalen > SizeOfHeapHeader && newlen <= MaxHeapTupleSize);
        memcpy((char *) &xlhdr, data, SizeOfHeapHeader);
        data += SizeOfHeapHeader;

        htup = &tbuf.hdr;
        MemSet((char *) htup, 0, SizeofHeapTupleHeader);
        /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
        memcpy((char *) htup + SizeofHeapTupleHeader,
               data,
               newlen);
        newlen += SizeofHeapTupleHeader;
        htup->t_infomask2 = xlhdr.t_infomask2;
        htup->t_infomask = xlhdr.t_infomask;
        htup->t_hoff = xlhdr.t_hoff;
        HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
        htup->t_ctid = target_tid;

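        /*
         * Re-insert the tuple at exactly the offset recorded at do-time; the
         * last two arguments tell PageAddItem to allow overwriting an unused
         * line pointer and that this is a heap page.
         */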
        if (PageAddItem(page, (Item) htup, newlen, xlrec->offnum,
                        true, true) == InvalidOffsetNumber)
            elog(PANIC, "failed to add tuple");

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);

        if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
        if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
            PageSetAllVisible(page);

        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(target_locator, blkno, freespace);
}

/*
 * Replay XLOG_HEAP2_MULTI_INSERT records.
 */
static void
heap_xlog_multi_insert(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_multi_insert *xlrec;
    RelFileLocator rlocator;
    BlockNumber blkno;
    Buffer      buffer;
    Page        page;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    HeapTupleHeader htup;
    uint32      newlen;
    Size        freespace = 0;
    int         i;
    bool        isinit = (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE) != 0;
    XLogRedoAction action;

    /*
     * Insertion doesn't overwrite MVCC data, so no conflict processing is
     * required.
     */
    xlrec = (xl_heap_multi_insert *) XLogRecGetData(record);

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &blkno);

    /* check that the mutually exclusive flags are not both set */
    Assert(!((xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED) &&
             (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)));

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, blkno, &vmbuffer);
        visibilitymap_clear(reln, blkno, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (isinit)
    {
        buffer = XLogInitBufferForRedo(record, 0);
        page = BufferGetPage(buffer);
        PageInit(page, BufferGetPageSize(buffer), 0);
        action = BLK_NEEDS_REDO;
    }
    else
        action = XLogReadBufferForRedo(record, 0, &buffer);
    if (action == BLK_NEEDS_REDO)
    {
        char       *tupdata;
        char       *endptr;
        Size        len;

        /* Tuples are stored as block data */
        tupdata = XLogRecGetBlockData(record, 0, &len);
        endptr = tupdata + len;

        page = (Page) BufferGetPage(buffer);

        for (i = 0; i < xlrec->ntuples; i++)
        {
            OffsetNumber offnum;
            xl_multi_insert_tuple *xlhdr;

            /*
             * If we're reinitializing the page, the tuples are stored in
             * order from FirstOffsetNumber. Otherwise there's an array of
             * offsets in the WAL record, and the tuples come after that.
             */
            if (isinit)
                offnum = FirstOffsetNumber + i;
            else
                offnum = xlrec->offsets[i];
            if (PageGetMaxOffsetNumber(page) + 1 < offnum)
                elog(PANIC, "invalid max offset number");

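            /*
             * Each xl_multi_insert_tuple header is stored 2-byte aligned
             * (SHORTALIGN) in the block data, immediately followed by that
             * tuple's data.
             */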
            xlhdr = (xl_multi_insert_tuple *) SHORTALIGN(tupdata);
            tupdata = ((char *) xlhdr) + SizeOfMultiInsertTuple;

            newlen = xlhdr->datalen;
            Assert(newlen <= MaxHeapTupleSize);
            htup = &tbuf.hdr;
            MemSet((char *) htup, 0, SizeofHeapTupleHeader);
            /* PG73FORMAT: get bitmap [+ padding] [+ oid] + data */
            memcpy((char *) htup + SizeofHeapTupleHeader,
                   (char *) tupdata,
                   newlen);
            tupdata += newlen;

            newlen += SizeofHeapTupleHeader;
            htup->t_infomask2 = xlhdr->t_infomask2;
            htup->t_infomask = xlhdr->t_infomask;
            htup->t_hoff = xlhdr->t_hoff;
            HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
            HeapTupleHeaderSetCmin(htup, FirstCommandId);
            ItemPointerSetBlockNumber(&htup->t_ctid, blkno);
            ItemPointerSetOffsetNumber(&htup->t_ctid, offnum);

            offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
            if (offnum == InvalidOffsetNumber)
                elog(PANIC, "failed to add tuple");
        }
        if (tupdata != endptr)
            elog(PANIC, "total tuple length mismatch");

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);

        if (xlrec->flags & XLH_INSERT_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        /* XLH_INSERT_ALL_FROZEN_SET implies that all tuples are visible */
        if (xlrec->flags & XLH_INSERT_ALL_FROZEN_SET)
            PageSetAllVisible(page);

        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

    /*
     * If the page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (action == BLK_NEEDS_REDO && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(rlocator, blkno, freespace);
}

/*
 * Replay XLOG_HEAP_UPDATE and XLOG_HEAP_HOT_UPDATE records.
 */
static void
heap_xlog_update(XLogReaderState *record, bool hot_update)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_update *xlrec = (xl_heap_update *) XLogRecGetData(record);
    RelFileLocator rlocator;
    BlockNumber oldblk;
    BlockNumber newblk;
    ItemPointerData newtid;
    Buffer      obuffer,
                nbuffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleData oldtup;
    HeapTupleHeader htup;
    uint16      prefixlen = 0,
                suffixlen = 0;
    char       *newp;
    union
    {
        HeapTupleHeaderData hdr;
        char        data[MaxHeapTupleSize];
    }           tbuf;
    xl_heap_header xlhdr;
    uint32      newlen;
    Size        freespace = 0;
    XLogRedoAction oldaction;
    XLogRedoAction newaction;

    /* initialize to keep the compiler quiet */
    oldtup.t_data = NULL;
    oldtup.t_len = 0;

    XLogRecGetBlockTag(record, 0, &rlocator, NULL, &newblk);
    if (XLogRecGetBlockTagExtended(record, 1, NULL, NULL, &oldblk, NULL))
    {
        /* HOT updates are never done across pages */
        Assert(!hot_update);
    }
    else
        oldblk = newblk;

    ItemPointerSet(&newtid, newblk, xlrec->new_offnum);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, oldblk, &vmbuffer);
        visibilitymap_clear(reln, oldblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /*
     * In normal operation, it is important to lock the two pages in
     * page-number order, to avoid possible deadlocks against other update
     * operations going the other way. However, during WAL replay there can
     * be no other update happening, so we don't need to worry about that. But
     * we *do* need to worry that we don't expose an inconsistent state to Hot
     * Standby queries --- so the original page can't be unlocked before we've
     * added the new tuple to the new page.
     */

    /* Deal with old tuple version */
    oldaction = XLogReadBufferForRedo(record, (oldblk == newblk) ? 0 : 1,
                                      &obuffer);
    if (oldaction == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(obuffer);
        offnum = xlrec->old_offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        oldtup.t_data = htup;
        oldtup.t_len = ItemIdGetLength(lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        if (hot_update)
            HeapTupleHeaderSetHotUpdated(htup);
        else
            HeapTupleHeaderClearHotUpdated(htup);
        fix_infomask_from_infobits(xlrec->old_infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);
        HeapTupleHeaderSetXmax(htup, xlrec->old_xmax);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        /* Set forward chain link in t_ctid */
        htup->t_ctid = newtid;

        /* Mark the page as a candidate for pruning */
        PageSetPrunable(page, XLogRecGetXid(record));

        if (xlrec->flags & XLH_UPDATE_OLD_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        PageSetLSN(page, lsn);
        MarkBufferDirty(obuffer);
    }

    /*
     * Read the page the new tuple goes into, if different from old.
     */
    if (oldblk == newblk)
    {
        nbuffer = obuffer;
        newaction = oldaction;
    }
    else if (XLogRecGetInfo(record) & XLOG_HEAP_INIT_PAGE)
    {
        nbuffer = XLogInitBufferForRedo(record, 0);
        page = (Page) BufferGetPage(nbuffer);
        PageInit(page, BufferGetPageSize(nbuffer), 0);
        newaction = BLK_NEEDS_REDO;
    }
    else
        newaction = XLogReadBufferForRedo(record, 0, &nbuffer);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
    {
        Relation    reln = CreateFakeRelcacheEntry(rlocator);
        Buffer      vmbuffer = InvalidBuffer;

        visibilitymap_pin(reln, newblk, &vmbuffer);
        visibilitymap_clear(reln, newblk, vmbuffer, VISIBILITYMAP_VALID_BITS);
        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    /* Deal with new tuple */
    if (newaction == BLK_NEEDS_REDO)
    {
        char       *recdata;
        char       *recdata_end;
        Size        datalen;
        Size        tuplen;

        recdata = XLogRecGetBlockData(record, 0, &datalen);
        recdata_end = recdata + datalen;

        page = BufferGetPage(nbuffer);

        offnum = xlrec->new_offnum;
        if (PageGetMaxOffsetNumber(page) + 1 < offnum)
            elog(PANIC, "invalid max offset number");

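        /*
         * If the record says the new tuple shares a prefix and/or suffix
         * with the old tuple on the same page, the shared lengths are stored
         * as uint16s ahead of the tuple header in the block data.
         */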
        if (xlrec->flags & XLH_UPDATE_PREFIX_FROM_OLD)
        {
            Assert(newblk == oldblk);
            memcpy(&prefixlen, recdata, sizeof(uint16));
            recdata += sizeof(uint16);
        }
        if (xlrec->flags & XLH_UPDATE_SUFFIX_FROM_OLD)
        {
            Assert(newblk == oldblk);
            memcpy(&suffixlen, recdata, sizeof(uint16));
            recdata += sizeof(uint16);
        }

        memcpy((char *) &xlhdr, recdata, SizeOfHeapHeader);
        recdata += SizeOfHeapHeader;

        tuplen = recdata_end - recdata;
        Assert(tuplen <= MaxHeapTupleSize);

        htup = &tbuf.hdr;
        MemSet((char *) htup, 0, SizeofHeapTupleHeader);

        /*
         * Reconstruct the new tuple using the prefix and/or suffix from the
         * old tuple, and the data stored in the WAL record.
         */
        newp = (char *) htup + SizeofHeapTupleHeader;
        if (prefixlen > 0)
        {
            int         len;

            /* copy bitmap [+ padding] [+ oid] from WAL record */
            len = xlhdr.t_hoff - SizeofHeapTupleHeader;
            memcpy(newp, recdata, len);
            recdata += len;
            newp += len;

            /* copy prefix from old tuple */
            memcpy(newp, (char *) oldtup.t_data + oldtup.t_data->t_hoff, prefixlen);
            newp += prefixlen;

            /* copy new tuple data from WAL record */
            len = tuplen - (xlhdr.t_hoff - SizeofHeapTupleHeader);
            memcpy(newp, recdata, len);
            recdata += len;
            newp += len;
        }
        else
        {
            /*
             * copy bitmap [+ padding] [+ oid] + data from record, all in one
             * go
             */
            memcpy(newp, recdata, tuplen);
            recdata += tuplen;
            newp += tuplen;
        }
        Assert(recdata == recdata_end);

        /* copy suffix from old tuple */
        if (suffixlen > 0)
            memcpy(newp, (char *) oldtup.t_data + oldtup.t_len - suffixlen, suffixlen);

        newlen = SizeofHeapTupleHeader + tuplen + prefixlen + suffixlen;
        htup->t_infomask2 = xlhdr.t_infomask2;
        htup->t_infomask = xlhdr.t_infomask;
        htup->t_hoff = xlhdr.t_hoff;

        HeapTupleHeaderSetXmin(htup, XLogRecGetXid(record));
        HeapTupleHeaderSetCmin(htup, FirstCommandId);
        HeapTupleHeaderSetXmax(htup, xlrec->new_xmax);
        /* Make sure there is no forward chain link in t_ctid */
        htup->t_ctid = newtid;

        offnum = PageAddItem(page, (Item) htup, newlen, offnum, true, true);
        if (offnum == InvalidOffsetNumber)
            elog(PANIC, "failed to add tuple");

        if (xlrec->flags & XLH_UPDATE_NEW_ALL_VISIBLE_CLEARED)
            PageClearAllVisible(page);

        freespace = PageGetHeapFreeSpace(page); /* needed to update FSM below */

        PageSetLSN(page, lsn);
        MarkBufferDirty(nbuffer);
    }

    if (BufferIsValid(nbuffer) && nbuffer != obuffer)
        UnlockReleaseBuffer(nbuffer);
    if (BufferIsValid(obuffer))
        UnlockReleaseBuffer(obuffer);

    /*
     * If the new page is running low on free space, update the FSM as well.
     * Arbitrarily, our definition of "low" is less than 20%. We can't do much
     * better than that without knowing the fill-factor for the table.
     *
     * However, don't update the FSM on HOT updates, because after crash
     * recovery, either the old or the new tuple will certainly be dead and
     * prunable. After pruning, the page will have roughly as much free space
     * as it did before the update, assuming the new tuple is about the same
     * size as the old one.
     *
     * XXX: Don't do this if the page was restored from full page image. We
     * don't bother to update the FSM in that case, it doesn't need to be
     * totally accurate anyway.
     */
    if (newaction == BLK_NEEDS_REDO && !hot_update && freespace < BLCKSZ / 5)
        XLogRecordPageWithFreeSpace(rlocator, newblk, freespace);
}

/*
 * Replay XLOG_HEAP_CONFIRM records.
 */
static void
heap_xlog_confirm(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_confirm *xlrec = (xl_heap_confirm *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        /*
         * Confirm tuple as actually inserted
         */
        ItemPointerSet(&htup->t_ctid, BufferGetBlockNumber(buffer), offnum);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP_LOCK records.
 */
static void
heap_xlog_lock(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_lock *xlrec = (xl_heap_lock *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    {
        RelFileLocator rlocator;
        Buffer      vmbuffer = InvalidBuffer;
        BlockNumber block;
        Relation    reln;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
        reln = CreateFakeRelcacheEntry(rlocator);

        visibilitymap_pin(reln, block, &vmbuffer);
        visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = (Page) BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);

        /*
         * Clear relevant update flags, but only if the modified infomask says
         * there's no update.
         */
        if (HEAP_XMAX_IS_LOCKED_ONLY(htup->t_infomask))
        {
            HeapTupleHeaderClearHotUpdated(htup);
            /* Make sure there is no forward chain link in t_ctid */
            ItemPointerSet(&htup->t_ctid,
                           BufferGetBlockNumber(buffer),
                           offnum);
        }
        HeapTupleHeaderSetXmax(htup, xlrec->xmax);
        HeapTupleHeaderSetCmax(htup, FirstCommandId, false);
        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP2_LOCK_UPDATED records.
 */
static void
heap_xlog_lock_updated(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_lock_updated *xlrec;
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;

    xlrec = (xl_heap_lock_updated *) XLogRecGetData(record);

    /*
     * The visibility map may need to be fixed even if the heap page is
     * already up-to-date.
     */
    if (xlrec->flags & XLH_LOCK_ALL_FROZEN_CLEARED)
    {
        RelFileLocator rlocator;
        Buffer      vmbuffer = InvalidBuffer;
        BlockNumber block;
        Relation    reln;

        XLogRecGetBlockTag(record, 0, &rlocator, NULL, &block);
        reln = CreateFakeRelcacheEntry(rlocator);

        visibilitymap_pin(reln, block, &vmbuffer);
        visibilitymap_clear(reln, block, vmbuffer, VISIBILITYMAP_ALL_FROZEN);

        ReleaseBuffer(vmbuffer);
        FreeFakeRelcacheEntry(reln);
    }

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        htup->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
        htup->t_infomask2 &= ~HEAP_KEYS_UPDATED;
        fix_infomask_from_infobits(xlrec->infobits_set, &htup->t_infomask,
                                   &htup->t_infomask2);
        HeapTupleHeaderSetXmax(htup, xlrec->xmax);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);
}

/*
 * Replay XLOG_HEAP_INPLACE records.
 */
static void
heap_xlog_inplace(XLogReaderState *record)
{
    XLogRecPtr  lsn = record->EndRecPtr;
    xl_heap_inplace *xlrec = (xl_heap_inplace *) XLogRecGetData(record);
    Buffer      buffer;
    Page        page;
    OffsetNumber offnum;
    ItemId      lp = NULL;
    HeapTupleHeader htup;
    uint32      oldlen;
    Size        newlen;

    if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
    {
        char       *newtup = XLogRecGetBlockData(record, 0, &newlen);

        page = BufferGetPage(buffer);

        offnum = xlrec->offnum;
        if (PageGetMaxOffsetNumber(page) >= offnum)
            lp = PageGetItemId(page, offnum);

        if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
            elog(PANIC, "invalid lp");

        htup = (HeapTupleHeader) PageGetItem(page, lp);

        oldlen = ItemIdGetLength(lp) - htup->t_hoff;
        if (oldlen != newlen)
            elog(PANIC, "wrong tuple length");

        memcpy((char *) htup + htup->t_hoff, newtup, newlen);

        PageSetLSN(page, lsn);
        MarkBufferDirty(buffer);
    }
    if (BufferIsValid(buffer))
        UnlockReleaseBuffer(buffer);

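    /*
     * Replay the shared invalidation messages that were logged along with
     * the in-place update.
     */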
    ProcessCommittedInvalidationMessages(xlrec->msgs,
                                         xlrec->nmsgs,
                                         xlrec->relcacheInitFileInval,
                                         xlrec->dbId,
                                         xlrec->tsId);
}

void
heap_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    /*
     * These operations don't overwrite MVCC data so no conflict processing is
     * required. The ones in heap2 rmgr do.
     */

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP_INSERT:
            heap_xlog_insert(record);
            break;
        case XLOG_HEAP_DELETE:
            heap_xlog_delete(record);
            break;
        case XLOG_HEAP_UPDATE:
            heap_xlog_update(record, false);
            break;
        case XLOG_HEAP_TRUNCATE:

            /*
             * TRUNCATE is a no-op because the actions are already logged as
             * SMGR WAL records. The TRUNCATE WAL record only exists for
             * logical decoding.
             */
            break;
        case XLOG_HEAP_HOT_UPDATE:
            heap_xlog_update(record, true);
            break;
        case XLOG_HEAP_CONFIRM:
            heap_xlog_confirm(record);
            break;
        case XLOG_HEAP_LOCK:
            heap_xlog_lock(record);
            break;
        case XLOG_HEAP_INPLACE:
            heap_xlog_inplace(record);
            break;
        default:
            elog(PANIC, "heap_redo: unknown op code %u", info);
    }
}

void
heap2_redo(XLogReaderState *record)
{
    uint8       info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

    switch (info & XLOG_HEAP_OPMASK)
    {
        case XLOG_HEAP2_PRUNE_ON_ACCESS:
        case XLOG_HEAP2_PRUNE_VACUUM_SCAN:
        case XLOG_HEAP2_PRUNE_VACUUM_CLEANUP:
            heap_xlog_prune_freeze(record);
            break;
        case XLOG_HEAP2_VISIBLE:
            heap_xlog_visible(record);
            break;
        case XLOG_HEAP2_MULTI_INSERT:
            heap_xlog_multi_insert(record);
            break;
        case XLOG_HEAP2_LOCK_UPDATED:
            heap_xlog_lock_updated(record);
            break;
        case XLOG_HEAP2_NEW_CID:

            /*
             * Nothing to do on a real replay, only used during logical
             * decoding.
             */
            break;
        case XLOG_HEAP2_REWRITE:
            heap_xlog_logical_rewrite(record);
            break;
        default:
            elog(PANIC, "heap2_redo: unknown op code %u", info);
    }
}

/*
 * Mask a heap page before performing consistency checks on it.
 */
void
heap_mask(char *pagedata, BlockNumber blkno)
{
    Page        page = (Page) pagedata;
    OffsetNumber off;

    mask_page_lsn_and_checksum(page);

    mask_page_hint_bits(page);
    mask_unused_space(page);

    for (off = 1; off <= PageGetMaxOffsetNumber(page); off++)
    {
        ItemId      iid = PageGetItemId(page, off);
        char       *page_item;

        page_item = (char *) (page + ItemIdGetOffset(iid));

        if (ItemIdIsNormal(iid))
        {
            HeapTupleHeader page_htup = (HeapTupleHeader) page_item;

            /*
             * If xmin of a tuple is not yet frozen, we should ignore
             * differences in hint bits, since they can be set without
             * emitting WAL.
             */
            if (!HeapTupleHeaderXminFrozen(page_htup))
                page_htup->t_infomask &= ~HEAP_XACT_MASK;
            else
            {
                /* Still we need to mask xmax hint bits. */
                page_htup->t_infomask &= ~HEAP_XMAX_INVALID;
                page_htup->t_infomask &= ~HEAP_XMAX_COMMITTED;
            }

            /*
             * During replay, we set Command Id to FirstCommandId. Hence, mask
             * it. See heap_xlog_insert() for details.
             */
            page_htup->t_choice.t_heap.t_field3.t_cid = MASK_MARKER;

            /*
             * For a speculative tuple, heap_insert() does not set ctid in the
             * caller-passed heap tuple itself, leaving the ctid field to
             * contain a speculative token value - a per-backend monotonically
             * increasing identifier. Besides, it does not WAL-log ctid under
             * any circumstances.
             *
             * During redo, heap_xlog_insert() sets t_ctid to current block
             * number and self offset number. It doesn't care about any
             * speculative insertions on the primary. Hence, we set t_ctid to
             * current block number and self offset number to ignore any
             * inconsistency.
             */
            if (HeapTupleHeaderIsSpeculative(page_htup))
                ItemPointerSet(&page_htup->t_ctid, blkno, off);

            /*
             * NB: Not ignoring ctid changes due to the tuple having moved
             * (i.e. HeapTupleHeaderIndicatesMovedPartitions), because that's
             * important information that needs to be in-sync between primary
             * and standby, and thus is WAL logged.
             */
        }

        /*
         * Ignore any padding bytes after the tuple, when the length of the
         * item is not MAXALIGNed.
         */
        if (ItemIdHasStorage(iid))
        {
            int         len = ItemIdGetLength(iid);
            int         padlen = MAXALIGN(len) - len;

            if (padlen > 0)
                memset(page_item + len, MASK_MARKER, padlen);
        }
    }
}