PostgreSQL Source Code (git master)
pruneheap.c File Reference
#include "postgres.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymapdefs.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

Data Structures

struct  PruneState
 

Functions

static HTSV_Result heap_prune_satisfies_vacuum (PruneState *prstate, HeapTuple tup, Buffer buffer)
 
static HTSV_Result htsv_get_valid_status (int status)
 
static void heap_prune_chain (Page page, BlockNumber blockno, OffsetNumber maxoff, OffsetNumber rootoffnum, PruneState *prstate)
 
static void heap_prune_record_prunable (PruneState *prstate, TransactionId xid)
 
static void heap_prune_record_redirect (PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
 
static void heap_prune_record_dead (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_dead_or_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unchanged_lp_unused (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_normal (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_dead (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_redirect (PruneState *prstate, OffsetNumber offnum)
 
static void page_verify_redirects (Page page)
 
static bool heap_page_will_freeze (Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
 
void heap_page_prune_opt (Relation relation, Buffer buffer)
 
void heap_page_prune_and_freeze (Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
 
void heap_page_prune_execute (Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
 
void heap_get_root_tuples (Page page, OffsetNumber *root_offsets)
 
static bool heap_log_freeze_eq (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_cmp (const void *arg1, const void *arg2)
 
static void heap_log_freeze_new_plan (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_plan (HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)
 
void log_heap_prune_and_freeze (Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
 

Function Documentation

◆ heap_get_root_tuples()

void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)

Definition at line 1829 of file pruneheap.c.

1830{
1831 OffsetNumber offnum,
1832 maxoff;
1833
1834 MemSet(root_offsets, InvalidOffsetNumber,
1835 MaxHeapTuplesPerPage * sizeof(OffsetNumber));
1836
1837 maxoff = PageGetMaxOffsetNumber(page);
1838 for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
1839 {
1840 ItemId lp = PageGetItemId(page, offnum);
1841 HeapTupleHeader htup;
1842 OffsetNumber nextoffnum;
1843 TransactionId priorXmax;
1844
1845 /* skip unused and dead items */
1846 if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
1847 continue;
1848
1849 if (ItemIdIsNormal(lp))
1850 {
1851 htup = (HeapTupleHeader) PageGetItem(page, lp);
1852
1853 /*
1854 * Check if this tuple is part of a HOT-chain rooted at some other
1855 * tuple. If so, skip it for now; we'll process it when we find
1856 * its root.
1857 */
1858 if (HeapTupleHeaderIsHeapOnly(htup))
1859 continue;
1860
1861 /*
1862 * This is either a plain tuple or the root of a HOT-chain.
1863 * Remember it in the mapping.
1864 */
1865 root_offsets[offnum - 1] = offnum;
1866
1867 /* If it's not the start of a HOT-chain, we're done with it */
1868 if (!HeapTupleHeaderIsHotUpdated(htup))
1869 continue;
1870
1871 /* Set up to scan the HOT-chain */
1872 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1873 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1874 }
1875 else
1876 {
1877 /* Must be a redirect item. We do not set its root_offsets entry */
1878 Assert(ItemIdIsRedirected(lp));
1879 /* Set up to scan the HOT-chain */
1880 nextoffnum = ItemIdGetRedirect(lp);
1881 priorXmax = InvalidTransactionId;
1882 }
1883
1884 /*
1885 * Now follow the HOT-chain and collect other tuples in the chain.
1886 *
1887 * Note: Even though this is a nested loop, the complexity of the
1888 * function is O(N) because a tuple in the page should be visited not
1889 * more than twice, once in the outer loop and once in HOT-chain
1890 * chases.
1891 */
1892 for (;;)
1893 {
1894 /* Sanity check (pure paranoia) */
1895 if (offnum < FirstOffsetNumber)
1896 break;
1897
1898 /*
1899 * An offset past the end of page's line pointer array is possible
1900 * when the array was truncated
1901 */
1902 if (offnum > maxoff)
1903 break;
1904
1905 lp = PageGetItemId(page, nextoffnum);
1906
1907 /* Check for broken chains */
1908 if (!ItemIdIsNormal(lp))
1909 break;
1910
1911 htup = (HeapTupleHeader) PageGetItem(page, lp);
1912
1913 if (TransactionIdIsValid(priorXmax) &&
1914 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
1915 break;
1916
1917 /* Remember the root line pointer for this item */
1918 root_offsets[nextoffnum - 1] = offnum;
1919
1920 /* Advance to next chain member, if any */
1921 if (!HeapTupleHeaderIsHotUpdated(htup))
1922 break;
1923
1924 /* HOT implies it can't have moved to different partition */
1925 Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1926
1927 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1928 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1929 }
1930 }
1931}
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define MemSet(start, val, len)
Definition: c.h:1023
uint32 TransactionId
Definition: c.h:661
Assert(PointerIsAligned(start, uint64))
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
Definition: htup_details.h:555
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
Definition: htup_details.h:480
static bool HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
Definition: htup_details.h:534
static TransactionId HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
Definition: htup_details.h:397
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
ItemPointerData t_ctid
Definition: htup_details.h:161
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References Assert(), FirstOffsetNumber, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetOffsetNumber(), MaxHeapTuplesPerPage, MemSet, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_build_range_scan(), and heapam_index_validate_scan().
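For illustration, a hedged usage sketch modeled on the callers named above (heapam_index_build_range_scan() in particular). The variables page, blockno, and offnum are assumed to be supplied by the surrounding scan loop and are not part of this function's API:

    OffsetNumber root_offsets[MaxHeapTuplesPerPage];
    ItemPointerData root_tid;
    OffsetNumber root_offnum;

    heap_get_root_tuples(page, root_offsets);

    /* Map a heap-only tuple at 'offnum' back to its HOT chain root */
    root_offnum = root_offsets[offnum - 1];
    if (!OffsetNumberIsValid(root_offnum))
        elog(ERROR, "failed to find parent tuple for heap-only tuple at (%u,%u)",
             blockno, offnum);

    /* Index entries point at the root line pointer, not the heap-only tuple */
    ItemPointerSet(&root_tid, blockno, root_offnum);

Note that root_offsets is indexed by offnum - 1, and redirect items get no entry of their own; only tuples reachable from a root are mapped.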

◆ heap_log_freeze_cmp()

static int heap_log_freeze_cmp(const void *arg1, const void *arg2)

Definition at line 1956 of file pruneheap.c.

1957{
1958 HeapTupleFreeze *frz1 = (HeapTupleFreeze *) arg1;
1959 HeapTupleFreeze *frz2 = (HeapTupleFreeze *) arg2;
1960
1961 if (frz1->xmax < frz2->xmax)
1962 return -1;
1963 else if (frz1->xmax > frz2->xmax)
1964 return 1;
1965
1966 if (frz1->t_infomask2 < frz2->t_infomask2)
1967 return -1;
1968 else if (frz1->t_infomask2 > frz2->t_infomask2)
1969 return 1;
1970
1971 if (frz1->t_infomask < frz2->t_infomask)
1972 return -1;
1973 else if (frz1->t_infomask > frz2->t_infomask)
1974 return 1;
1975
1976 if (frz1->frzflags < frz2->frzflags)
1977 return -1;
1978 else if (frz1->frzflags > frz2->frzflags)
1979 return 1;
1980
1981 /*
1982 * heap_log_freeze_eq would consider these tuple-wise plans to be equal.
1983 * (So the tuples will share a single canonical freeze plan.)
1984 *
1985 * We tiebreak on page offset number to keep each freeze plan's page
1986 * offset number array individually sorted. (Unnecessary, but be tidy.)
1987 */
1988 if (frz1->offset < frz2->offset)
1989 return -1;
1990 else if (frz1->offset > frz2->offset)
1991 return 1;
1992
1993 Assert(false);
1994 return 0;
1995}
uint8 frzflags
Definition: heapam.h:147
uint16 t_infomask2
Definition: heapam.h:145
TransactionId xmax
Definition: heapam.h:144
OffsetNumber offset
Definition: heapam.h:152
uint16 t_infomask
Definition: heapam.h:146

References Assert(), HeapTupleFreeze::frzflags, HeapTupleFreeze::offset, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_eq()

static inline bool heap_log_freeze_eq(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)

Definition at line 1940 of file pruneheap.c.

1941{
1942 if (plan->xmax == frz->xmax &&
1943 plan->t_infomask2 == frz->t_infomask2 &&
1944 plan->t_infomask == frz->t_infomask &&
1945 plan->frzflags == frz->frzflags)
1946 return true;
1947
1948 /* Caller must call heap_log_freeze_new_plan again for frz */
1949 return false;
1950}

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_new_plan()

static inline void heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)

Definition at line 2002 of file pruneheap.c.

2003{
2004 plan->xmax = frz->xmax;
2005 plan->t_infomask2 = frz->t_infomask2;
2006 plan->t_infomask = frz->t_infomask;
2007 plan->frzflags = frz->frzflags;
2008 plan->ntuples = 1; /* for now */
2009}

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_plan()

static int heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)

Definition at line 2022 of file pruneheap.c.

2025{
2026 int nplans = 0;
2027
2028 /* Sort tuple-based freeze plans in the order required to deduplicate */
2029 qsort(tuples, ntuples, sizeof(HeapTupleFreeze), heap_log_freeze_cmp);
2030
2031 for (int i = 0; i < ntuples; i++)
2032 {
2033 HeapTupleFreeze *frz = tuples + i;
2034
2035 if (i == 0)
2036 {
2037 /* New canonical freeze plan starting with first tup */
2038 heap_log_freeze_new_plan(plans_out, frz);
2039 nplans++;
2040 }
2041 else if (heap_log_freeze_eq(plans_out, frz))
2042 {
2043 /* tup matches open canonical plan -- include tup in it */
2044 Assert(offsets_out[i - 1] < frz->offset);
2045 plans_out->ntuples++;
2046 }
2047 else
2048 {
2049 /* Tup doesn't match current plan -- done with it now */
2050 plans_out++;
2051
2052 /* New canonical freeze plan starting with this tup */
2053 heap_log_freeze_new_plan(plans_out, frz);
2054 nplans++;
2055 }
2056
2057 /*
2058 * Save page offset number in dedicated buffer in passing.
2059 *
2060 * REDO routine relies on the record's offset numbers array grouping
2061 * offset numbers by freeze plan. The sort order within each grouping
2062 * is ascending offset number order, just to keep things tidy.
2063 */
2064 offsets_out[i] = frz->offset;
2065 }
2066
2067 Assert(nplans > 0 && nplans <= ntuples);
2068
2069 return nplans;
2070}
#define qsort(a, b, c, d)
Definition: port.h:479
static int heap_log_freeze_cmp(const void *arg1, const void *arg2)
Definition: pruneheap.c:1956
static bool heap_log_freeze_eq(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:1940
static void heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:2002

References Assert(), heap_log_freeze_cmp(), heap_log_freeze_eq(), heap_log_freeze_new_plan(), i, xlhp_freeze_plan::ntuples, HeapTupleFreeze::offset, and qsort.

Referenced by log_heap_prune_and_freeze().
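To see the deduplication technique in isolation, here is a self-contained sketch (not from the PostgreSQL sources) that keys plans on a single xmax field; the real code additionally compares t_infomask, t_infomask2, and frzflags via heap_log_freeze_eq():

    #include <stdio.h>
    #include <stdlib.h>

    /* Simplified stand-ins for HeapTupleFreeze and xlhp_freeze_plan */
    typedef struct { unsigned xmax; unsigned short offset; } FrzTuple;
    typedef struct { unsigned xmax; int ntuples; } FrzPlan;

    /* Sort by plan key (here just xmax), tiebreaking on page offset */
    static int
    frz_cmp(const void *a, const void *b)
    {
        const FrzTuple *t1 = (const FrzTuple *) a;
        const FrzTuple *t2 = (const FrzTuple *) b;

        if (t1->xmax != t2->xmax)
            return t1->xmax < t2->xmax ? -1 : 1;
        return t1->offset < t2->offset ? -1 : 1;
    }

    int
    main(void)
    {
        FrzTuple tuples[] = {{642, 3}, {640, 1}, {642, 2}, {640, 7}};
        int      ntuples = 4;
        FrzPlan  plans[4];
        unsigned short offsets[4];
        int      nplans = 0;

        qsort(tuples, ntuples, sizeof(FrzTuple), frz_cmp);

        for (int i = 0; i < ntuples; i++)
        {
            /* Equal keys are now adjacent: extend the open plan or start one */
            if (i == 0 || plans[nplans - 1].xmax != tuples[i].xmax)
            {
                plans[nplans].xmax = tuples[i].xmax;
                plans[nplans].ntuples = 0;
                nplans++;
            }
            plans[nplans - 1].ntuples++;
            offsets[i] = tuples[i].offset;  /* grouped by plan, ascending */
        }

        for (int i = 0; i < nplans; i++)
            printf("plan: xmax=%u ntuples=%d\n", plans[i].xmax, plans[i].ntuples);
        return 0;
    }

The sort makes equal freeze plans adjacent, so a single pass suffices to emit one canonical plan per group while the offsets array stays grouped by plan in ascending order, exactly the layout the REDO routine expects.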

◆ heap_page_prune_and_freeze()

void heap_page_prune_and_freeze(Relation relation, Buffer buffer,
                                GlobalVisState *vistest, int options,
                                struct VacuumCutoffs *cutoffs,
                                PruneFreezeResult *presult, PruneReason reason,
                                OffsetNumber *off_loc,
                                TransactionId *new_relfrozen_xid,
                                MultiXactId *new_relmin_mxid)

Definition at line 466 of file pruneheap.c.

475{
476 Page page = BufferGetPage(buffer);
477 BlockNumber blockno = BufferGetBlockNumber(buffer);
478 OffsetNumber offnum,
479 maxoff;
480 PruneState prstate;
481 HeapTupleData tup;
482 bool do_freeze;
483 bool do_prune;
484 bool do_hint_prune;
485 bool did_tuple_hint_fpi;
486 int64 fpi_before = pgWalUsage.wal_fpi;
487
488 /* Copy parameters to prstate */
489 prstate.vistest = vistest;
490 prstate.mark_unused_now = (options & HEAP_PAGE_PRUNE_MARK_UNUSED_NOW) != 0;
491 prstate.attempt_freeze = (options & HEAP_PAGE_PRUNE_FREEZE) != 0;
492 prstate.cutoffs = cutoffs;
493
494 /*
495 * Our strategy is to scan the page and make lists of items to change,
496 * then apply the changes within a critical section. This keeps as much
497 * logic as possible out of the critical section, and also ensures that
498 * WAL replay will work the same as the normal case.
499 *
500 * First, initialize the new pd_prune_xid value to zero (indicating no
501 * prunable tuples). If we find any tuples which may soon become
502 * prunable, we will save the lowest relevant XID in new_prune_xid. Also
503 * initialize the rest of our working state.
504 */
505 prstate.new_prune_xid = InvalidTransactionId;
506 prstate.latest_xid_removed = InvalidTransactionId;
507 prstate.nredirected = prstate.ndead = prstate.nunused = prstate.nfrozen = 0;
508 prstate.nroot_items = 0;
509 prstate.nheaponly_items = 0;
510
511 /* initialize page freezing working state */
512 prstate.pagefrz.freeze_required = false;
513 if (prstate.attempt_freeze)
514 {
515 Assert(new_relfrozen_xid && new_relmin_mxid);
516 prstate.pagefrz.FreezePageRelfrozenXid = *new_relfrozen_xid;
517 prstate.pagefrz.NoFreezePageRelfrozenXid = *new_relfrozen_xid;
518 prstate.pagefrz.FreezePageRelminMxid = *new_relmin_mxid;
519 prstate.pagefrz.NoFreezePageRelminMxid = *new_relmin_mxid;
520 }
521 else
522 {
523 Assert(new_relfrozen_xid == NULL && new_relmin_mxid == NULL);
524 prstate.pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
525 prstate.pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
526 prstate.pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
527 prstate.pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
528 }
529
530 prstate.ndeleted = 0;
531 prstate.live_tuples = 0;
532 prstate.recently_dead_tuples = 0;
533 prstate.hastup = false;
534 prstate.lpdead_items = 0;
535 prstate.deadoffsets = presult->deadoffsets;
536
537 /*
538 * Caller may update the VM after we're done. We can keep track of
539 * whether the page will be all-visible and all-frozen after pruning and
540 * freezing to help the caller to do that.
541 *
542 * Currently, only VACUUM sets the VM bits. To save the effort, only do
543 * the bookkeeping if the caller needs it. Currently, that's tied to
544 * HEAP_PAGE_PRUNE_FREEZE, but it could be a separate flag if you wanted
545 * to update the VM bits without also freezing or freeze without also
546 * setting the VM bits.
547 *
548 * In addition to telling the caller whether it can set the VM bit, we
549 * also use 'all_visible' and 'all_frozen' for our own decision-making. If
550 * the whole page would become frozen, we consider opportunistically
551 * freezing tuples. We will not be able to freeze the whole page if there
552 * are tuples present that are not visible to everyone or if there are
553 * dead tuples which are not yet removable. However, dead tuples which
554 * will be removed by the end of vacuuming should not preclude us from
555 * opportunistically freezing. Because of that, we do not clear
556 * all_visible when we see LP_DEAD items. We fix that at the end of the
557 * function, when we return the value to the caller, so that the caller
558 * doesn't set the VM bit incorrectly.
559 */
560 if (prstate.attempt_freeze)
561 {
562 prstate.all_visible = true;
563 prstate.all_frozen = true;
564 }
565 else
566 {
567 /*
568 * Initializing to false allows skipping the work to update them in
569 * heap_prune_record_unchanged_lp_normal().
570 */
571 prstate.all_visible = false;
572 prstate.all_frozen = false;
573 }
574
575 /*
576 * The visibility cutoff xid is the newest xmin of live tuples on the
577 * page. In the common case, this will be set as the conflict horizon the
578 * caller can use for updating the VM. If, at the end of freezing and
579 * pruning, the page is all-frozen, there is no possibility that any
580 * running transaction on the standby does not see tuples on the page as
581 * all-visible, so the conflict horizon remains InvalidTransactionId.
582 */
583 prstate.visibility_cutoff_xid = InvalidTransactionId;
584
585 maxoff = PageGetMaxOffsetNumber(page);
586 tup.t_tableOid = RelationGetRelid(relation);
587
588 /*
589 * Determine HTSV for all tuples, and queue them up for processing as HOT
590 * chain roots or as heap-only items.
591 *
592 * Determining HTSV only once for each tuple is required for correctness,
593 * to deal with cases where running HTSV twice could result in different
594 * results. For example, RECENTLY_DEAD can turn to DEAD if another
595 * checked item causes GlobalVisTestIsRemovableFullXid() to update the
596 * horizon, or INSERT_IN_PROGRESS can change to DEAD if the inserting
597 * transaction aborts.
598 *
599 * It's also good for performance. Most commonly tuples within a page are
600 * stored at decreasing offsets (while the items are stored at increasing
601 * offsets). When processing all tuples on a page this leads to reading
602 * memory at decreasing offsets within a page, with a variable stride.
603 * That's hard for CPU prefetchers to deal with. Processing the items in
604 * reverse order (and thus the tuples in increasing order) increases
605 * prefetching efficiency significantly / decreases the number of cache
606 * misses.
607 */
608 for (offnum = maxoff;
609 offnum >= FirstOffsetNumber;
610 offnum = OffsetNumberPrev(offnum))
611 {
612 ItemId itemid = PageGetItemId(page, offnum);
613 HeapTupleHeader htup;
614
615 /*
616 * Set the offset number so that we can display it along with any
617 * error that occurred while processing this tuple.
618 */
619 *off_loc = offnum;
620
621 prstate.processed[offnum] = false;
622 prstate.htsv[offnum] = -1;
623
624 /* Nothing to do if slot doesn't contain a tuple */
625 if (!ItemIdIsUsed(itemid))
626 {
627 heap_prune_record_unchanged_lp_unused(page, &prstate, offnum);
628 continue;
629 }
630
631 if (ItemIdIsDead(itemid))
632 {
633 /*
634 * If the caller set mark_unused_now true, we can set dead line
635 * pointers LP_UNUSED now.
636 */
637 if (unlikely(prstate.mark_unused_now))
638 heap_prune_record_unused(&prstate, offnum, false);
639 else
640 heap_prune_record_unchanged_lp_dead(page, &prstate, offnum);
641 continue;
642 }
643
644 if (ItemIdIsRedirected(itemid))
645 {
646 /* This is the start of a HOT chain */
647 prstate.root_items[prstate.nroot_items++] = offnum;
648 continue;
649 }
650
651 Assert(ItemIdIsNormal(itemid));
652
653 /*
654 * Get the tuple's visibility status and queue it up for processing.
655 */
656 htup = (HeapTupleHeader) PageGetItem(page, itemid);
657 tup.t_data = htup;
658 tup.t_len = ItemIdGetLength(itemid);
659 ItemPointerSet(&tup.t_self, blockno, offnum);
660
661 prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
662 buffer);
663
664 if (!HeapTupleHeaderIsHeapOnly(htup))
665 prstate.root_items[prstate.nroot_items++] = offnum;
666 else
667 prstate.heaponly_items[prstate.nheaponly_items++] = offnum;
668 }
669
670 /*
671 * If checksums are enabled, heap_prune_satisfies_vacuum() may have caused
672 * an FPI to be emitted.
673 */
674 did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
675
676 /*
677 * Process HOT chains.
678 *
679 * We added the items to the array starting from 'maxoff', so by
680 * processing the array in reverse order, we process the items in
681 * ascending offset number order. The order doesn't matter for
682 * correctness, but some quick micro-benchmarking suggests that this is
683 * faster. (Earlier PostgreSQL versions, which scanned all the items on
684 * the page instead of using the root_items array, also did it in
685 * ascending offset number order.)
686 */
687 for (int i = prstate.nroot_items - 1; i >= 0; i--)
688 {
689 offnum = prstate.root_items[i];
690
691 /* Ignore items already processed as part of an earlier chain */
692 if (prstate.processed[offnum])
693 continue;
694
695 /* see preceding loop */
696 *off_loc = offnum;
697
698 /* Process this item or chain of items */
699 heap_prune_chain(page, blockno, maxoff, offnum, &prstate);
700 }
701
702 /*
703 * Process any heap-only tuples that were not already processed as part of
704 * a HOT chain.
705 */
706 for (int i = prstate.nheaponly_items - 1; i >= 0; i--)
707 {
708 offnum = prstate.heaponly_items[i];
709
710 if (prstate.processed[offnum])
711 continue;
712
713 /* see preceding loop */
714 *off_loc = offnum;
715
716 /*
717 * If the tuple is DEAD and doesn't chain to anything else, mark it
718 * unused. (If it does chain, we can only remove it as part of
719 * pruning its chain.)
720 *
721 * We need this primarily to handle aborted HOT updates, that is,
722 * XMIN_INVALID heap-only tuples. Those might not be linked to by any
723 * chain, since the parent tuple might be re-updated before any
724 * pruning occurs. So we have to be able to reap them separately from
725 * chain-pruning. (Note that HeapTupleHeaderIsHotUpdated will never
726 * return true for an XMIN_INVALID tuple, so this code will work even
727 * when there were sequential updates within the aborted transaction.)
728 */
729 if (prstate.htsv[offnum] == HEAPTUPLE_DEAD)
730 {
731 ItemId itemid = PageGetItemId(page, offnum);
732 HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(page, itemid);
733
734 if (likely(!HeapTupleHeaderIsHotUpdated(htup)))
735 {
736 HeapTupleHeaderAdvanceConflictHorizon(htup,
737 &prstate.latest_xid_removed);
738 heap_prune_record_unused(&prstate, offnum, true);
739 }
740 else
741 {
742 /*
743 * This tuple should've been processed and removed as part of
744 * a HOT chain, so something's wrong. To preserve evidence,
745 * we don't dare to remove it. We cannot leave behind a DEAD
746 * tuple either, because that will cause VACUUM to error out.
747 * Throwing an error with a distinct error message seems like
748 * the least bad option.
749 */
750 elog(ERROR, "dead heap-only tuple (%u, %d) is not linked to from any HOT chain",
751 blockno, offnum);
752 }
753 }
754 else
755 heap_prune_record_unchanged_lp_normal(page, &prstate, offnum);
756 }
757
758 /* We should now have processed every tuple exactly once */
759#ifdef USE_ASSERT_CHECKING
760 for (offnum = FirstOffsetNumber;
761 offnum <= maxoff;
762 offnum = OffsetNumberNext(offnum))
763 {
764 *off_loc = offnum;
765
766 Assert(prstate.processed[offnum]);
767 }
768#endif
769
770 /* Clear the offset information once we have processed the given page. */
771 *off_loc = InvalidOffsetNumber;
772
773 do_prune = prstate.nredirected > 0 ||
774 prstate.ndead > 0 ||
775 prstate.nunused > 0;
776
777 /*
778 * Even if we don't prune anything, if we found a new value for the
779 * pd_prune_xid field or the page was marked full, we will update the hint
780 * bit.
781 */
782 do_hint_prune = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
783 PageIsFull(page);
784
785 /*
786 * Decide if we want to go ahead with freezing according to the freeze
787 * plans we prepared, or not.
788 */
789 do_freeze = heap_page_will_freeze(relation, buffer,
790 did_tuple_hint_fpi,
791 do_prune,
792 do_hint_prune,
793 &prstate);
794
795 /* Any error while applying the changes is critical */
796 START_CRIT_SECTION();
797
798 if (do_hint_prune)
799 {
800 /*
801 * Update the page's pd_prune_xid field to either zero, or the lowest
802 * XID of any soon-prunable tuple.
803 */
804 ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
805
806 /*
807 * Also clear the "page is full" flag, since there's no point in
808 * repeating the prune/defrag process until something else happens to
809 * the page.
810 */
811 PageClearFull(page);
812
813 /*
814 * If that's all we had to do to the page, this is a non-WAL-logged
815 * hint. If we are going to freeze or prune the page, we will mark
816 * the buffer dirty below.
817 */
818 if (!do_freeze && !do_prune)
819 MarkBufferDirtyHint(buffer, true);
820 }
821
822 if (do_prune || do_freeze)
823 {
824 /* Apply the planned item changes and repair page fragmentation. */
825 if (do_prune)
826 {
827 heap_page_prune_execute(buffer, false,
828 prstate.redirected, prstate.nredirected,
829 prstate.nowdead, prstate.ndead,
830 prstate.nowunused, prstate.nunused);
831 }
832
833 if (do_freeze)
834 heap_freeze_prepared_tuples(buffer, prstate.frozen, prstate.nfrozen);
835
836 MarkBufferDirty(buffer);
837
838 /*
839 * Emit a WAL XLOG_HEAP2_PRUNE* record showing what we did
840 */
841 if (RelationNeedsWAL(relation))
842 {
843 /*
844 * The snapshotConflictHorizon for the whole record should be the
845 * most conservative of all the horizons calculated for any of the
846 * possible modifications. If this record will prune tuples, any
847 * transactions on the standby older than the youngest xmax of the
848 * most recently removed tuple this record will prune will
849 * conflict. If this record will freeze tuples, any transactions
850 * on the standby with xids older than the youngest tuple this
851 * record will freeze will conflict.
852 */
853 TransactionId frz_conflict_horizon = InvalidTransactionId;
854 TransactionId conflict_xid;
855
856 /*
857 * We can use the visibility_cutoff_xid as our cutoff for
858 * conflicts when the whole page is eligible to become all-frozen
859 * in the VM once we're done with it. Otherwise we generate a
860 * conservative cutoff by stepping back from OldestXmin.
861 */
862 if (do_freeze)
863 {
864 if (prstate.all_visible && prstate.all_frozen)
865 frz_conflict_horizon = prstate.visibility_cutoff_xid;
866 else
867 {
868 /* Avoids false conflicts when hot_standby_feedback in use */
869 frz_conflict_horizon = prstate.cutoffs->OldestXmin;
870 TransactionIdRetreat(frz_conflict_horizon);
871 }
872 }
873
874 if (TransactionIdFollows(frz_conflict_horizon, prstate.latest_xid_removed))
875 conflict_xid = frz_conflict_horizon;
876 else
877 conflict_xid = prstate.latest_xid_removed;
878
879 log_heap_prune_and_freeze(relation, buffer,
880 InvalidBuffer, /* vmbuffer */
881 0, /* vmflags */
882 conflict_xid,
883 true, reason,
884 prstate.frozen, prstate.nfrozen,
885 prstate.redirected, prstate.nredirected,
886 prstate.nowdead, prstate.ndead,
887 prstate.nowunused, prstate.nunused);
888 }
889 }
890
891 END_CRIT_SECTION();
892
893 /* Copy information back for caller */
894 presult->ndeleted = prstate.ndeleted;
895 presult->nnewlpdead = prstate.ndead;
896 presult->nfrozen = prstate.nfrozen;
897 presult->live_tuples = prstate.live_tuples;
898 presult->recently_dead_tuples = prstate.recently_dead_tuples;
899
900 /*
901 * It was convenient to ignore LP_DEAD items in all_visible earlier on to
902 * make the choice of whether or not to freeze the page unaffected by the
903 * short-term presence of LP_DEAD items. These LP_DEAD items were
904 * effectively assumed to be LP_UNUSED items in the making. It doesn't
905 * matter which vacuum heap pass (initial pass or final pass) ends up
906 * setting the page all-frozen, as long as the ongoing VACUUM does it.
907 *
908 * Now that freezing has been finalized, unset all_visible if there are
909 * any LP_DEAD items on the page. It needs to reflect the present state
910 * of the page, as expected by our caller.
911 */
912 if (prstate.all_visible && prstate.lpdead_items == 0)
913 {
914 presult->all_visible = prstate.all_visible;
915 presult->all_frozen = prstate.all_frozen;
916 }
917 else
918 {
919 presult->all_visible = false;
920 presult->all_frozen = false;
921 }
922
923 presult->hastup = prstate.hastup;
924
925 /*
926 * For callers planning to update the visibility map, the conflict horizon
927 * for that record must be the newest xmin on the page. However, if the
928 * page is completely frozen, there can be no conflict and the
929 * vm_conflict_horizon should remain InvalidTransactionId. This includes
930 * the case that we just froze all the tuples; the prune-freeze record
931 * included the conflict XID already so the caller doesn't need it.
932 */
933 if (presult->all_frozen)
934 presult->vm_conflict_horizon = InvalidTransactionId;
935 else
936 presult->vm_conflict_horizon = prstate.visibility_cutoff_xid;
937
938 presult->lpdead_items = prstate.lpdead_items;
939 /* the presult->deadoffsets array was already filled in */
940
941 if (prstate.attempt_freeze)
942 {
943 if (presult->nfrozen > 0)
944 {
945 *new_relfrozen_xid = prstate.pagefrz.FreezePageRelfrozenXid;
946 *new_relmin_mxid = prstate.pagefrz.FreezePageRelminMxid;
947 }
948 else
949 {
950 *new_relfrozen_xid = prstate.pagefrz.NoFreezePageRelfrozenXid;
951 *new_relmin_mxid = prstate.pagefrz.NoFreezePageRelminMxid;
952 }
953 }
954}
uint32 BlockNumber
Definition: block.h:31
#define InvalidBuffer
Definition: buf.h:25
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4223
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2943
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:5430
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:425
PageHeaderData * PageHeader
Definition: bufpage.h:173
static void PageClearFull(Page page)
Definition: bufpage.h:422
PageData * Page
Definition: bufpage.h:81
static bool PageIsFull(const PageData *page)
Definition: bufpage.h:412
#define likely(x)
Definition: c.h:405
int64_t int64
Definition: c.h:539
#define unlikely(x)
Definition: c.h:406
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition: heapam.c:7984
void heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7391
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:44
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:43
WalUsage pgWalUsage
Definition: instrument.c:22
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
#define InvalidMultiXactId
Definition: multixact.h:25
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
static bool heap_page_will_freeze(Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
Definition: pruneheap.c:315
static void heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff, OffsetNumber rootoffnum, PruneState *prstate)
Definition: pruneheap.c:1043
static void heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1552
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2101
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1341
static void heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1374
static void heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1363
static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
Definition: pruneheap.c:961
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition: pruneheap.c:1605
#define RelationGetRelid(relation)
Definition: rel.h:515
#define RelationNeedsWAL(relation)
Definition: rel.h:638
MultiXactId NoFreezePageRelminMxid
Definition: heapam.h:220
TransactionId FreezePageRelfrozenXid
Definition: heapam.h:208
bool freeze_required
Definition: heapam.h:182
MultiXactId FreezePageRelminMxid
Definition: heapam.h:209
TransactionId NoFreezePageRelfrozenXid
Definition: heapam.h:219
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
int recently_dead_tuples
Definition: heapam.h:235
TransactionId vm_conflict_horizon
Definition: heapam.h:250
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:264
bool all_visible
Definition: heapam.h:248
HeapPageFreeze pagefrz
Definition: pruneheap.c:104
bool all_visible
Definition: pruneheap.c:151
int ndead
Definition: pruneheap.c:56
bool processed[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:87
OffsetNumber heaponly_items[MaxHeapTuplesPerPage]
Definition: pruneheap.c:79
TransactionId new_prune_xid
Definition: pruneheap.c:53
bool attempt_freeze
Definition: pruneheap.c:46
bool hastup
Definition: pruneheap.c:123
int recently_dead_tuples
Definition: pruneheap.c:120
OffsetNumber nowdead[MaxHeapTuplesPerPage]
Definition: pruneheap.c:61
int nroot_items
Definition: pruneheap.c:76
OffsetNumber nowunused[MaxHeapTuplesPerPage]
Definition: pruneheap.c:62
int nheaponly_items
Definition: pruneheap.c:78
bool mark_unused_now
Definition: pruneheap.c:44
int live_tuples
Definition: pruneheap.c:119
TransactionId visibility_cutoff_xid
Definition: pruneheap.c:153
bool all_frozen
Definition: pruneheap.c:152
GlobalVisState * vistest
Definition: pruneheap.c:42
struct VacuumCutoffs * cutoffs
Definition: pruneheap.c:47
HeapTupleFreeze frozen[MaxHeapTuplesPerPage]
Definition: pruneheap.c:63
int lpdead_items
Definition: pruneheap.c:129
int nfrozen
Definition: pruneheap.c:58
OffsetNumber redirected[MaxHeapTuplesPerPage *2]
Definition: pruneheap.c:60
int ndeleted
Definition: pruneheap.c:116
int nredirected
Definition: pruneheap.c:55
int8 htsv[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:99
TransactionId latest_xid_removed
Definition: pruneheap.c:54
int nunused
Definition: pruneheap.c:57
OffsetNumber root_items[MaxHeapTuplesPerPage]
Definition: pruneheap.c:77
OffsetNumber * deadoffsets
Definition: pruneheap.c:130
TransactionId OldestXmin
Definition: vacuum.h:279
int64 wal_fpi
Definition: instrument.h:54
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.h:297
#define TransactionIdRetreat(dest)
Definition: transam.h:141

References PruneState::all_frozen, PruneFreezeResult::all_frozen, PruneState::all_visible, PruneFreezeResult::all_visible, Assert(), PruneState::attempt_freeze, BufferGetBlockNumber(), BufferGetPage(), PruneState::cutoffs, PruneState::deadoffsets, PruneFreezeResult::deadoffsets, elog, END_CRIT_SECTION, ERROR, FirstOffsetNumber, HeapPageFreeze::freeze_required, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, PruneState::frozen, PruneState::hastup, PruneFreezeResult::hastup, heap_freeze_prepared_tuples(), heap_page_prune_execute(), HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, heap_page_will_freeze(), heap_prune_chain(), heap_prune_record_unchanged_lp_dead(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_unused(), heap_prune_record_unused(), heap_prune_satisfies_vacuum(), PruneState::heaponly_items, HEAPTUPLE_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, i, InvalidBuffer, InvalidMultiXactId, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), PruneState::latest_xid_removed, likely, PruneState::live_tuples, PruneFreezeResult::live_tuples, log_heap_prune_and_freeze(), PruneState::lpdead_items, PruneFreezeResult::lpdead_items, PruneState::mark_unused_now, MarkBufferDirty(), MarkBufferDirtyHint(), PruneState::ndead, PruneState::ndeleted, PruneFreezeResult::ndeleted, PruneState::new_prune_xid, PruneState::nfrozen, PruneFreezeResult::nfrozen, PruneState::nheaponly_items, PruneFreezeResult::nnewlpdead, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, PruneState::nowdead, PruneState::nowunused, PruneState::nredirected, PruneState::nroot_items, PruneState::nunused, OffsetNumberNext, OffsetNumberPrev, VacuumCutoffs::OldestXmin, PageClearFull(), PruneState::pagefrz, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIsFull(), pgWalUsage, PruneState::processed, PruneState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, PruneState::redirected, RelationGetRelid, RelationNeedsWAL, PruneState::root_items, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdFollows(), TransactionIdRetreat, unlikely, PruneState::visibility_cutoff_xid, PruneState::vistest, PruneFreezeResult::vm_conflict_horizon, and WalUsage::wal_fpi.

Referenced by heap_page_prune_opt(), and lazy_scan_prune().
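As a caller-side sketch, loosely modeled on lazy_scan_prune(): VACUUM passes HEAP_PAGE_PRUNE_FREEZE and then consumes the result fields. The variables rel, buf, vistest, cutoffs, new_relfrozen_xid, new_relmin_mxid, and the helper remember_dead_item() are hypothetical stand-ins for the caller's own state, not part of this API:

    PruneFreezeResult presult;
    OffsetNumber off_loc;

    /* new_relfrozen_xid/new_relmin_mxid are seeded by the caller from pg_class */
    heap_page_prune_and_freeze(rel, buf, vistest, HEAP_PAGE_PRUNE_FREEZE,
                               &cutoffs, &presult, PRUNE_VACUUM_SCAN,
                               &off_loc, &new_relfrozen_xid, &new_relmin_mxid);

    /* LP_DEAD offsets to remember for the index/heap vacuuming passes */
    for (int i = 0; i < presult.lpdead_items; i++)
        remember_dead_item(presult.deadoffsets[i]);    /* hypothetical helper */

    /*
     * If the page came back all_visible (and possibly all_frozen), the
     * caller may set the VM bits, using presult.vm_conflict_horizon as the
     * snapshot conflict horizon for that record.
     */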

◆ heap_page_prune_execute()

void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only,
                             OffsetNumber *redirected, int nredirected,
                             OffsetNumber *nowdead, int ndead,
                             OffsetNumber *nowunused, int nunused)

Definition at line 1605 of file pruneheap.c.

1609{
1610 Page page = BufferGetPage(buffer);
1611 OffsetNumber *offnum;
1612 HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
1613
1614 /* Shouldn't be called unless there's something to do */
1615 Assert(nredirected > 0 || ndead > 0 || nunused > 0);
1616
1617 /* If 'lp_truncate_only', we can only remove already-dead line pointers */
1618 Assert(!lp_truncate_only || (nredirected == 0 && ndead == 0));
1619
1620 /* Update all redirected line pointers */
1621 offnum = redirected;
1622 for (int i = 0; i < nredirected; i++)
1623 {
1624 OffsetNumber fromoff = *offnum++;
1625 OffsetNumber tooff = *offnum++;
1626 ItemId fromlp = PageGetItemId(page, fromoff);
1627 ItemId tolp PG_USED_FOR_ASSERTS_ONLY;
1628
1629#ifdef USE_ASSERT_CHECKING
1630
1631 /*
1632 * Any existing item that we set as an LP_REDIRECT (any 'from' item)
1633 * must be the first item from a HOT chain. If the item has tuple
1634 * storage then it can't be a heap-only tuple. Otherwise we are just
1635 * maintaining an existing LP_REDIRECT from an existing HOT chain that
1636 * has been pruned at least once before now.
1637 */
1638 if (!ItemIdIsRedirected(fromlp))
1639 {
1640 Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
1641
1642 htup = (HeapTupleHeader) PageGetItem(page, fromlp);
1643 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1644 }
1645 else
1646 {
1647 /* We shouldn't need to redundantly set the redirect */
1648 Assert(ItemIdGetRedirect(fromlp) != tooff);
1649 }
1650
1651 /*
1652 * The item that we're about to set as an LP_REDIRECT (the 'from'
1653 * item) will point to an existing item (the 'to' item) that is
1654 * already a heap-only tuple. There can be at most one LP_REDIRECT
1655 * item per HOT chain.
1656 *
1657 * We need to keep around an LP_REDIRECT item (after original
1658 * non-heap-only root tuple gets pruned away) so that it's always
1659 * possible for VACUUM to easily figure out what TID to delete from
1660 * indexes when an entire HOT chain becomes dead. A heap-only tuple
1661 * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
1662 * tuple can.
1663 *
1664 * This check may miss problems, e.g. the target of a redirect could
1665 * be marked as unused subsequently. The page_verify_redirects() check
1666 * below will catch such problems.
1667 */
1668 tolp = PageGetItemId(page, tooff);
1669 Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
1670 htup = (HeapTupleHeader) PageGetItem(page, tolp);
1671 Assert(HeapTupleHeaderIsHeapOnly(htup));
1672#endif
1673
1674 ItemIdSetRedirect(fromlp, tooff);
1675 }
1676
1677 /* Update all now-dead line pointers */
1678 offnum = nowdead;
1679 for (int i = 0; i < ndead; i++)
1680 {
1681 OffsetNumber off = *offnum++;
1682 ItemId lp = PageGetItemId(page, off);
1683
1684#ifdef USE_ASSERT_CHECKING
1685
1686 /*
1687 * An LP_DEAD line pointer must be left behind when the original item
1688 * (which is dead to everybody) could still be referenced by a TID in
1689 * an index. This should never be necessary with any individual
1690 * heap-only tuple item, though. (It's not clear how much of a problem
1691 * that would be, but there is no reason to allow it.)
1692 */
1693 if (ItemIdHasStorage(lp))
1694 {
1695 Assert(ItemIdIsNormal(lp));
1696 htup = (HeapTupleHeader) PageGetItem(page, lp);
1697 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1698 }
1699 else
1700 {
1701 /* Whole HOT chain becomes dead */
1702 Assert(ItemIdIsRedirected(lp));
1703 }
1704#endif
1705
1706 ItemIdSetDead(lp);
1707 }
1708
1709 /* Update all now-unused line pointers */
1710 offnum = nowunused;
1711 for (int i = 0; i < nunused; i++)
1712 {
1713 OffsetNumber off = *offnum++;
1714 ItemId lp = PageGetItemId(page, off);
1715
1716#ifdef USE_ASSERT_CHECKING
1717
1718 if (lp_truncate_only)
1719 {
1720 /* Setting LP_DEAD to LP_UNUSED in vacuum's second pass */
1721 Assert(ItemIdIsDead(lp) && !ItemIdHasStorage(lp));
1722 }
1723 else
1724 {
1725 /*
1726 * When heap_page_prune_and_freeze() was called, mark_unused_now
1727 * may have been passed as true, which allows would-be LP_DEAD
1728 * items to be made LP_UNUSED instead. This is only possible if
1729 * the relation has no indexes. If there are any dead items, then
1730 * mark_unused_now was not true and every item being marked
1731 * LP_UNUSED must refer to a heap-only tuple.
1732 */
1733 if (ndead > 0)
1734 {
1735 Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
1736 htup = (HeapTupleHeader) PageGetItem(page, lp);
1737 Assert(HeapTupleHeaderIsHeapOnly(htup));
1738 }
1739 else
1740 Assert(ItemIdIsUsed(lp));
1741 }
1742
1743#endif
1744
1745 ItemIdSetUnused(lp);
1746 }
1747
1748 if (lp_truncate_only)
1749 PageTruncateLinePointerArray(page);
1750 else
1751 {
1752 /*
1753 * Finally, repair any fragmentation, and update the page's hint bit
1754 * about whether it has free pointers.
1755 */
1756 PageRepairFragmentation(page);
1757
1758 /*
1759 * Now that the page has been modified, assert that redirect items
1760 * still point to valid targets.
1761 */
1762 page_verify_redirects(page);
1763 }
1764}
void PageRepairFragmentation(Page page)
Definition: bufpage.c:698
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:227
#define ItemIdSetRedirect(itemId, link)
Definition: itemid.h:152
#define ItemIdSetDead(itemId)
Definition: itemid.h:164
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static void page_verify_redirects(Page page)
Definition: pruneheap.c:1781

References Assert(), BufferGetPage(), HeapTupleHeaderIsHeapOnly(), i, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemIdSetDead, ItemIdSetRedirect, ItemIdSetUnused, page_verify_redirects(), PageGetItem(), PageGetItemId(), PageRepairFragmentation(), PageTruncateLinePointerArray(), and PG_USED_FOR_ASSERTS_ONLY.

Referenced by heap_page_prune_and_freeze(), and heap_xlog_prune_freeze().
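Note the argument convention the first loop above relies on: redirected holds 2 * nredirected offsets laid out as consecutive (from, to) pairs, while nowdead and nowunused are flat offset arrays. A hedged sketch of how a caller fills the pairs array, mirroring what heap_prune_record_redirect() does internally; root_offnum and child_offnum are placeholder names:

    OffsetNumber redirected[MaxHeapTuplesPerPage * 2];
    int          nredirected = 0;

    redirected[nredirected * 2] = root_offnum;        /* from: becomes LP_REDIRECT */
    redirected[nredirected * 2 + 1] = child_offnum;   /* to: first surviving tuple */
    nredirected++;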

◆ heap_page_prune_opt()

void heap_page_prune_opt(Relation relation, Buffer buffer)

Definition at line 198 of file pruneheap.c.

199{
200 Page page = BufferGetPage(buffer);
201 TransactionId prune_xid;
202 GlobalVisState *vistest;
203 Size minfree;
204
205 /*
206 * We can't write WAL in recovery mode, so there's no point trying to
207 * clean the page. The primary will likely issue a cleaning WAL record
208 * soon anyway, so this is no particular loss.
209 */
210 if (RecoveryInProgress())
211 return;
212
213 /*
214 * First check whether there's any chance there's something to prune,
215 * determining the appropriate horizon is a waste if there's no prune_xid
216 * (i.e. no updates/deletes left potentially dead tuples around).
217 */
218 prune_xid = ((PageHeader) page)->pd_prune_xid;
219 if (!TransactionIdIsValid(prune_xid))
220 return;
221
222 /*
223 * Check whether prune_xid indicates that there may be dead rows that can
224 * be cleaned up.
225 */
226 vistest = GlobalVisTestFor(relation);
227
228 if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
229 return;
230
231 /*
232 * We prune when a previous UPDATE failed to find enough space on the page
233 * for a new tuple version, or when free space falls below the relation's
234 * fill-factor target (but not less than 10%).
235 *
236 * Checking free space here is questionable since we aren't holding any
237 * lock on the buffer; in the worst case we could get a bogus answer. It's
238 * unlikely to be *seriously* wrong, though, since reading either pd_lower
239 * or pd_upper is probably atomic. Avoiding taking a lock seems more
240 * important than sometimes getting a wrong answer in what is after all
241 * just a heuristic estimate.
242 */
243 minfree = RelationGetTargetPageFreeSpace(relation,
244 HEAP_DEFAULT_FILLFACTOR);
245 minfree = Max(minfree, BLCKSZ / 10);
246
247 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
248 {
249 /* OK, try to get exclusive buffer lock */
250 if (!ConditionalLockBufferForCleanup(buffer))
251 return;
252
253 /*
254 * Now that we have buffer lock, get accurate information about the
255 * page's free space, and recheck the heuristic about whether to
256 * prune.
257 */
258 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
259 {
260 OffsetNumber dummy_off_loc;
261 PruneFreezeResult presult;
262
263 /*
264 * For now, pass mark_unused_now as false regardless of whether or
265 * not the relation has indexes, since we cannot safely determine
266 * that during on-access pruning with the current implementation.
267 */
268 heap_page_prune_and_freeze(relation, buffer, vistest, 0,
269 NULL, &presult, PRUNE_ON_ACCESS, &dummy_off_loc, NULL, NULL);
270
271 /*
272 * Report the number of tuples reclaimed to pgstats. This is
273 * presult.ndeleted minus the number of newly-LP_DEAD-set items.
274 *
275 * We derive the number of dead tuples like this to avoid totally
276 * forgetting about items that were set to LP_DEAD, since they
277 * still need to be cleaned up by VACUUM. We only want to count
278 * heap-only tuples that just became LP_UNUSED in our report,
279 * which don't.
280 *
281 * VACUUM doesn't have to compensate in the same way when it
282 * tracks ndeleted, since it will set the same LP_DEAD items to
283 * LP_UNUSED separately.
284 */
285 if (presult.ndeleted > presult.nnewlpdead)
286 pgstat_update_heap_dead_tuples(relation,
287 presult.ndeleted - presult.nnewlpdead);
288 }
289
290 /* And release buffer lock */
291 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
292
293 /*
294 * We avoid reuse of any free space created on the page by unrelated
295 * UPDATEs/INSERTs by opting to not update the FSM at this point. The
296 * free space should be reused by UPDATEs to *this* page.
297 */
298 }
299}
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:5604
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5857
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:203
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
#define Max(x, y)
Definition: c.h:1001
size_t Size
Definition: c.h:614
@ PRUNE_ON_ACCESS
Definition: heapam.h:270
void pgstat_update_heap_dead_tuples(Relation rel, int delta)
bool GlobalVisTestIsRemovableXid(GlobalVisState *state, TransactionId xid)
Definition: procarray.c:4226
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4069
void heap_page_prune_and_freeze(Relation relation, Buffer buffer, GlobalVisState *vistest, int options, struct VacuumCutoffs *cutoffs, PruneFreezeResult *presult, PruneReason reason, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:466
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition: rel.h:390
#define HEAP_DEFAULT_FILLFACTOR
Definition: rel.h:361
bool RecoveryInProgress(void)
Definition: xlog.c:6406

References BUFFER_LOCK_UNLOCK, BufferGetPage(), ConditionalLockBufferForCleanup(), GlobalVisTestFor(), GlobalVisTestIsRemovableXid(), HEAP_DEFAULT_FILLFACTOR, heap_page_prune_and_freeze(), LockBuffer(), Max, PruneFreezeResult::ndeleted, PruneFreezeResult::nnewlpdead, PageGetHeapFreeSpace(), PageIsFull(), pgstat_update_heap_dead_tuples(), PRUNE_ON_ACCESS, RecoveryInProgress(), RelationGetTargetPageFreeSpace, and TransactionIdIsValid.

Referenced by BitmapHeapScanNextBlock(), heap_prepare_pagescan(), and heapam_index_fetch_tuple().
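The free-space threshold is simple enough to model standalone. A self-contained sketch of the heuristic (assuming RelationGetTargetPageFreeSpace() computes BLCKSZ * (100 - fillfactor) / 100, with HEAP_DEFAULT_FILLFACTOR being 100):

    #include <stdio.h>

    #define BLCKSZ 8192
    #define Max(x, y) ((x) > (y) ? (x) : (y))

    int
    main(void)
    {
        int     fillfactor = 90;    /* per-relation reloption; default 100 */
        size_t  minfree = BLCKSZ * (100 - fillfactor) / 100;

        /* never let the prune trigger fall below 10% of the block size */
        minfree = Max(minfree, BLCKSZ / 10);
        printf("prune when free space < %zu bytes\n", minfree);
        return 0;
    }

With the default fillfactor of 100 the first term is zero, so the BLCKSZ / 10 floor (819 bytes on an 8 kB page) is what actually drives opportunistic pruning.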

◆ heap_page_will_freeze()

static bool heap_page_will_freeze(Relation relation, Buffer buffer,
                                  bool did_tuple_hint_fpi, bool do_prune,
                                  bool do_hint_prune, PruneState *prstate)

Definition at line 315 of file pruneheap.c.

320{
321 bool do_freeze = false;
322
323 /*
324 * If the caller specified we should not attempt to freeze any tuples,
325 * validate that everything is in the right state and return.
326 */
327 if (!prstate->attempt_freeze)
328 {
329 Assert(!prstate->all_frozen && prstate->nfrozen == 0);
330 Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
331 return false;
332 }
333
334 if (prstate->pagefrz.freeze_required)
335 {
336 /*
337 * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
338 * before FreezeLimit/MultiXactCutoff is present. Must freeze to
339 * advance relfrozenxid/relminmxid.
340 */
341 do_freeze = true;
342 }
343 else
344 {
345 /*
346 * Opportunistically freeze the page if we are generating an FPI
347 * anyway and if doing so means that we can set the page all-frozen
348 * afterwards (might not happen until VACUUM's final heap pass).
349 *
350 * XXX: Previously, we knew if pruning emitted an FPI by checking
351 * pgWalUsage.wal_fpi before and after pruning. Once the freeze and
352 * prune records were combined, this heuristic couldn't be used
353 * anymore. The opportunistic freeze heuristic must be improved;
354 * however, for now, try to approximate the old logic.
355 */
356 if (prstate->all_visible && prstate->all_frozen && prstate->nfrozen > 0)
357 {
358 /*
359 * Freezing would make the page all-frozen. Have already emitted
360 * an FPI or will do so anyway?
361 */
362 if (RelationNeedsWAL(relation))
363 {
364 if (did_tuple_hint_fpi)
365 do_freeze = true;
366 else if (do_prune)
367 {
368 if (XLogCheckBufferNeedsBackup(buffer))
369 do_freeze = true;
370 }
371 else if (do_hint_prune)
372 {
373 if (XLogHintBitIsNeeded())
374 do_freeze = true;
375 }
376 }
377 }
378 }
379
380 if (do_freeze)
381 {
382 /*
383 * Validate the tuples we will be freezing before entering the
384 * critical section.
385 */
386 heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
387 }
388 else if (prstate->nfrozen > 0)
389 {
390 /*
391 * The page contained some tuples that were not already frozen, and we
392 * chose not to freeze them now. The page won't be all-frozen then.
393 */
394 Assert(!prstate->pagefrz.freeze_required);
395
396 prstate->all_frozen = false;
397 prstate->nfrozen = 0; /* avoid miscounts in instrumentation */
398 }
399 else
400 {
401 /*
402 * We have no freeze plans to execute. The page might already be
403 * all-frozen (perhaps only following pruning), though. Such pages
404 * can be marked all-frozen in the VM by our caller, even though none
405 * of its tuples were newly frozen here.
406 */
407 }
408
409 return do_freeze;
410}
void heap_pre_freeze_checks(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7338
#define XLogHintBitIsNeeded()
Definition: xlog.h:120
bool XLogCheckBufferNeedsBackup(Buffer buffer)
Definition: xloginsert.c:1049

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, HeapPageFreeze::freeze_required, PruneState::frozen, heap_pre_freeze_checks(), PruneState::lpdead_items, PruneState::nfrozen, PruneState::pagefrz, RelationNeedsWAL, XLogCheckBufferNeedsBackup(), and XLogHintBitIsNeeded.

Referenced by heap_page_prune_and_freeze().
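Reduced to its inputs, the decision can be modeled as a small predicate. This is a self-contained sketch, not the PostgreSQL source; fpi_emitted_or_expected folds together the three WAL checks above (did_tuple_hint_fpi, XLogCheckBufferNeedsBackup(), and XLogHintBitIsNeeded()):

    #include <stdbool.h>

    static bool
    will_freeze(bool attempt_freeze, bool freeze_required,
                bool all_visible, bool all_frozen, int nfrozen,
                bool fpi_emitted_or_expected)
    {
        if (!attempt_freeze)
            return false;           /* caller disabled freezing */
        if (freeze_required)
            return true;            /* must advance relfrozenxid/relminmxid */
        /* opportunistic: only if the page would come out all-frozen anyway */
        return all_visible && all_frozen && nfrozen > 0 &&
               fpi_emitted_or_expected;
    }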

◆ heap_prune_chain()

static void heap_prune_chain(Page page, BlockNumber blockno,
                             OffsetNumber maxoff, OffsetNumber rootoffnum,
                             PruneState *prstate)

Definition at line 1043 of file pruneheap.c.

1045{
1046 TransactionId priorXmax = InvalidTransactionId;
1047 ItemId rootlp;
1048 OffsetNumber offnum;
1049 OffsetNumber chainitems[MaxHeapTuplesPerPage];
1050
1051 /*
1052 * After traversing the HOT chain, ndeadchain is the index in chainitems
1053 * of the first live successor after the last dead item.
1054 */
1055 int ndeadchain = 0,
1056 nchain = 0;
1057
1058 rootlp = PageGetItemId(page, rootoffnum);
1059
1060 /* Start from the root tuple */
1061 offnum = rootoffnum;
1062
1063 /* while not end of the chain */
1064 for (;;)
1065 {
1066 HeapTupleHeader htup;
1067 ItemId lp;
1068
1069 /* Sanity check (pure paranoia) */
1070 if (offnum < FirstOffsetNumber)
1071 break;
1072
1073 /*
1074 * An offset past the end of page's line pointer array is possible
1075 * when the array was truncated (original item must have been unused)
1076 */
1077 if (offnum > maxoff)
1078 break;
1079
1080 /* If item is already processed, stop --- it must not be same chain */
1081 if (prstate->processed[offnum])
1082 break;
1083
1084 lp = PageGetItemId(page, offnum);
1085
1086 /*
1087 * Unused item obviously isn't part of the chain. Likewise, a dead
1088 * line pointer can't be part of the chain. Both of those cases were
1089 * already marked as processed.
1090 */
1091 Assert(ItemIdIsUsed(lp));
1092 Assert(!ItemIdIsDead(lp));
1093
1094 /*
1095 * If we are looking at the redirected root line pointer, jump to the
1096 * first normal tuple in the chain. If we find a redirect somewhere
1097 * else, stop --- it must not be same chain.
1098 */
1099 if (ItemIdIsRedirected(lp))
1100 {
1101 if (nchain > 0)
1102 break; /* not at start of chain */
1103 chainitems[nchain++] = offnum;
1104 offnum = ItemIdGetRedirect(rootlp);
1105 continue;
1106 }
1107
1108 Assert(ItemIdIsNormal(lp));
1109
1110 htup = (HeapTupleHeader) PageGetItem(page, lp);
1111
1112 /*
1113 * Check the tuple XMIN against prior XMAX, if any
1114 */
1115 if (TransactionIdIsValid(priorXmax) &&
1116 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
1117 break;
1118
1119 /*
1120 * OK, this tuple is indeed a member of the chain.
1121 */
1122 chainitems[nchain++] = offnum;
1123
1124 switch (htsv_get_valid_status(prstate->htsv[offnum]))
1125 {
1126 case HEAPTUPLE_DEAD:
1127
1128 /* Remember the last DEAD tuple seen */
1129 ndeadchain = nchain;
1130 HeapTupleHeaderAdvanceConflictHorizon(htup,
1131 &prstate->latest_xid_removed);
1132 /* Advance to next chain member */
1133 break;
1134
1135 case HEAPTUPLE_RECENTLY_DEAD:
1136
1137 /*
1138 * We don't need to advance the conflict horizon for
1139 * RECENTLY_DEAD tuples, even if we are removing them. This
1140 * is because we only remove RECENTLY_DEAD tuples if they
1141 * precede a DEAD tuple, and the DEAD tuple must have been
1142 * inserted by a newer transaction than the RECENTLY_DEAD
1143 * tuple by virtue of being later in the chain. We will have
1144 * advanced the conflict horizon for the DEAD tuple.
1145 */
1146
1147 /*
1148 * Advance past RECENTLY_DEAD tuples just in case there's a
1149 * DEAD one after them. We have to make sure that we don't
1150 * miss any DEAD tuples, since DEAD tuples that still have
1151 * tuple storage after pruning will confuse VACUUM.
1152 */
1153 break;
1154
1155 case HEAPTUPLE_DELETE_IN_PROGRESS:
1156 case HEAPTUPLE_LIVE:
1157 case HEAPTUPLE_INSERT_IN_PROGRESS:
1158 goto process_chain;
1159
1160 default:
1161 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1162 goto process_chain;
1163 }
1164
1165 /*
1166 * If the tuple is not HOT-updated, then we are at the end of this
1167 * HOT-update chain.
1168 */
1169 if (!HeapTupleHeaderIsHotUpdated(htup))
1170 goto process_chain;
1171
1172 /* HOT implies it can't have moved to different partition */
1173 Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1174
1175 /*
1176 * Advance to next chain member.
1177 */
1178 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blockno);
1179 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1180 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1181 }
1182
1183 if (ItemIdIsRedirected(rootlp) && nchain < 2)
1184 {
1185 /*
1186 * We found a redirect item that doesn't point to a valid follow-on
1187 * item. This can happen if the loop in heap_page_prune_and_freeze()
1188 * caused us to visit the dead successor of a redirect item before
1189 * visiting the redirect item. We can clean up by setting the
1190 * redirect item to LP_DEAD state or LP_UNUSED if the caller
1191 * indicated.
1192 */
1193 heap_prune_record_dead_or_unused(prstate, rootoffnum, false);
1194 return;
1195 }
1196
1197process_chain:
1198
1199 if (ndeadchain == 0)
1200 {
1201 /*
1202 * No DEAD tuple was found, so the chain is entirely composed of
1203 * normal, unchanged tuples. Leave it alone.
1204 */
1205 int i = 0;
1206
1207 if (ItemIdIsRedirected(rootlp))
1208 {
1209 heap_prune_record_unchanged_lp_redirect(prstate, rootoffnum);
1210 i++;
1211 }
1212 for (; i < nchain; i++)
1213 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1214 }
1215 else if (ndeadchain == nchain)
1216 {
1217 /*
1218 * The entire chain is dead. Mark the root line pointer LP_DEAD, and
1219 * fully remove the other tuples in the chain.
1220 */
1221 heap_prune_record_dead_or_unused(prstate, rootoffnum, ItemIdIsNormal(rootlp));
1222 for (int i = 1; i < nchain; i++)
1223 heap_prune_record_unused(prstate, chainitems[i], true);
1224 }
1225 else
1226 {
1227 /*
1228 * We found a DEAD tuple in the chain. Redirect the root line pointer
1229 * to the first non-DEAD tuple, and mark as unused each intermediate
1230 * item that we are able to remove from the chain.
1231 */
1232 heap_prune_record_redirect(prstate, rootoffnum, chainitems[ndeadchain],
1233 ItemIdIsNormal(rootlp));
1234 for (int i = 1; i < ndeadchain; i++)
1235 heap_prune_record_unused(prstate, chainitems[i], true);
1236
1237 /* the rest of tuples in the chain are normal, unchanged tuples */
1238 for (int i = ndeadchain; i < nchain; i++)
1239 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1240 }
1241}
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static HTSV_Result htsv_get_valid_status(int status)
Definition: pruneheap.c:1004
static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
Definition: pruneheap.c:1259
static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1324
static void heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1580

References Assert(), elog, ERROR, FirstOffsetNumber, heap_prune_record_dead_or_unused(), heap_prune_record_redirect(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_redirect(), heap_prune_record_unused(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, htsv_get_valid_status(), i, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), PruneState::latest_xid_removed, MaxHeapTuplesPerPage, PageGetItem(), PageGetItemId(), PruneState::processed, HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heap_page_prune_and_freeze().
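
The three-way disposition at the bottom of heap_prune_chain() can be exercised in isolation. Below is a minimal, self-contained sketch, not PostgreSQL code: chainitems, nchain and ndeadchain mirror the locals above, the record_* callbacks are hypothetical stand-ins for the heap_prune_record_*() subroutines documented on this page, and the special handling of a redirected root is ignored.

/* Standalone sketch of heap_prune_chain()'s final dispatch. */
#include <stdio.h>

typedef unsigned short OffsetNumber;

static void record_redirect(OffsetNumber from, OffsetNumber to)
{
    printf("redirect %u -> %u\n", (unsigned) from, (unsigned) to);
}

static void record_dead_or_unused(OffsetNumber off)
{
    printf("root %u becomes LP_DEAD (or LP_UNUSED)\n", (unsigned) off);
}

static void record_unused(OffsetNumber off)
{
    printf("offset %u becomes LP_UNUSED\n", (unsigned) off);
}

static void record_unchanged(OffsetNumber off)
{
    printf("offset %u left unchanged\n", (unsigned) off);
}

/* chainitems[0] is the root; ndeadchain counts the leading DEAD members. */
static void dispose_chain(const OffsetNumber *chainitems, int nchain,
                          int ndeadchain)
{
    if (ndeadchain == 0)
    {
        /* no DEAD members: leave the whole chain alone */
        for (int i = 0; i < nchain; i++)
            record_unchanged(chainitems[i]);
    }
    else if (ndeadchain == nchain)
    {
        /* entirely DEAD: root keeps a dead line pointer, the rest are freed */
        record_dead_or_unused(chainitems[0]);
        for (int i = 1; i < nchain; i++)
            record_unused(chainitems[i]);
    }
    else
    {
        /* DEAD prefix: redirect root past it, free the middle, keep the tail */
        record_redirect(chainitems[0], chainitems[ndeadchain]);
        for (int i = 1; i < ndeadchain; i++)
            record_unused(chainitems[i]);
        for (int i = ndeadchain; i < nchain; i++)
            record_unchanged(chainitems[i]);
    }
}

int main(void)
{
    OffsetNumber chain[] = {1, 2, 3, 4};

    dispose_chain(chain, 4, 2); /* offsets 1 and 2 dead: redirect 1 -> 3 */
    return 0;
}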

◆ heap_prune_record_dead()

static void heap_prune_record_dead ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1290 of file pruneheap.c.

1292{
1293 Assert(!prstate->processed[offnum]);
1294 prstate->processed[offnum] = true;
1295
1296 Assert(prstate->ndead < MaxHeapTuplesPerPage);
1297 prstate->nowdead[prstate->ndead] = offnum;
1298 prstate->ndead++;
1299
1300 /*
1301 * Deliberately delay unsetting all_visible until later during pruning.
1302 * Removable dead tuples shouldn't preclude freezing the page.
1303 */
1304
1305 /* Record the dead offset for vacuum */
1306 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1307
1308 /*
1309 * If the root entry had been a normal tuple, we are deleting it, so count
1310 * it in the result. But changing a redirect (even to DEAD state) doesn't
1311 * count.
1312 */
1313 if (was_normal)
1314 prstate->ndeleted++;
1315}

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, MaxHeapTuplesPerPage, PruneState::ndead, PruneState::ndeleted, PruneState::nowdead, and PruneState::processed.

Referenced by heap_prune_record_dead_or_unused().
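
Note the double bookkeeping above: each offset lands both in nowdead[] (which drives the page modification and the WAL record) and in deadoffsets[] (which is reported back to VACUUM for index cleanup). A toy sketch of that pattern, with a made-up capacity standing in for MaxHeapTuplesPerPage:

#include <stdio.h>

typedef unsigned short OffsetNumber;

/* made-up capacity standing in for MaxHeapTuplesPerPage */
#define TOY_MAX_ITEMS 32

struct toy_prune_state
{
    OffsetNumber nowdead[TOY_MAX_ITEMS];     /* drives the page edit / WAL */
    int          ndead;
    OffsetNumber deadoffsets[TOY_MAX_ITEMS]; /* handed to VACUUM afterwards */
    int          lpdead_items;
};

static void toy_record_dead(struct toy_prune_state *st, OffsetNumber offnum)
{
    /* same offset is appended to both arrays, as in the real routine */
    st->nowdead[st->ndead++] = offnum;
    st->deadoffsets[st->lpdead_items++] = offnum;
}

int main(void)
{
    struct toy_prune_state st = {0};

    toy_record_dead(&st, 4);
    toy_record_dead(&st, 9);
    printf("%d items for the page edit, %d for index vacuuming\n",
           st.ndead, st.lpdead_items);
    return 0;
}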

◆ heap_prune_record_dead_or_unused()

static void heap_prune_record_dead_or_unused ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1324 of file pruneheap.c.

1326{
1327 /*
1328 * If the caller set mark_unused_now to true, we can remove dead tuples
1329 * during pruning instead of marking their line pointers dead. Set this
1330 * tuple's line pointer LP_UNUSED. We hint that this option is less
1331 * likely.
1332 */
1333 if (unlikely(prstate->mark_unused_now))
1334 heap_prune_record_unused(prstate, offnum, was_normal);
1335 else
1336 heap_prune_record_dead(prstate, offnum, was_normal);
1337}
static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1290

References heap_prune_record_dead(), heap_prune_record_unused(), PruneState::mark_unused_now, and unlikely.

Referenced by heap_prune_chain().

◆ heap_prune_record_prunable()

static void heap_prune_record_prunable ( PruneState prstate,
TransactionId  xid 
)
static

Definition at line 1245 of file pruneheap.c.

1246{
1247 /*
1248 * This should exactly match the PageSetPrunable macro. We can't store
1249 * directly into the page header yet, so we update working state.
1250 */
1251 Assert(TransactionIdIsNormal(xid));
1252 if (!TransactionIdIsValid(prstate->new_prune_xid) ||
1253 TransactionIdPrecedes(xid, prstate->new_prune_xid))
1254 prstate->new_prune_xid = xid;
1255}
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.h:263

References Assert(), PruneState::new_prune_xid, TransactionIdIsNormal, TransactionIdIsValid, and TransactionIdPrecedes().

Referenced by heap_prune_record_unchanged_lp_normal().
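
The update rule (remember only the oldest prunable XID seen so far) can be shown with plain integers. A minimal sketch, assuming 32-bit XIDs and a simplified circular comparison in the style of transam.h's TransactionIdPrecedes; the real function also special-cases permanent XIDs:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

#define InvalidTransactionId ((TransactionId) 0)

/* circular comparison: signed difference, as in TransactionIdPrecedes */
static int xid_precedes(TransactionId id1, TransactionId id2)
{
    return (int32_t) (id1 - id2) < 0;
}

/* mirror of the update rule: keep only the oldest prunable XID seen */
static void note_prunable_xid(TransactionId *new_prune_xid, TransactionId xid)
{
    if (*new_prune_xid == InvalidTransactionId ||
        xid_precedes(xid, *new_prune_xid))
        *new_prune_xid = xid;
}

int main(void)
{
    TransactionId prune_xid = InvalidTransactionId;

    note_prunable_xid(&prune_xid, 1000);
    note_prunable_xid(&prune_xid, 900);  /* older: replaces 1000 */
    note_prunable_xid(&prune_xid, 1200); /* newer: ignored */
    printf("pd_prune_xid hint would become %u\n", prune_xid); /* 900 */
    return 0;
}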

◆ heap_prune_record_redirect()

static void heap_prune_record_redirect ( PruneState prstate,
OffsetNumber  offnum,
OffsetNumber  rdoffnum,
bool  was_normal 
)
static

Definition at line 1259 of file pruneheap.c.

1262{
1263 Assert(!prstate->processed[offnum]);
1264 prstate->processed[offnum] = true;
1265
1266 /*
1267 * Do not mark the redirect target here. It needs to be counted
1268 * separately as an unchanged tuple.
1269 */
1270
1271 Assert(prstate->nredirected < MaxHeapTuplesPerPage);
1272 prstate->redirected[prstate->nredirected * 2] = offnum;
1273 prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
1274
1275 prstate->nredirected++;
1276
1277 /*
1278 * If the root entry had been a normal tuple, we are deleting it, so count
1279 * it in the result. But changing a redirect (even to DEAD state) doesn't
1280 * count.
1281 */
1282 if (was_normal)
1283 prstate->ndeleted++;
1284
1285 prstate->hastup = true;
1286}

References Assert(), PruneState::hastup, MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nredirected, PruneState::processed, and PruneState::redirected.

Referenced by heap_prune_chain().
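
The redirected[] array packs (root, target) pairs flat, two OffsetNumbers per redirect, which is also the shape consumed by heap_page_prune_execute() and the WAL record. A sketch of walking that layout with toy data rather than the PostgreSQL definitions:

#include <stdio.h>

typedef unsigned short OffsetNumber;

int main(void)
{
    /* two redirects packed the way heap_prune_record_redirect() stores them:
     * slot [2*i] is the root offset, slot [2*i + 1] the chase target */
    OffsetNumber redirected[] = {1, 3, 7, 9};
    int nredirected = 2;

    for (int i = 0; i < nredirected; i++)
    {
        OffsetNumber fromoff = redirected[i * 2];
        OffsetNumber tooff = redirected[i * 2 + 1];

        printf("line pointer %u redirects to %u\n",
               (unsigned) fromoff, (unsigned) tooff);
    }
    return 0;
}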

◆ heap_prune_record_unchanged_lp_dead()

static void heap_prune_record_unchanged_lp_dead ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1552 of file pruneheap.c.

1553{
1554 Assert(!prstate->processed[offnum]);
1555 prstate->processed[offnum] = true;
1556
1557 /*
1558 * Deliberately don't set hastup for LP_DEAD items. We make the soft
1559 * assumption that any LP_DEAD items encountered here will become
1560 * LP_UNUSED later on, before count_nondeletable_pages is reached. If we
1561 * don't make this assumption then rel truncation will only happen every
1562 * other VACUUM, at most. Besides, VACUUM must treat
1563 * hastup/nonempty_pages as provisional no matter how LP_DEAD items are
1564 * handled (handled here, or handled later on).
1565 *
1566 * Similarly, don't unset all_visible until later, at the end of
1567 * heap_page_prune_and_freeze(). This will allow us to attempt to freeze
1568 * the page after pruning. As long as we unset it before updating the
1569 * visibility map, this will be correct.
1570 */
1571
1572 /* Record the dead offset for vacuum */
1573 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1574}

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, and PruneState::processed.

Referenced by heap_page_prune_and_freeze().

◆ heap_prune_record_unchanged_lp_normal()

static void heap_prune_record_unchanged_lp_normal ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1374 of file pruneheap.c.

1375{
1376 HeapTupleHeader htup;
1377
1378 Assert(!prstate->processed[offnum]);
1379 prstate->processed[offnum] = true;
1380
1381 prstate->hastup = true; /* the page is not empty */
1382
1383 /*
1384 * The criteria for counting a tuple as live in this block need to match
1385 * what analyze.c's acquire_sample_rows() does, otherwise VACUUM and
1386 * ANALYZE may produce wildly different reltuples values, e.g. when there
1387 * are many recently-dead tuples.
1388 *
1389 * The logic here is a bit simpler than acquire_sample_rows(), as VACUUM
1390 * can't run inside a transaction block, which makes some cases impossible
1391 * (e.g. in-progress insert from the same transaction).
1392 *
1393 * HEAPTUPLE_DEAD are handled by the other heap_prune_record_*()
1394 * subroutines. They don't count dead items like acquire_sample_rows()
1395 * does, because we assume that all dead items will become LP_UNUSED
1396 * before VACUUM finishes. This difference is only superficial. VACUUM
1397 * effectively agrees with ANALYZE about DEAD items, in the end. VACUUM
1398 * won't remember LP_DEAD items, but only because they're not supposed to
1399 * be left behind when it is done. (Cases where we bypass index vacuuming
1400 * will violate this optimistic assumption, but the overall impact of that
1401 * should be negligible.)
1402 */
1403 htup = (HeapTupleHeader) PageGetItem(page, PageGetItemId(page, offnum));
1404
1405 switch (prstate->htsv[offnum])
1406 {
1407 case HEAPTUPLE_LIVE:
1408
1409 /*
1410 * Count it as live. Not only is this natural, but it's also what
1411 * acquire_sample_rows() does.
1412 */
1413 prstate->live_tuples++;
1414
1415 /*
1416 * Is the tuple definitely visible to all transactions?
1417 *
1418 * NB: Like with per-tuple hint bits, we can't set the
1419 * PD_ALL_VISIBLE flag if the inserter committed asynchronously.
1420 * See SetHintBits for more info. Check that the tuple is hinted
1421 * xmin-committed because of that.
1422 */
1423 if (prstate->all_visible)
1424 {
1425 TransactionId xmin;
1426
1427 if (!HeapTupleHeaderXminCommitted(htup))
1428 {
1429 prstate->all_visible = false;
1430 break;
1431 }
1432
1433 /*
1434 * The inserter definitely committed. But is it old enough
1435 * that everyone sees it as committed? A FrozenTransactionId
1436 * is seen as committed to everyone. Otherwise, we check if
1437 * there is a snapshot that considers this xid to still be
1438 * running, and if so, we don't consider the page all-visible.
1439 */
1440 xmin = HeapTupleHeaderGetXmin(htup);
1441
1442 /*
1443 * For now always use prstate->cutoffs for this test, because
1444 * we only update 'all_visible' when freezing is requested. We
1445 * could use GlobalVisTestIsRemovableXid instead, if a
1446 * non-freezing caller wanted to set the VM bit.
1447 */
1448 Assert(prstate->cutoffs);
1449 if (!TransactionIdPrecedes(xmin, prstate->cutoffs->OldestXmin))
1450 {
1451 prstate->all_visible = false;
1452 break;
1453 }
1454
1455 /* Track newest xmin on page. */
1456 if (TransactionIdFollows(xmin, prstate->visibility_cutoff_xid) &&
1457 TransactionIdIsNormal(xmin))
1458 prstate->visibility_cutoff_xid = xmin;
1459 }
1460 break;
1461
1462 case HEAPTUPLE_RECENTLY_DEAD:
1463 prstate->recently_dead_tuples++;
1464 prstate->all_visible = false;
1465
1466 /*
1467 * This tuple will soon become DEAD. Update the hint field so
1468 * that the page is reconsidered for pruning in future.
1469 */
1470 heap_prune_record_prunable(prstate,
1471 HeapTupleHeaderGetUpdateXid(htup));
1472 break;
1473
1474 case HEAPTUPLE_INSERT_IN_PROGRESS:
1475
1476 /*
1477 * We do not count these rows as live, because we expect the
1478 * inserting transaction to update the counters at commit, and we
1479 * assume that will happen only after we report our results. This
1480 * assumption is a bit shaky, but it is what acquire_sample_rows()
1481 * does, so be consistent.
1482 */
1483 prstate->all_visible = false;
1484
1485 /*
1486 * If we wanted to optimize for aborts, we might consider marking
1487 * the page prunable when we see INSERT_IN_PROGRESS. But we
1488 * don't. See related decisions about when to mark the page
1489 * prunable in heapam.c.
1490 */
1491 break;
1492
1493 case HEAPTUPLE_DELETE_IN_PROGRESS:
1494
1495 /*
1496 * This is an expected case during concurrent vacuum. Count such
1497 * rows as live. As above, we assume the deleting transaction
1498 * will commit and update the counters after we report.
1499 */
1500 prstate->live_tuples++;
1501 prstate->all_visible = false;
1502
1503 /*
1504 * This tuple may soon become DEAD. Update the hint field so that
1505 * the page is reconsidered for pruning in future.
1506 */
1507 heap_prune_record_prunable(prstate,
1508 HeapTupleHeaderGetUpdateXid(htup));
1509 break;
1510
1511 default:
1512
1513 /*
1514 * DEAD tuples should've been passed to heap_prune_record_dead()
1515 * or heap_prune_record_unused() instead.
1516 */
1517 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result %d",
1518 prstate->htsv[offnum]);
1519 break;
1520 }
1521
1522 /* Consider freezing any normal tuples which will not be removed */
1523 if (prstate->attempt_freeze)
1524 {
1525 bool totally_frozen;
1526
1527 if ((heap_prepare_freeze_tuple(htup,
1528 prstate->cutoffs,
1529 &prstate->pagefrz,
1530 &prstate->frozen[prstate->nfrozen],
1531 &totally_frozen)))
1532 {
1533 /* Save prepared freeze plan for later */
1534 prstate->frozen[prstate->nfrozen++].offset = offnum;
1535 }
1536
1537 /*
1538 * If any tuple isn't either totally frozen already or eligible to
1539 * become totally frozen (according to its freeze plan), then the page
1540 * definitely cannot be set all-frozen in the visibility map later on.
1541 */
1542 if (!totally_frozen)
1543 prstate->all_frozen = false;
1544 }
1545}
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition: heapam.c:7065
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
Definition: pruneheap.c:1245

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, elog, ERROR, PruneState::frozen, PruneState::hastup, heap_prepare_freeze_tuple(), heap_prune_record_prunable(), HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), PruneState::htsv, PruneState::live_tuples, PruneState::nfrozen, HeapTupleFreeze::offset, VacuumCutoffs::OldestXmin, PruneState::pagefrz, PageGetItem(), PageGetItemId(), PruneState::processed, PruneState::recently_dead_tuples, TransactionIdFollows(), TransactionIdIsNormal, TransactionIdPrecedes(), and PruneState::visibility_cutoff_xid.

Referenced by heap_page_prune_and_freeze(), and heap_prune_chain().
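
For a HEAPTUPLE_LIVE tuple, the all_visible test above reduces to two conditions: the tuple must be hinted xmin-committed, and xmin must precede the OldestXmin cutoff. A compressed standalone sketch of just that decision; it deliberately omits the FrozenTransactionId special case and the visibility_cutoff_xid tracking, so treat it as an approximation:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

static bool xid_precedes(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) < 0;
}

/* hinted_committed models HeapTupleHeaderXminCommitted() */
static bool tuple_keeps_all_visible(bool hinted_committed,
                                    TransactionId xmin,
                                    TransactionId oldest_xmin)
{
    if (!hinted_committed)
        return false; /* might be an async commit; cannot trust xmin yet */
    return xid_precedes(xmin, oldest_xmin); /* old enough for everyone */
}

int main(void)
{
    printf("%d\n", tuple_keeps_all_visible(true, 500, 600));  /* 1 */
    printf("%d\n", tuple_keeps_all_visible(true, 700, 600));  /* 0 */
    printf("%d\n", tuple_keeps_all_visible(false, 500, 600)); /* 0 */
    return 0;
}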

◆ heap_prune_record_unchanged_lp_redirect()

static void heap_prune_record_unchanged_lp_redirect ( PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1580 of file pruneheap.c.

1581{
1582 /*
1583 * A redirect line pointer doesn't count as a live tuple.
1584 *
1585 * If we leave a redirect line pointer in place, there will be another
1586 * tuple on the page that it points to. We will do the bookkeeping for
1587 * that separately. So we have nothing to do here, except remember that
1588 * we processed this item.
1589 */
1590 Assert(!prstate->processed[offnum]);
1591 prstate->processed[offnum] = true;
1592}

References Assert(), and PruneState::processed.

Referenced by heap_prune_chain().

◆ heap_prune_record_unchanged_lp_unused()

static void heap_prune_record_unchanged_lp_unused ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1363 of file pruneheap.c.

1364{
1365 Assert(!prstate->processed[offnum]);
1366 prstate->processed[offnum] = true;
1367}

References Assert(), and PruneState::processed.

Referenced by heap_page_prune_and_freeze().

◆ heap_prune_record_unused()

static void heap_prune_record_unused ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1341 of file pruneheap.c.

1342{
1343 Assert(!prstate->processed[offnum]);
1344 prstate->processed[offnum] = true;
1345
1346 Assert(prstate->nunused < MaxHeapTuplesPerPage);
1347 prstate->nowunused[prstate->nunused] = offnum;
1348 prstate->nunused++;
1349
1350 /*
1351 * If the root entry had been a normal tuple, we are deleting it, so count
1352 * it in the result. But changing a redirect (even to DEAD state) doesn't
1353 * count.
1354 */
1355 if (was_normal)
1356 prstate->ndeleted++;
1357}

References Assert(), MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nowunused, PruneState::nunused, and PruneState::processed.

Referenced by heap_page_prune_and_freeze(), heap_prune_chain(), and heap_prune_record_dead_or_unused().

◆ heap_prune_satisfies_vacuum()

static HTSV_Result heap_prune_satisfies_vacuum ( PruneState prstate,
HeapTuple  tup,
Buffer  buffer 
)
static

Definition at line 961 of file pruneheap.c.

962{
963 HTSV_Result res;
964 TransactionId dead_after;
965
966 res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
967
968 if (res != HEAPTUPLE_RECENTLY_DEAD)
969 return res;
970
971 /*
972 * For VACUUM, we must be sure to prune tuples with xmax older than
973 * OldestXmin -- a visibility cutoff determined at the beginning of
974 * vacuuming the relation. OldestXmin is used for freezing determination
975 * and we cannot freeze dead tuples' xmaxes.
976 */
977 if (prstate->cutoffs &&
978 TransactionIdIsValid(dead_after) &&
979 NormalTransactionIdPrecedes(dead_after, prstate->cutoffs->OldestXmin))
980 return HEAPTUPLE_DEAD;
981
982 /*
983 * Determine whether or not the tuple is considered dead when compared
984 * with the provided GlobalVisState. On-access pruning does not provide
985 * VacuumCutoffs. And for vacuum, even if the tuple's xmax is not older
986 * than OldestXmin, GlobalVisTestIsRemovableXid() could find the row dead
987 * if the GlobalVisState has been updated since the beginning of vacuuming
988 * the relation.
989 */
990 if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
991 return HEAPTUPLE_DEAD;
992
993 return res;
994}
HTSV_Result
Definition: heapam.h:125
HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer, TransactionId *dead_after)
#define NormalTransactionIdPrecedes(id1, id2)
Definition: transam.h:147

References PruneState::cutoffs, GlobalVisTestIsRemovableXid(), HEAPTUPLE_DEAD, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuumHorizon(), NormalTransactionIdPrecedes, VacuumCutoffs::OldestXmin, TransactionIdIsValid, and PruneState::vistest.

Referenced by heap_page_prune_and_freeze().
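
The shape of the decision: only HEAPTUPLE_RECENTLY_DEAD results get a second look, first against OldestXmin (VACUUM callers only), then against the GlobalVisState horizon. A toy decision function under those assumptions; the horizon parameters are illustrative stand-ins for the real data structures, and the TransactionIdIsValid(dead_after) guard is folded away:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t TransactionId;

enum toy_htsv { TOY_LIVE, TOY_RECENTLY_DEAD, TOY_DEAD };

static bool xid_precedes(TransactionId a, TransactionId b)
{
    return (int32_t) (a - b) < 0;
}

/* have_cutoffs models "called by VACUUM, so prstate->cutoffs != NULL";
 * vis_horizon stands in for what GlobalVisTestIsRemovableXid() consults. */
static enum toy_htsv
toy_satisfies_vacuum(enum toy_htsv res, TransactionId dead_after,
                     bool have_cutoffs, TransactionId oldest_xmin,
                     TransactionId vis_horizon)
{
    if (res != TOY_RECENTLY_DEAD)
        return res;
    if (have_cutoffs && xid_precedes(dead_after, oldest_xmin))
        return TOY_DEAD;
    if (xid_precedes(dead_after, vis_horizon))
        return TOY_DEAD;
    return res;
}

int main(void)
{
    /* deleter xid 90 is older than OldestXmin 100: prunable for VACUUM */
    printf("%d\n",
           toy_satisfies_vacuum(TOY_RECENTLY_DEAD, 90, true, 100, 95)); /* 2 */
    /* on-access path (no cutoffs): only the visibility horizon matters */
    printf("%d\n",
           toy_satisfies_vacuum(TOY_RECENTLY_DEAD, 97, false, 0, 95));  /* 1 */
    return 0;
}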

◆ htsv_get_valid_status()

static HTSV_Result htsv_get_valid_status ( int  status)
inlinestatic

Definition at line 1004 of file pruneheap.c.

1005{
1006 Assert(status >= HEAPTUPLE_DEAD &&
1007 status <= HEAPTUPLE_DELETE_IN_PROGRESS);
1008 return (HTSV_Result) status;
1009}

References Assert(), HEAPTUPLE_DEAD, and HEAPTUPLE_DELETE_IN_PROGRESS.

Referenced by heap_prune_chain().

◆ log_heap_prune_and_freeze()

void log_heap_prune_and_freeze ( Relation  relation,
Buffer  buffer,
Buffer  vmbuffer,
uint8  vmflags,
TransactionId  conflict_xid,
bool  cleanup_lock,
PruneReason  reason,
HeapTupleFreeze frozen,
int  nfrozen,
OffsetNumber redirected,
int  nredirected,
OffsetNumber dead,
int  ndead,
OffsetNumber unused,
int  nunused 
)

Definition at line 2101 of file pruneheap.c.

2110{
2111 xl_heap_prune xlrec;
2112 XLogRecPtr recptr;
2113 uint8 info;
2114 uint8 regbuf_flags_heap;
2115
2116 /* The following local variables hold data registered in the WAL record: */
2117 xlhp_freeze_plan plans[MaxHeapTuplesPerPage];
2118 xlhp_freeze_plans freeze_plans;
2119 xlhp_prune_items redirect_items;
2120 xlhp_prune_items dead_items;
2121 xlhp_prune_items unused_items;
2122 OffsetNumber frz_offsets[MaxHeapTuplesPerPage];
2123 bool do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
2124 bool do_set_vm = vmflags & VISIBILITYMAP_VALID_BITS;
2125
2126 Assert((vmflags & VISIBILITYMAP_VALID_BITS) == vmflags);
2127
2128 xlrec.flags = 0;
2129 regbuf_flags_heap = REGBUF_STANDARD;
2130
2131 /*
2132 * We can avoid an FPI of the heap page if the only modification we are
2133 * making to it is to set PD_ALL_VISIBLE and checksums/wal_log_hints are
2134 * disabled. Note that if we explicitly skip an FPI, we must not stamp the
2135 * heap page with this record's LSN. Recovery skips records <= the stamped
2136 * LSN, so this could lead to skipping an earlier FPI needed to repair a
2137 * torn page.
2138 */
2139 if (!do_prune &&
2140 nfrozen == 0 &&
2141 (!do_set_vm || !XLogHintBitIsNeeded()))
2142 regbuf_flags_heap |= REGBUF_NO_IMAGE;
2143
2144 /*
2145 * Prepare data for the buffer. The arrays are not actually in the
2146 * buffer, but we pretend that they are. When XLogInsert stores a full
2147 * page image, the arrays can be omitted.
2148 */
2149 XLogBeginInsert();
2150 XLogRegisterBuffer(0, buffer, regbuf_flags_heap);
2151
2152 if (do_set_vm)
2153 XLogRegisterBuffer(1, vmbuffer, 0);
2154
2155 if (nfrozen > 0)
2156 {
2157 int nplans;
2158
2159 xlrec.flags |= XLHP_HAS_FREEZE_PLANS;
2160
2161 /*
2162 * Prepare deduplicated representation for use in the WAL record. This
2163 * destructively sorts frozen tuples array in-place.
2164 */
2165 nplans = heap_log_freeze_plan(frozen, nfrozen, plans, frz_offsets);
2166
2167 freeze_plans.nplans = nplans;
2168 XLogRegisterBufData(0, &freeze_plans,
2169 offsetof(xlhp_freeze_plans, plans));
2170 XLogRegisterBufData(0, plans,
2171 sizeof(xlhp_freeze_plan) * nplans);
2172 }
2173 if (nredirected > 0)
2174 {
2175 xlrec.flags |= XLHP_HAS_REDIRECTIONS;
2176
2177 redirect_items.ntargets = nredirected;
2178 XLogRegisterBufData(0, &redirect_items,
2179 offsetof(xlhp_prune_items, data));
2180 XLogRegisterBufData(0, redirected,
2181 sizeof(OffsetNumber[2]) * nredirected);
2182 }
2183 if (ndead > 0)
2184 {
2185 xlrec.flags |= XLHP_HAS_DEAD_ITEMS;
2186
2187 dead_items.ntargets = ndead;
2188 XLogRegisterBufData(0, &dead_items,
2189 offsetof(xlhp_prune_items, data));
2190 XLogRegisterBufData(0, dead,
2191 sizeof(OffsetNumber) * ndead);
2192 }
2193 if (nunused > 0)
2194 {
2195 xlrec.flags |= XLHP_HAS_NOW_UNUSED_ITEMS;
2196
2197 unused_items.ntargets = nunused;
2198 XLogRegisterBufData(0, &unused_items,
2199 offsetof(xlhp_prune_items, data));
2200 XLogRegisterBufData(0, unused,
2201 sizeof(OffsetNumber) * nunused);
2202 }
2203 if (nfrozen > 0)
2204 XLogRegisterBufData(0, frz_offsets,
2205 sizeof(OffsetNumber) * nfrozen);
2206
2207 /*
2208 * Prepare the main xl_heap_prune record. We already set the XLHP_HAS_*
2209 * flag above.
2210 */
2211 if (vmflags & VISIBILITYMAP_ALL_VISIBLE)
2212 {
2213 xlrec.flags |= XLHP_VM_ALL_VISIBLE;
2214 if (vmflags & VISIBILITYMAP_ALL_FROZEN)
2215 xlrec.flags |= XLHP_VM_ALL_FROZEN;
2216 }
2217 if (RelationIsAccessibleInLogicalDecoding(relation))
2218 xlrec.flags |= XLHP_IS_CATALOG_REL;
2219 if (TransactionIdIsValid(conflict_xid))
2220 xlrec.flags |= XLHP_HAS_CONFLICT_HORIZON;
2221 if (cleanup_lock)
2222 xlrec.flags |= XLHP_CLEANUP_LOCK;
2223 else
2224 {
2225 Assert(nredirected == 0 && ndead == 0);
2226 /* also, any items in 'unused' must've been LP_DEAD previously */
2227 }
2228 XLogRegisterData(&xlrec, SizeOfHeapPrune);
2229 if (TransactionIdIsValid(conflict_xid))
2230 XLogRegisterData(&conflict_xid, sizeof(TransactionId));
2231
2232 switch (reason)
2233 {
2234 case PRUNE_ON_ACCESS:
2235 info = XLOG_HEAP2_PRUNE_ON_ACCESS;
2236 break;
2237 case PRUNE_VACUUM_SCAN:
2238 info = XLOG_HEAP2_PRUNE_VACUUM_SCAN;
2239 break;
2240 case PRUNE_VACUUM_CLEANUP:
2241 info = XLOG_HEAP2_PRUNE_VACUUM_CLEANUP;
2242 break;
2243 default:
2244 elog(ERROR, "unrecognized prune reason: %d", (int) reason);
2245 break;
2246 }
2247 recptr = XLogInsert(RM_HEAP2_ID, info);
2248
2249 if (do_set_vm)
2250 {
2251 Assert(BufferIsDirty(vmbuffer));
2252 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2253 }
2254
2255 /*
2256 * See comment at the top of the function about regbuf_flags_heap for
2257 * details on when we can advance the page LSN.
2258 */
2259 if (do_prune || nfrozen > 0 || (do_set_vm && XLogHintBitIsNeeded()))
2260 {
2261 Assert(BufferIsDirty(buffer));
2262 PageSetLSN(BufferGetPage(buffer), recptr);
2263 }
2264}
bool BufferIsDirty(Buffer buffer)
Definition: bufmgr.c:2911
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:390
uint8_t uint8
Definition: c.h:540
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:272
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:271
#define XLHP_HAS_CONFLICT_HORIZON
Definition: heapam_xlog.h:316
#define XLHP_HAS_FREEZE_PLANS
Definition: heapam_xlog.h:322
#define XLHP_VM_ALL_VISIBLE
Definition: heapam_xlog.h:339
#define SizeOfHeapPrune
Definition: heapam_xlog.h:295
#define XLHP_HAS_NOW_UNUSED_ITEMS
Definition: heapam_xlog.h:331
#define XLHP_VM_ALL_FROZEN
Definition: heapam_xlog.h:340
#define XLHP_HAS_REDIRECTIONS
Definition: heapam_xlog.h:329
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition: heapam_xlog.h:61
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition: heapam_xlog.h:60
#define XLHP_CLEANUP_LOCK
Definition: heapam_xlog.h:308
#define XLHP_HAS_DEAD_ITEMS
Definition: heapam_xlog.h:330
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition: heapam_xlog.h:62
#define XLHP_IS_CATALOG_REL
Definition: heapam_xlog.h:298
const void * data
static int heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)
Definition: pruneheap.c:2022
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:694
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:478
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition: xloginsert.c:409
void XLogRegisterData(const void *data, uint32 len)
Definition: xloginsert.c:368
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:245
void XLogBeginInsert(void)
Definition: xloginsert.c:152
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:33

References Assert(), BufferGetPage(), BufferIsDirty(), data, elog, ERROR, xl_heap_prune::flags, heap_log_freeze_plan(), MaxHeapTuplesPerPage, xlhp_freeze_plans::nplans, xlhp_prune_items::ntargets, PageSetLSN(), PRUNE_ON_ACCESS, PRUNE_VACUUM_CLEANUP, PRUNE_VACUUM_SCAN, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationIsAccessibleInLogicalDecoding, SizeOfHeapPrune, TransactionIdIsValid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_FREEZE_PLANS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLogBeginInsert(), XLogHintBitIsNeeded, XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by heap_page_prune_and_freeze(), and lazy_vacuum_heap_page().
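
The flags byte advertises which optional payloads follow the fixed part of the record, so redo can parse it. A condensed sketch of that mapping; the TOY_* bit values are made up for illustration and need not match the real XLHP_* constants in heapam_xlog.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit assignments only. */
#define TOY_HAS_FREEZE_PLANS     (1 << 0)
#define TOY_HAS_REDIRECTIONS     (1 << 1)
#define TOY_HAS_DEAD_ITEMS       (1 << 2)
#define TOY_HAS_NOW_UNUSED_ITEMS (1 << 3)

/* Mirror how log_heap_prune_and_freeze() raises one flag per non-empty
 * sub-array, so that redo knows which payloads follow the fixed header. */
static uint8_t toy_prune_flags(int nfrozen, int nredirected, int ndead,
                               int nunused)
{
    uint8_t flags = 0;

    if (nfrozen > 0)
        flags |= TOY_HAS_FREEZE_PLANS;
    if (nredirected > 0)
        flags |= TOY_HAS_REDIRECTIONS;
    if (ndead > 0)
        flags |= TOY_HAS_DEAD_ITEMS;
    if (nunused > 0)
        flags |= TOY_HAS_NOW_UNUSED_ITEMS;
    return flags;
}

int main(void)
{
    /* redirects and dead items present: bits 1 and 2 set */
    printf("flags = 0x%02x\n", (unsigned) toy_prune_flags(0, 2, 1, 0));
    return 0;
}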

◆ page_verify_redirects()

static void page_verify_redirects ( Page  page)
static

Definition at line 1781 of file pruneheap.c.

1782{
1783#ifdef USE_ASSERT_CHECKING
1784 OffsetNumber offnum;
1785 OffsetNumber maxoff;
1786
1787 maxoff = PageGetMaxOffsetNumber(page);
1788 for (offnum = FirstOffsetNumber;
1789 offnum <= maxoff;
1790 offnum = OffsetNumberNext(offnum))
1791 {
1792 ItemId itemid = PageGetItemId(page, offnum);
1793 OffsetNumber targoff;
1794 ItemId targitem;
1795 HeapTupleHeader htup;
1796
1797 if (!ItemIdIsRedirected(itemid))
1798 continue;
1799
1800 targoff = ItemIdGetRedirect(itemid);
1801 targitem = PageGetItemId(page, targoff);
1802
1803 Assert(ItemIdIsUsed(targitem));
1804 Assert(ItemIdIsNormal(targitem));
1805 Assert(ItemIdHasStorage(targitem));
1806 htup = (HeapTupleHeader) PageGetItem(page, targitem);
1807 Assert(HeapTupleHeaderIsHeapOnly(htup));
1808 }
1809#endif
1810}

References Assert(), FirstOffsetNumber, HeapTupleHeaderIsHeapOnly(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, OffsetNumberNext, PageGetItem(), PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by heap_page_prune_execute().
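
The invariant asserted here can be modeled on a toy line pointer array: every redirect must land on a used, normal, heap-only item. A standalone sketch with miniature stand-in types, not the PostgreSQL item machinery:

#include <assert.h>
#include <stdio.h>

/* miniature stand-ins for ItemId states and the heap-only flag */
enum toy_lp { TOY_LP_UNUSED, TOY_LP_NORMAL, TOY_LP_REDIRECT };

struct toy_item
{
    enum toy_lp kind;
    int         target;    /* offset chased when kind == TOY_LP_REDIRECT */
    int         heap_only; /* models HeapTupleHeaderIsHeapOnly() */
};

static void toy_verify_redirects(const struct toy_item *items, int nitems)
{
    for (int i = 0; i < nitems; i++)
    {
        if (items[i].kind != TOY_LP_REDIRECT)
            continue;

        const struct toy_item *targ = &items[items[i].target];

        /* mirrors the page_verify_redirects() assertions */
        assert(targ->kind == TOY_LP_NORMAL);
        assert(targ->heap_only);
    }
}

int main(void)
{
    struct toy_item page[] = {
        {TOY_LP_REDIRECT, 1, 0}, /* offset 0 redirects to offset 1 */
        {TOY_LP_NORMAL, 0, 1},   /* valid heap-only target */
    };

    toy_verify_redirects(page, 2);
    puts("redirect targets OK");
    return 0;
}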