PostgreSQL Source Code git master
pruneheap.c File Reference
#include "postgres.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymapdefs.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"
Include dependency graph for pruneheap.c:

Go to the source code of this file.

Data Structures

struct  PruneState
 

Functions

static void prune_freeze_setup (PruneFreezeParams *params, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid, PruneFreezeResult *presult, PruneState *prstate)
 
static void prune_freeze_plan (Oid reloid, Buffer buffer, PruneState *prstate, OffsetNumber *off_loc)
 
static HTSV_Result heap_prune_satisfies_vacuum (PruneState *prstate, HeapTuple tup, Buffer buffer)
 
static HTSV_Result htsv_get_valid_status (int status)
 
static void heap_prune_chain (Page page, BlockNumber blockno, OffsetNumber maxoff, OffsetNumber rootoffnum, PruneState *prstate)
 
static void heap_prune_record_prunable (PruneState *prstate, TransactionId xid)
 
static void heap_prune_record_redirect (PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
 
static void heap_prune_record_dead (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_dead_or_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unchanged_lp_unused (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_normal (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_dead (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_redirect (PruneState *prstate, OffsetNumber offnum)
 
static void page_verify_redirects (Page page)
 
static bool heap_page_will_freeze (Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
 
void heap_page_prune_opt (Relation relation, Buffer buffer)
 
void heap_page_prune_and_freeze (PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
 
void heap_page_prune_execute (Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
 
void heap_get_root_tuples (Page page, OffsetNumber *root_offsets)
 
static bool heap_log_freeze_eq (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_cmp (const void *arg1, const void *arg2)
 
static void heap_log_freeze_new_plan (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_plan (HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)
 
void log_heap_prune_and_freeze (Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
 

Function Documentation

◆ heap_get_root_tuples()

void heap_get_root_tuples ( Page  page,
OffsetNumber root_offsets 
)

Definition at line 1895 of file pruneheap.c.

/*
 * heap_get_root_tuples -- fill root_offsets[] so that, for every tuple on the
 * page, root_offsets[offnum - 1] holds the offset of the root of its HOT
 * chain (entries for redirect items and unreachable tuples stay
 * InvalidOffsetNumber).
 *
 * NOTE(review): this is a numbered Doxygen text listing; the leading numbers
 * are pruneheap.c source line numbers.  Source lines 1901, 1944, 1980 and
 * 1991 were dropped by the HTML-to-text extraction (they rendered as pure
 * hyperlinks) and have been restored below from upstream PostgreSQL source --
 * verify against git master.
 */
1896{
1897 OffsetNumber offnum,
1898 maxoff;
1899
1900 MemSet(root_offsets, InvalidOffsetNumber,
1901 MaxHeapTuplesPerPage * sizeof(OffsetNumber));
1902
1903 maxoff = PageGetMaxOffsetNumber(page);
1904 for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
1905 {
1906 ItemId lp = PageGetItemId(page, offnum);
1907 HeapTupleHeader htup;
1908 OffsetNumber nextoffnum;
1909 TransactionId priorXmax;
1910
1911 /* skip unused and dead items */
1912 if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
1913 continue;
1914
1915 if (ItemIdIsNormal(lp))
1916 {
1917 htup = (HeapTupleHeader) PageGetItem(page, lp);
1918
1919 /*
1920 * Check if this tuple is part of a HOT-chain rooted at some other
1921 * tuple. If so, skip it for now; we'll process it when we find
1922 * its root.
1923 */
1924 if (HeapTupleHeaderIsHeapOnly(htup))
1925 continue;
1926
1927 /*
1928 * This is either a plain tuple or the root of a HOT-chain.
1929 * Remember it in the mapping.
1930 */
1931 root_offsets[offnum - 1] = offnum;
1932
1933 /* If it's not the start of a HOT-chain, we're done with it */
1934 if (!HeapTupleHeaderIsHotUpdated(htup))
1935 continue;
1936
1937 /* Set up to scan the HOT-chain */
1938 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1939 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1940 }
1941 else
1942 {
1943 /* Must be a redirect item. We do not set its root_offsets entry */
1944 Assert(ItemIdIsRedirected(lp));
1945 /* Set up to scan the HOT-chain */
1946 nextoffnum = ItemIdGetRedirect(lp);
1947 priorXmax = InvalidTransactionId;
1948 }
1949
1950 /*
1951 * Now follow the HOT-chain and collect other tuples in the chain.
1952 *
1953 * Note: Even though this is a nested loop, the complexity of the
1954 * function is O(N) because a tuple in the page should be visited not
1955 * more than twice, once in the outer loop and once in HOT-chain
1956 * chases.
1957 */
1958 for (;;)
1959 {
1960 /* Sanity check (pure paranoia) */
1961 if (offnum < FirstOffsetNumber)
1962 break;
1963
1964 /*
1965 * An offset past the end of page's line pointer array is possible
1966 * when the array was truncated
1967 */
1968 if (offnum > maxoff)
1969 break;
1970
1971 lp = PageGetItemId(page, nextoffnum);
1972
1973 /* Check for broken chains */
1974 if (!ItemIdIsNormal(lp))
1975 break;
1976
1977 htup = (HeapTupleHeader) PageGetItem(page, lp);
1978
1979 if (TransactionIdIsValid(priorXmax) &&
1980 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
1981 break;
1982
1983 /* Remember the root line pointer for this item */
1984 root_offsets[nextoffnum - 1] = offnum;
1985
1986 /* Advance to next chain member, if any */
1987 if (!HeapTupleHeaderIsHotUpdated(htup))
1988 break;
1989
1990 /* HOT implies it can't have moved to different partition */
1991 Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1992
1993 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1994 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1995 }
1996 }
1997}
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static void * PageGetItem(PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define MemSet(start, val, len)
Definition: c.h:1019
uint32 TransactionId
Definition: c.h:672
Assert(PointerIsAligned(start, uint64))
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
Definition: htup_details.h:555
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
Definition: htup_details.h:480
static bool HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
Definition: htup_details.h:534
static TransactionId HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
Definition: htup_details.h:397
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
ItemPointerData t_ctid
Definition: htup_details.h:161
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References Assert(), FirstOffsetNumber, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetOffsetNumber(), MaxHeapTuplesPerPage, MemSet, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_build_range_scan(), and heapam_index_validate_scan().

◆ heap_log_freeze_cmp()

static int heap_log_freeze_cmp ( const void *  arg1,
const void *  arg2 
)
static

Definition at line 2022 of file pruneheap.c.

/*
 * qsort() comparator over HeapTupleFreeze entries.  Sorts so that entries
 * with identical freeze plans (same xmax, t_infomask2, t_infomask, frzflags
 * -- the fields heap_log_freeze_eq compares) become adjacent, which lets
 * heap_log_freeze_plan deduplicate them into a single canonical plan.  Page
 * offset number is the final tiebreaker so each plan's offset array comes
 * out in ascending order.  Two entries can never share an offset on one
 * page, hence the Assert(false) at the end is unreachable.
 */
2023{
2024 HeapTupleFreeze *frz1 = (HeapTupleFreeze *) arg1;
2025 HeapTupleFreeze *frz2 = (HeapTupleFreeze *) arg2;
2026
2027 if (frz1->xmax < frz2->xmax)
2028 return -1;
2029 else if (frz1->xmax > frz2->xmax)
2030 return 1;
2031
2032 if (frz1->t_infomask2 < frz2->t_infomask2)
2033 return -1;
2034 else if (frz1->t_infomask2 > frz2->t_infomask2)
2035 return 1;
2036
2037 if (frz1->t_infomask < frz2->t_infomask)
2038 return -1;
2039 else if (frz1->t_infomask > frz2->t_infomask)
2040 return 1;
2041
2042 if (frz1->frzflags < frz2->frzflags)
2043 return -1;
2044 else if (frz1->frzflags > frz2->frzflags)
2045 return 1;
2046
2047 /*
2048 * heap_log_freeze_eq would consider these tuple-wise plans to be equal.
2049 * (So the tuples will share a single canonical freeze plan.)
2050 *
2051 * We tiebreak on page offset number to keep each freeze plan's page
2052 * offset number array individually sorted. (Unnecessary, but be tidy.)
2053 */
2054 if (frz1->offset < frz2->offset)
2055 return -1;
2056 else if (frz1->offset > frz2->offset)
2057 return 1;
2058
2059 Assert(false);
2060 return 0;
2061}
uint8 frzflags
Definition: heapam.h:147
uint16 t_infomask2
Definition: heapam.h:145
TransactionId xmax
Definition: heapam.h:144
OffsetNumber offset
Definition: heapam.h:152
uint16 t_infomask
Definition: heapam.h:146

References Assert(), HeapTupleFreeze::frzflags, HeapTupleFreeze::offset, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_eq()

static bool heap_log_freeze_eq ( xlhp_freeze_plan plan,
HeapTupleFreeze frz 
)
inlinestatic

Definition at line 2006 of file pruneheap.c.

/*
 * Report whether the open canonical freeze plan 'plan' describes the same
 * tuple-header transformation as the per-tuple freeze request 'frz'.  Only
 * the four plan-defining fields are compared (xmax, t_infomask2, t_infomask,
 * frzflags); a match means the caller can fold frz into the current plan,
 * otherwise it must start a new one via heap_log_freeze_new_plan.
 */
2007{
2008 if (plan->xmax == frz->xmax &&
2009 plan->t_infomask2 == frz->t_infomask2 &&
2010 plan->t_infomask == frz->t_infomask &&
2011 plan->frzflags == frz->frzflags)
2012 return true;
2013
2014 /* Caller must call heap_log_freeze_new_plan again for frz */
2015 return false;
2016}
#define plan(x)
Definition: pg_regress.c:161

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_new_plan()

static void heap_log_freeze_new_plan ( xlhp_freeze_plan plan,
HeapTupleFreeze frz 
)
inlinestatic

Definition at line 2068 of file pruneheap.c.

/*
 * Initialize a fresh canonical freeze plan from the per-tuple freeze request
 * 'frz'.  ntuples starts at 1 (this tuple); the caller increments it for
 * each subsequent tuple that heap_log_freeze_eq matches to this plan.
 */
2069{
2070 plan->xmax = frz->xmax;
2071 plan->t_infomask2 = frz->t_infomask2;
2072 plan->t_infomask = frz->t_infomask;
2073 plan->frzflags = frz->frzflags;
2074 plan->ntuples = 1; /* for now */
2075}

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_plan()

static int heap_log_freeze_plan ( HeapTupleFreeze tuples,
int  ntuples,
xlhp_freeze_plan plans_out,
OffsetNumber offsets_out 
)
static

Definition at line 2088 of file pruneheap.c.

/*
 * Deduplicate per-tuple freeze requests into canonical per-page freeze plans
 * for the WAL record.  Sorts 'tuples' with heap_log_freeze_cmp so that equal
 * plans are adjacent, emits one xlhp_freeze_plan per distinct plan into
 * plans_out, and writes every tuple's page offset into offsets_out grouped
 * by plan (ascending within each group -- the REDO routine relies on the
 * grouping).  Returns the number of distinct plans (1..ntuples).
 */
2091{
2092 int nplans = 0;
2093
2094 /* Sort tuple-based freeze plans in the order required to deduplicate */
2095 qsort(tuples, ntuples, sizeof(HeapTupleFreeze), heap_log_freeze_cmp);
2096
2097 for (int i = 0; i < ntuples; i++)
2098 {
2099 HeapTupleFreeze *frz = tuples + i;
2100
2101 if (i == 0)
2102 {
2103 /* New canonical freeze plan starting with first tup */
2104 heap_log_freeze_new_plan(plans_out, frz);
2105 nplans++;
2106 }
2107 else if (heap_log_freeze_eq(plans_out, frz))
2108 {
2109 /* tup matches open canonical plan -- include tup in it */
2110 Assert(offsets_out[i - 1] < frz->offset);
2111 plans_out->ntuples++;
2112 }
2113 else
2114 {
2115 /* Tup doesn't match current plan -- done with it now */
2116 plans_out++;
2117
2118 /* New canonical freeze plan starting with this tup */
2119 heap_log_freeze_new_plan(plans_out, frz);
2120 nplans++;
2121 }
2122
2123 /*
2124 * Save page offset number in dedicated buffer in passing.
2125 *
2126 * REDO routine relies on the record's offset numbers array grouping
2127 * offset numbers by freeze plan. The sort order within each grouping
2128 * is ascending offset number order, just to keep things tidy.
2129 */
2130 offsets_out[i] = frz->offset;
2131 }
2132
2133 Assert(nplans > 0 && nplans <= ntuples);
2134
2135 return nplans;
2136}
int i
Definition: isn.c:77
#define qsort(a, b, c, d)
Definition: port.h:499
static int heap_log_freeze_cmp(const void *arg1, const void *arg2)
Definition: pruneheap.c:2022
static bool heap_log_freeze_eq(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:2006
static void heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:2068

References Assert(), heap_log_freeze_cmp(), heap_log_freeze_eq(), heap_log_freeze_new_plan(), i, xlhp_freeze_plan::ntuples, HeapTupleFreeze::offset, and qsort.

Referenced by log_heap_prune_and_freeze().

◆ heap_page_prune_and_freeze()

void heap_page_prune_and_freeze ( PruneFreezeParams params,
PruneFreezeResult presult,
OffsetNumber off_loc,
TransactionId new_relfrozen_xid,
MultiXactId new_relmin_mxid 
)

Definition at line 819 of file pruneheap.c.

/*
 * heap_page_prune_and_freeze -- plan and apply pruning and freezing for one
 * heap page: scan line pointers, queue state changes, optionally apply them
 * inside a critical section, WAL-log, and copy results back to the caller.
 *
 * NOTE(review): numbered Doxygen text listing; leading numbers are
 * pruneheap.c source line numbers.  Source lines 845, 896, 955, 973, 980,
 * 994 and 996 were dropped by the HTML-to-text extraction (they rendered as
 * pure hyperlinks) and have been restored below from upstream PostgreSQL
 * source -- verify against git master.
 */
824{
825 Buffer buffer = params->buffer;
826 Page page = BufferGetPage(buffer);
827 PruneState prstate;
828 bool do_freeze;
829 bool do_prune;
830 bool do_hint_prune;
831 bool did_tuple_hint_fpi;
832 int64 fpi_before = pgWalUsage.wal_fpi;
833
834 /* Initialize prstate */
835 prune_freeze_setup(params,
836 new_relfrozen_xid, new_relmin_mxid,
837 presult, &prstate);
838
839 /*
840 * Examine all line pointers and tuple visibility information to determine
841 * which line pointers should change state and which tuples may be frozen.
842 * Prepare queue of state changes to later be executed in a critical
843 * section.
844 */
845 prune_freeze_plan(RelationGetRelid(params->relation),
846 buffer, &prstate, off_loc);
847
848 /*
849 * If checksums are enabled, calling heap_prune_satisfies_vacuum() while
850 * checking tuple visibility information in prune_freeze_plan() may have
851 * caused an FPI to be emitted.
852 */
853 did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
854
855 do_prune = prstate.nredirected > 0 ||
856 prstate.ndead > 0 ||
857 prstate.nunused > 0;
858
859 /*
860 * Even if we don't prune anything, if we found a new value for the
861 * pd_prune_xid field or the page was marked full, we will update the hint
862 * bit.
863 */
864 do_hint_prune = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
865 PageIsFull(page);
866
867 /*
868 * Decide if we want to go ahead with freezing according to the freeze
869 * plans we prepared, or not.
870 */
871 do_freeze = heap_page_will_freeze(params->relation, buffer,
872 did_tuple_hint_fpi,
873 do_prune,
874 do_hint_prune,
875 &prstate);
876
877 /*
878 * While scanning the line pointers, we did not clear
879 * all_visible/all_frozen when encountering LP_DEAD items because we
880 * wanted the decision whether or not to freeze the page to be unaffected
881 * by the short-term presence of LP_DEAD items. These LP_DEAD items are
882 * effectively assumed to be LP_UNUSED items in the making. It doesn't
883 * matter which vacuum heap pass (initial pass or final pass) ends up
884 * setting the page all-frozen, as long as the ongoing VACUUM does it.
885 *
886 * Now that we finished determining whether or not to freeze the page,
887 * update all_visible and all_frozen so that they reflect the true state
888 * of the page for setting PD_ALL_VISIBLE and VM bits.
889 */
890 if (prstate.lpdead_items > 0)
891 prstate.all_visible = prstate.all_frozen = false;
892
893 Assert(!prstate.all_frozen || prstate.all_visible);
894
895 /* Any error while applying the changes is critical */
896 START_CRIT_SECTION();
897
898 if (do_hint_prune)
899 {
900 /*
901 * Update the page's pd_prune_xid field to either zero, or the lowest
902 * XID of any soon-prunable tuple.
903 */
904 ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
905
906 /*
907 * Also clear the "page is full" flag, since there's no point in
908 * repeating the prune/defrag process until something else happens to
909 * the page.
910 */
911 PageClearFull(page);
912
913 /*
914 * If that's all we had to do to the page, this is a non-WAL-logged
915 * hint. If we are going to freeze or prune the page, we will mark
916 * the buffer dirty below.
917 */
918 if (!do_freeze && !do_prune)
919 MarkBufferDirtyHint(buffer, true);
920 }
921
922 if (do_prune || do_freeze)
923 {
924 /* Apply the planned item changes and repair page fragmentation. */
925 if (do_prune)
926 {
927 heap_page_prune_execute(buffer, false,
928 prstate.redirected, prstate.nredirected,
929 prstate.nowdead, prstate.ndead,
930 prstate.nowunused, prstate.nunused);
931 }
932
933 if (do_freeze)
934 heap_freeze_prepared_tuples(buffer, prstate.frozen, prstate.nfrozen);
935
936 MarkBufferDirty(buffer);
937
938 /*
939 * Emit a WAL XLOG_HEAP2_PRUNE* record showing what we did
940 */
941 if (RelationNeedsWAL(params->relation))
942 {
943 /*
944 * The snapshotConflictHorizon for the whole record should be the
945 * most conservative of all the horizons calculated for any of the
946 * possible modifications. If this record will prune tuples, any
947 * transactions on the standby older than the youngest xmax of the
948 * most recently removed tuple this record will prune will
949 * conflict. If this record will freeze tuples, any transactions
950 * on the standby with xids older than the youngest tuple this
951 * record will freeze will conflict.
952 */
953 TransactionId conflict_xid;
954
955 if (do_freeze && TransactionIdFollows(prstate.frz_conflict_horizon,
956 prstate.latest_xid_removed))
957 conflict_xid = prstate.frz_conflict_horizon;
958 else
959 conflict_xid = prstate.latest_xid_removed;
960
961 log_heap_prune_and_freeze(params->relation, buffer,
962 InvalidBuffer, /* vmbuffer */
963 0, /* vmflags */
964 conflict_xid,
965 true, params->reason,
966 prstate.frozen, prstate.nfrozen,
967 prstate.redirected, prstate.nredirected,
968 prstate.nowdead, prstate.ndead,
969 prstate.nowunused, prstate.nunused);
970 }
971 }
972
973 END_CRIT_SECTION();
974
975 /* Copy information back for caller */
976 presult->ndeleted = prstate.ndeleted;
977 presult->nnewlpdead = prstate.ndead;
978 presult->nfrozen = prstate.nfrozen;
979 presult->live_tuples = prstate.live_tuples;
980 presult->recently_dead_tuples = prstate.recently_dead_tuples;
981 presult->all_visible = prstate.all_visible;
982 presult->all_frozen = prstate.all_frozen;
983 presult->hastup = prstate.hastup;
984
985 /*
986 * For callers planning to update the visibility map, the conflict horizon
987 * for that record must be the newest xmin on the page. However, if the
988 * page is completely frozen, there can be no conflict and the
989 * vm_conflict_horizon should remain InvalidTransactionId. This includes
990 * the case that we just froze all the tuples; the prune-freeze record
991 * included the conflict XID already so the caller doesn't need it.
992 */
993 if (presult->all_frozen)
994 presult->vm_conflict_horizon = InvalidTransactionId;
995 else
996 presult->vm_conflict_horizon = prstate.visibility_cutoff_xid;
997
998 presult->lpdead_items = prstate.lpdead_items;
999 /* the presult->deadoffsets array was already filled in */
1000
1001 if (prstate.attempt_freeze)
1002 {
1003 if (presult->nfrozen > 0)
1004 {
1005 *new_relfrozen_xid = prstate.pagefrz.FreezePageRelfrozenXid;
1006 *new_relmin_mxid = prstate.pagefrz.FreezePageRelminMxid;
1007 }
1008 else
1009 {
1010 *new_relfrozen_xid = prstate.pagefrz.NoFreezePageRelfrozenXid;
1011 *new_relmin_mxid = prstate.pagefrz.NoFreezePageRelminMxid;
1012 }
1013 }
1014}
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:3037
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:5525
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:436
PageHeaderData * PageHeader
Definition: bufpage.h:173
static void PageClearFull(Page page)
Definition: bufpage.h:422
PageData * Page
Definition: bufpage.h:81
static bool PageIsFull(const PageData *page)
Definition: bufpage.h:412
int64_t int64
Definition: c.h:549
void heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7425
WalUsage pgWalUsage
Definition: instrument.c:22
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
static bool heap_page_will_freeze(Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
Definition: pruneheap.c:663
static void prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate, OffsetNumber *off_loc)
Definition: pruneheap.c:458
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2167
static void prune_freeze_setup(PruneFreezeParams *params, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid, PruneFreezeResult *presult, PruneState *prstate)
Definition: pruneheap.c:330
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition: pruneheap.c:1671
#define RelationGetRelid(relation)
Definition: rel.h:515
#define RelationNeedsWAL(relation)
Definition: rel.h:638
MultiXactId NoFreezePageRelminMxid
Definition: heapam.h:220
TransactionId FreezePageRelfrozenXid
Definition: heapam.h:208
MultiXactId FreezePageRelminMxid
Definition: heapam.h:209
TransactionId NoFreezePageRelfrozenXid
Definition: heapam.h:219
PruneReason reason
Definition: heapam.h:245
Buffer buffer
Definition: heapam.h:239
Relation relation
Definition: heapam.h:238
int recently_dead_tuples
Definition: heapam.h:285
TransactionId vm_conflict_horizon
Definition: heapam.h:300
bool all_visible
Definition: heapam.h:298
HeapPageFreeze pagefrz
Definition: pruneheap.c:104
bool all_visible
Definition: pruneheap.c:154
int ndead
Definition: pruneheap.c:56
TransactionId new_prune_xid
Definition: pruneheap.c:53
bool attempt_freeze
Definition: pruneheap.c:46
bool hastup
Definition: pruneheap.c:123
int recently_dead_tuples
Definition: pruneheap.c:120
OffsetNumber nowdead[MaxHeapTuplesPerPage]
Definition: pruneheap.c:61
TransactionId frz_conflict_horizon
Definition: pruneheap.c:137
OffsetNumber nowunused[MaxHeapTuplesPerPage]
Definition: pruneheap.c:62
int live_tuples
Definition: pruneheap.c:119
TransactionId visibility_cutoff_xid
Definition: pruneheap.c:156
bool all_frozen
Definition: pruneheap.c:155
HeapTupleFreeze frozen[MaxHeapTuplesPerPage]
Definition: pruneheap.c:63
int lpdead_items
Definition: pruneheap.c:129
int nfrozen
Definition: pruneheap.c:58
OffsetNumber redirected[MaxHeapTuplesPerPage *2]
Definition: pruneheap.c:60
int ndeleted
Definition: pruneheap.c:116
int nredirected
Definition: pruneheap.c:55
TransactionId latest_xid_removed
Definition: pruneheap.c:54
int nunused
Definition: pruneheap.c:57
int64 wal_fpi
Definition: instrument.h:54
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.h:297

References PruneState::all_frozen, PruneFreezeResult::all_frozen, PruneState::all_visible, PruneFreezeResult::all_visible, Assert(), PruneState::attempt_freeze, PruneFreezeParams::buffer, BufferGetPage(), END_CRIT_SECTION, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, PruneState::frozen, PruneState::frz_conflict_horizon, PruneState::hastup, PruneFreezeResult::hastup, heap_freeze_prepared_tuples(), heap_page_prune_execute(), heap_page_will_freeze(), InvalidBuffer, InvalidTransactionId, PruneState::latest_xid_removed, PruneState::live_tuples, PruneFreezeResult::live_tuples, log_heap_prune_and_freeze(), PruneState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MarkBufferDirtyHint(), PruneState::ndead, PruneState::ndeleted, PruneFreezeResult::ndeleted, PruneState::new_prune_xid, PruneState::nfrozen, PruneFreezeResult::nfrozen, PruneFreezeResult::nnewlpdead, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, PruneState::nowdead, PruneState::nowunused, PruneState::nredirected, PruneState::nunused, PageClearFull(), PruneState::pagefrz, PageIsFull(), pgWalUsage, prune_freeze_plan(), prune_freeze_setup(), PruneFreezeParams::reason, PruneState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, PruneState::redirected, PruneFreezeParams::relation, RelationGetRelid, RelationNeedsWAL, START_CRIT_SECTION, TransactionIdFollows(), PruneState::visibility_cutoff_xid, PruneFreezeResult::vm_conflict_horizon, and WalUsage::wal_fpi.

Referenced by heap_page_prune_opt(), and lazy_scan_prune().

◆ heap_page_prune_execute()

void heap_page_prune_execute ( Buffer  buffer,
bool  lp_truncate_only,
OffsetNumber redirected,
int  nredirected,
OffsetNumber nowdead,
int  ndead,
OffsetNumber nowunused,
int  nunused 
)

Definition at line 1671 of file pruneheap.c.

/*
 * heap_page_prune_execute -- apply previously-planned line pointer state
 * changes to the page: set redirects, mark items dead, mark items unused,
 * then either truncate the line pointer array (lp_truncate_only) or repair
 * fragmentation and verify redirect targets.
 *
 * NOTE(review): numbered Doxygen text listing; leading numbers are
 * pruneheap.c source line numbers.  Source lines 1678, 1693, 1709, 1737,
 * 1761, 1763, 1768, 1787, 1801, 1803, 1815, 1822 and 1828 were dropped by
 * the HTML-to-text extraction (they rendered as pure hyperlinks) and have
 * been restored below from upstream PostgreSQL source -- verify against git
 * master.
 */
1675{
1676 Page page = BufferGetPage(buffer);
1677 OffsetNumber *offnum;
1678 HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
1679
1680 /* Shouldn't be called unless there's something to do */
1681 Assert(nredirected > 0 || ndead > 0 || nunused > 0);
1682
1683 /* If 'lp_truncate_only', we can only remove already-dead line pointers */
1684 Assert(!lp_truncate_only || (nredirected == 0 && ndead == 0));
1685
1686 /* Update all redirected line pointers */
1687 offnum = redirected;
1688 for (int i = 0; i < nredirected; i++)
1689 {
1690 OffsetNumber fromoff = *offnum++;
1691 OffsetNumber tooff = *offnum++;
1692 ItemId fromlp = PageGetItemId(page, fromoff);
1693 ItemId tolp PG_USED_FOR_ASSERTS_ONLY;
1694
1695#ifdef USE_ASSERT_CHECKING
1696
1697 /*
1698 * Any existing item that we set as an LP_REDIRECT (any 'from' item)
1699 * must be the first item from a HOT chain. If the item has tuple
1700 * storage then it can't be a heap-only tuple. Otherwise we are just
1701 * maintaining an existing LP_REDIRECT from an existing HOT chain that
1702 * has been pruned at least once before now.
1703 */
1704 if (!ItemIdIsRedirected(fromlp))
1705 {
1706 Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
1707
1708 htup = (HeapTupleHeader) PageGetItem(page, fromlp);
1709 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1710 }
1711 else
1712 {
1713 /* We shouldn't need to redundantly set the redirect */
1714 Assert(ItemIdGetRedirect(fromlp) != tooff);
1715 }
1716
1717 /*
1718 * The item that we're about to set as an LP_REDIRECT (the 'from'
1719 * item) will point to an existing item (the 'to' item) that is
1720 * already a heap-only tuple. There can be at most one LP_REDIRECT
1721 * item per HOT chain.
1722 *
1723 * We need to keep around an LP_REDIRECT item (after original
1724 * non-heap-only root tuple gets pruned away) so that it's always
1725 * possible for VACUUM to easily figure out what TID to delete from
1726 * indexes when an entire HOT chain becomes dead. A heap-only tuple
1727 * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
1728 * tuple can.
1729 *
1730 * This check may miss problems, e.g. the target of a redirect could
1731 * be marked as unused subsequently. The page_verify_redirects() check
1732 * below will catch such problems.
1733 */
1734 tolp = PageGetItemId(page, tooff);
1735 Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
1736 htup = (HeapTupleHeader) PageGetItem(page, tolp);
1737 Assert(HeapTupleHeaderIsHeapOnly(htup));
1738#endif
1739
1740 ItemIdSetRedirect(fromlp, tooff);
1741 }
1742
1743 /* Update all now-dead line pointers */
1744 offnum = nowdead;
1745 for (int i = 0; i < ndead; i++)
1746 {
1747 OffsetNumber off = *offnum++;
1748 ItemId lp = PageGetItemId(page, off);
1749
1750#ifdef USE_ASSERT_CHECKING
1751
1752 /*
1753 * An LP_DEAD line pointer must be left behind when the original item
1754 * (which is dead to everybody) could still be referenced by a TID in
1755 * an index. This should never be necessary with any individual
1756 * heap-only tuple item, though. (It's not clear how much of a problem
1757 * that would be, but there is no reason to allow it.)
1758 */
1759 if (ItemIdHasStorage(lp))
1760 {
1761 Assert(ItemIdIsNormal(lp));
1762 htup = (HeapTupleHeader) PageGetItem(page, lp);
1763 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1764 }
1765 else
1766 {
1767 /* Whole HOT chain becomes dead */
1768 Assert(ItemIdIsRedirected(lp));
1769 }
1770#endif
1771
1772 ItemIdSetDead(lp);
1773 }
1774
1775 /* Update all now-unused line pointers */
1776 offnum = nowunused;
1777 for (int i = 0; i < nunused; i++)
1778 {
1779 OffsetNumber off = *offnum++;
1780 ItemId lp = PageGetItemId(page, off);
1781
1782#ifdef USE_ASSERT_CHECKING
1783
1784 if (lp_truncate_only)
1785 {
1786 /* Setting LP_DEAD to LP_UNUSED in vacuum's second pass */
1787 Assert(ItemIdIsDead(lp) && !ItemIdHasStorage(lp));
1788 }
1789 else
1790 {
1791 /*
1792 * When heap_page_prune_and_freeze() was called, mark_unused_now
1793 * may have been passed as true, which allows would-be LP_DEAD
1794 * items to be made LP_UNUSED instead. This is only possible if
1795 * the relation has no indexes. If there are any dead items, then
1796 * mark_unused_now was not true and every item being marked
1797 * LP_UNUSED must refer to a heap-only tuple.
1798 */
1799 if (ndead > 0)
1800 {
1801 Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
1802 htup = (HeapTupleHeader) PageGetItem(page, lp);
1803 Assert(HeapTupleHeaderIsHeapOnly(htup));
1804 }
1805 else
1806 Assert(ItemIdIsUsed(lp));
1807 }
1808
1809#endif
1810
1811 ItemIdSetUnused(lp);
1812 }
1813
1814 if (lp_truncate_only)
1815 PageTruncateLinePointerArray(page);
1816 else
1817 {
1818 /*
1819 * Finally, repair any fragmentation, and update the page's hint bit
1820 * about whether it has free pointers.
1821 */
1822 PageRepairFragmentation(page);
1823
1824 /*
1825 * Now that the page has been modified, assert that redirect items
1826 * still point to valid targets.
1827 */
1828 page_verify_redirects(page);
1829 }
1830}
void PageRepairFragmentation(Page page)
Definition: bufpage.c:698
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:229
#define ItemIdSetRedirect(itemId, link)
Definition: itemid.h:152
#define ItemIdSetDead(itemId)
Definition: itemid.h:164
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static void page_verify_redirects(Page page)
Definition: pruneheap.c:1847

References Assert(), BufferGetPage(), HeapTupleHeaderIsHeapOnly(), i, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemIdSetDead, ItemIdSetRedirect, ItemIdSetUnused, page_verify_redirects(), PageGetItem(), PageGetItemId(), PageRepairFragmentation(), PageTruncateLinePointerArray(), and PG_USED_FOR_ASSERTS_ONLY.

Referenced by heap_page_prune_and_freeze(), and heap_xlog_prune_freeze().

◆ heap_page_prune_opt()

void heap_page_prune_opt ( Relation  relation,
Buffer  buffer 
)

Definition at line 209 of file pruneheap.c.

210{
/* Opportunistically prune a heap page during ordinary access: bail out fast unless cheap heuristics (prune_xid set, xid removable, page looks full) say pruning is worthwhile. */
211 Page page = BufferGetPage(buffer);
212 TransactionId prune_xid;
213 GlobalVisState *vistest;
214 Size minfree;
215
216 /*
217 * We can't write WAL in recovery mode, so there's no point trying to
218 * clean the page. The primary will likely issue a cleaning WAL record
219 * soon anyway, so this is no particular loss.
220 */
221 if (RecoveryInProgress())
222 return;
223
224 /*
225 * First check whether there's any chance there's something to prune,
226 * determining the appropriate horizon is a waste if there's no prune_xid
227 * (i.e. no updates/deletes left potentially dead tuples around).
228 */
229 prune_xid = ((PageHeader) page)->pd_prune_xid;
230 if (!TransactionIdIsValid(prune_xid))
231 return;
232
233 /*
234 * Check whether prune_xid indicates that there may be dead rows that can
235 * be cleaned up.
236 */
237 vistest = GlobalVisTestFor(relation);
238
239 if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
240 return;
241
242 /*
243 * We prune when a previous UPDATE failed to find enough space on the page
244 * for a new tuple version, or when free space falls below the relation's
245 * fill-factor target (but not less than 10%).
246 *
247 * Checking free space here is questionable since we aren't holding any
248 * lock on the buffer; in the worst case we could get a bogus answer. It's
249 * unlikely to be *seriously* wrong, though, since reading either pd_lower
250 * or pd_upper is probably atomic. Avoiding taking a lock seems more
251 * important than sometimes getting a wrong answer in what is after all
252 * just a heuristic estimate.
253 */
254 minfree = RelationGetTargetPageFreeSpace(relation,
/* NOTE(review): the rendered listing dropped source line 255 here — per the reference list below it supplies the HEAP_DEFAULT_FILLFACTOR argument closing this call. */
256 minfree = Max(minfree, BLCKSZ / 10);
257
258 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
259 {
260 /* OK, try to get exclusive buffer lock */
/* NOTE(review): source line 261 is missing from this listing; per the reference list it is the failed ConditionalLockBufferForCleanup(buffer) test guarding this early return. */
262 return;
263
264 /*
265 * Now that we have buffer lock, get accurate information about the
266 * page's free space, and recheck the heuristic about whether to
267 * prune.
268 */
269 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
270 {
271 OffsetNumber dummy_off_loc;
272 PruneFreezeResult presult;
273
274 /*
275 * We don't pass the HEAP_PAGE_PRUNE_MARK_UNUSED_NOW option
276 * regardless of whether or not the relation has indexes, since we
277 * cannot safely determine that during on-access pruning with the
278 * current implementation.
279 */
280 PruneFreezeParams params = {
281 .relation = relation,
282 .buffer = buffer,
283 .reason = PRUNE_ON_ACCESS,
284 .options = 0,
285 .vistest = vistest,
286 .cutoffs = NULL,
287 };
288
289 heap_page_prune_and_freeze(&params, &presult, &dummy_off_loc,
290 NULL, NULL);
291
292 /*
293 * Report the number of tuples reclaimed to pgstats. This is
294 * presult.ndeleted minus the number of newly-LP_DEAD-set items.
295 *
296 * We derive the number of dead tuples like this to avoid totally
297 * forgetting about items that were set to LP_DEAD, since they
298 * still need to be cleaned up by VACUUM. We only want to count
299 * heap-only tuples that just became LP_UNUSED in our report,
300 * which don't.
301 *
302 * VACUUM doesn't have to compensate in the same way when it
303 * tracks ndeleted, since it will set the same LP_DEAD items to
304 * LP_UNUSED separately.
305 */
306 if (presult.ndeleted > presult.nnewlpdead)
/* NOTE(review): source line 307 was dropped by the renderer; per the reference list it is the pgstat_update_heap_dead_tuples(relation, call head for the argument line below. */
308 presult.ndeleted - presult.nnewlpdead);
309 }
310
311 /* And release buffer lock */
/* NOTE(review): the call text on line 313 was dropped by the renderer; per the reference list it is LockBuffer(buffer, BUFFER_LOCK_UNLOCK). */
313
314 /*
315 * We avoid reuse of any free space created on the page by unrelated
316 * UPDATEs/INSERTs by opting to not update the FSM at this point. The
317 * free space should be reused by UPDATEs to *this* page.
318 */
319 }
320}
void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition: bufmgr.c:5699
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5952
@ BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:205
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
#define Max(x, y)
Definition: c.h:997
size_t Size
Definition: c.h:625
@ PRUNE_ON_ACCESS
Definition: heapam.h:228
void pgstat_update_heap_dead_tuples(Relation rel, int delta)
bool GlobalVisTestIsRemovableXid(GlobalVisState *state, TransactionId xid)
Definition: procarray.c:4243
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4086
void heap_page_prune_and_freeze(PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:819
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition: rel.h:390
#define HEAP_DEFAULT_FILLFACTOR
Definition: rel.h:361
bool RecoveryInProgress(void)
Definition: xlog.c:6461

References BUFFER_LOCK_UNLOCK, BufferGetPage(), ConditionalLockBufferForCleanup(), GlobalVisTestFor(), GlobalVisTestIsRemovableXid(), HEAP_DEFAULT_FILLFACTOR, heap_page_prune_and_freeze(), LockBuffer(), Max, PruneFreezeResult::ndeleted, PruneFreezeResult::nnewlpdead, PageGetHeapFreeSpace(), PageIsFull(), pgstat_update_heap_dead_tuples(), PRUNE_ON_ACCESS, RecoveryInProgress(), PruneFreezeParams::relation, RelationGetTargetPageFreeSpace, and TransactionIdIsValid.

Referenced by BitmapHeapScanNextBlock(), heap_prepare_pagescan(), and heapam_index_fetch_tuple().

◆ heap_page_will_freeze()

static bool heap_page_will_freeze ( Relation  relation,
Buffer  buffer,
bool  did_tuple_hint_fpi,
bool  do_prune,
bool  do_hint_prune,
PruneState prstate 
)
static

Definition at line 663 of file pruneheap.c.

668{
/* Decide whether heap_page_prune_and_freeze() should execute its prepared freeze plans: required when pagefrz.freeze_required, otherwise opportunistic when an FPI is (or will be) emitted and freezing makes the page all-frozen. */
669 bool do_freeze = false;
670
671 /*
672 * If the caller specified we should not attempt to freeze any tuples,
673 * validate that everything is in the right state and return.
674 */
675 if (!prstate->attempt_freeze)
676 {
677 Assert(!prstate->all_frozen && prstate->nfrozen == 0);
678 Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
679 return false;
680 }
681
682 if (prstate->pagefrz.freeze_required)
683 {
684 /*
685 * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
686 * before FreezeLimit/MultiXactCutoff is present. Must freeze to
687 * advance relfrozenxid/relminmxid.
688 */
689 do_freeze = true;
690 }
691 else
692 {
693 /*
694 * Opportunistically freeze the page if we are generating an FPI
695 * anyway and if doing so means that we can set the page all-frozen
696 * afterwards (might not happen until VACUUM's final heap pass).
697 *
698 * XXX: Previously, we knew if pruning emitted an FPI by checking
699 * pgWalUsage.wal_fpi before and after pruning. Once the freeze and
700 * prune records were combined, this heuristic couldn't be used
701 * anymore. The opportunistic freeze heuristic must be improved;
702 * however, for now, try to approximate the old logic.
703 */
704 if (prstate->all_frozen && prstate->nfrozen > 0)
705 {
706 Assert(prstate->all_visible);
707
708 /*
709 * Freezing would make the page all-frozen. Have already emitted
710 * an FPI or will do so anyway?
711 */
712 if (RelationNeedsWAL(relation))
713 {
714 if (did_tuple_hint_fpi)
715 do_freeze = true;
716 else if (do_prune)
717 {
718 if (XLogCheckBufferNeedsBackup(buffer))
719 do_freeze = true;
720 }
721 else if (do_hint_prune)
722 {
/* NOTE(review): source line 723 is missing from this listing; per the reference list it is an XLogHintBitIsNeeded() test guarding this assignment. */
724 do_freeze = true;
725 }
726 }
727 }
728 }
729
730 if (do_freeze)
731 {
732 /*
733 * Validate the tuples we will be freezing before entering the
734 * critical section.
735 */
736 heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
737
738 /*
739 * Calculate what the snapshot conflict horizon should be for a record
740 * freezing tuples. We can use the visibility_cutoff_xid as our cutoff
741 * for conflicts when the whole page is eligible to become all-frozen
742 * in the VM once we're done with it. Otherwise, we generate a
743 * conservative cutoff by stepping back from OldestXmin.
744 */
745 if (prstate->all_frozen)
/* NOTE(review): line 746 was dropped by the renderer; per the comment above and the reference list it assigns frz_conflict_horizon from visibility_cutoff_xid for the all-frozen case. */
747 else
748 {
749 /* Avoids false conflicts when hot_standby_feedback in use */
750 prstate->frz_conflict_horizon = prstate->cutoffs->OldestXmin;
/* NOTE(review): line 751 was dropped; per the reference list it applies TransactionIdRetreat to frz_conflict_horizon ("stepping back from OldestXmin"). */
752 }
753 }
754 else if (prstate->nfrozen > 0)
755 {
756 /*
757 * The page contained some tuples that were not already frozen, and we
758 * chose not to freeze them now. The page won't be all-frozen then.
759 */
760 Assert(!prstate->pagefrz.freeze_required);
761
762 prstate->all_frozen = false;
763 prstate->nfrozen = 0; /* avoid miscounts in instrumentation */
764 }
765 else
766 {
767 /*
768 * We have no freeze plans to execute. The page might already be
769 * all-frozen (perhaps only following pruning), though. Such pages
770 * can be marked all-frozen in the VM by our caller, even though none
771 * of its tuples were newly frozen here.
772 */
773 }
774
775 return do_freeze;
776}
void heap_pre_freeze_checks(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7372
bool freeze_required
Definition: heapam.h:182
struct VacuumCutoffs * cutoffs
Definition: pruneheap.c:47
TransactionId OldestXmin
Definition: vacuum.h:279
#define TransactionIdRetreat(dest)
Definition: transam.h:141
#define XLogHintBitIsNeeded()
Definition: xlog.h:122
bool XLogCheckBufferNeedsBackup(Buffer buffer)
Definition: xloginsert.c:1049

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, HeapPageFreeze::freeze_required, PruneState::frozen, PruneState::frz_conflict_horizon, heap_pre_freeze_checks(), PruneState::lpdead_items, PruneState::nfrozen, VacuumCutoffs::OldestXmin, PruneState::pagefrz, RelationNeedsWAL, TransactionIdRetreat, PruneState::visibility_cutoff_xid, XLogCheckBufferNeedsBackup(), and XLogHintBitIsNeeded.

Referenced by heap_page_prune_and_freeze().

◆ heap_prune_chain()

static void heap_prune_chain ( Page  page,
BlockNumber  blockno,
OffsetNumber  maxoff,
OffsetNumber  rootoffnum,
PruneState prstate 
)
static

Definition at line 1103 of file pruneheap.c.

1105{
/* Walk one HOT chain starting at rootoffnum, classify its members, and record the per-item prune actions (redirect/dead/unused/unchanged) into prstate. */
/* NOTE(review): this rendered listing omits several source lines (numbering gaps). Per the reference list: 1106 declares priorXmax = InvalidTransactionId and 1109 declares the chainitems[MaxHeapTuplesPerPage] array used below. */
1107 ItemId rootlp;
1108 OffsetNumber offnum;
1110
1111 /*
1112 * After traversing the HOT chain, ndeadchain is the index in chainitems
1113 * of the first live successor after the last dead item.
1114 */
1115 int ndeadchain = 0,
1116 nchain = 0;
1117
1118 rootlp = PageGetItemId(page, rootoffnum);
1119
1120 /* Start from the root tuple */
1121 offnum = rootoffnum;
1122
1123 /* while not end of the chain */
1124 for (;;)
1125 {
1126 HeapTupleHeader htup;
1127 ItemId lp;
1128
1129 /* Sanity check (pure paranoia) */
1130 if (offnum < FirstOffsetNumber)
1131 break;
1132
1133 /*
1134 * An offset past the end of page's line pointer array is possible
1135 * when the array was truncated (original item must have been unused)
1136 */
1137 if (offnum > maxoff)
1138 break;
1139
1140 /* If item is already processed, stop --- it must not be same chain */
1141 if (prstate->processed[offnum])
1142 break;
1143
1144 lp = PageGetItemId(page, offnum);
1145
1146 /*
1147 * Unused item obviously isn't part of the chain. Likewise, a dead
1148 * line pointer can't be part of the chain. Both of those cases were
1149 * already marked as processed.
1150 */
1151 Assert(ItemIdIsUsed(lp));
1152 Assert(!ItemIdIsDead(lp));
1153
1154 /*
1155 * If we are looking at the redirected root line pointer, jump to the
1156 * first normal tuple in the chain. If we find a redirect somewhere
1157 * else, stop --- it must not be same chain.
1158 */
1159 if (ItemIdIsRedirected(lp))
1160 {
1161 if (nchain > 0)
1162 break; /* not at start of chain */
1163 chainitems[nchain++] = offnum;
1164 offnum = ItemIdGetRedirect(rootlp);
1165 continue;
1166 }
1167
/* NOTE(review): line 1168 (an Assert that lp is a normal item, per ItemIdIsNormal in the reference list) was dropped by the renderer. */
1169
1170 htup = (HeapTupleHeader) PageGetItem(page, lp);
1171
1172 /*
1173 * Check the tuple XMIN against prior XMAX, if any
1174 */
1175 if (TransactionIdIsValid(priorXmax) &&
/* NOTE(review): line 1176 was dropped; per the reference list it compares HeapTupleHeaderGetXmin(htup) against priorXmax with TransactionIdEquals to complete this condition. */
1177 break;
1178
1179 /*
1180 * OK, this tuple is indeed a member of the chain.
1181 */
1182 chainitems[nchain++] = offnum;
1183
1184 switch (htsv_get_valid_status(prstate->htsv[offnum]))
1185 {
1186 case HEAPTUPLE_DEAD:
1187
1188 /* Remember the last DEAD tuple seen */
1189 ndeadchain = nchain;
/* NOTE(review): line 1190, the HeapTupleHeaderAdvanceConflictHorizon(htup, call head for the argument line below, was dropped by the renderer. */
1191 &prstate->latest_xid_removed);
1192 /* Advance to next chain member */
1193 break;
1194
/* NOTE(review): the case HEAPTUPLE_RECENTLY_DEAD: label on line 1195 was dropped by the renderer; the comments below describe that case. */
1196
1197 /*
1198 * We don't need to advance the conflict horizon for
1199 * RECENTLY_DEAD tuples, even if we are removing them. This
1200 * is because we only remove RECENTLY_DEAD tuples if they
1201 * precede a DEAD tuple, and the DEAD tuple must have been
1202 * inserted by a newer transaction than the RECENTLY_DEAD
1203 * tuple by virtue of being later in the chain. We will have
1204 * advanced the conflict horizon for the DEAD tuple.
1205 */
1206
1207 /*
1208 * Advance past RECENTLY_DEAD tuples just in case there's a
1209 * DEAD one after them. We have to make sure that we don't
1210 * miss any DEAD tuples, since DEAD tuples that still have
1211 * tuple storage after pruning will confuse VACUUM.
1212 */
1213 break;
1214
/* NOTE(review): case labels on lines 1215 and 1217 (HEAPTUPLE_INSERT_IN_PROGRESS and HEAPTUPLE_DELETE_IN_PROGRESS, per the reference list) were dropped; they fall through with HEAPTUPLE_LIVE to process_chain. */
1216 case HEAPTUPLE_LIVE:
1218 goto process_chain;
1219
1220 default:
1221 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1222 goto process_chain;
1223 }
1224
1225 /*
1226 * If the tuple is not HOT-updated, then we are at the end of this
1227 * HOT-update chain.
1228 */
1229 if (!HeapTupleHeaderIsHotUpdated(htup))
1230 goto process_chain;
1231
1232 /* HOT implies it can't have moved to different partition */
/* NOTE(review): line 1233 (an Assert using HeapTupleHeaderIndicatesMovedPartitions, per the reference list) was dropped by the renderer. */
1234
1235 /*
1236 * Advance to next chain member.
1237 */
1238 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blockno);
1239 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1240 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1241 }
1242
1243 if (ItemIdIsRedirected(rootlp) && nchain < 2)
1244 {
1245 /*
1246 * We found a redirect item that doesn't point to a valid follow-on
1247 * item. This can happen if the loop in heap_page_prune_and_freeze()
1248 * caused us to visit the dead successor of a redirect item before
1249 * visiting the redirect item. We can clean up by setting the
1250 * redirect item to LP_DEAD state or LP_UNUSED if the caller
1251 * indicated.
1252 */
1253 heap_prune_record_dead_or_unused(prstate, rootoffnum, false);
1254 return;
1255 }
1256
1257process_chain:
1258
1259 if (ndeadchain == 0)
1260 {
1261 /*
1262 * No DEAD tuple was found, so the chain is entirely composed of
1263 * normal, unchanged tuples. Leave it alone.
1264 */
1265 int i = 0;
1266
1267 if (ItemIdIsRedirected(rootlp))
1268 {
1269 heap_prune_record_unchanged_lp_redirect(prstate, rootoffnum);
1270 i++;
1271 }
1272 for (; i < nchain; i++)
1273 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1274 }
1275 else if (ndeadchain == nchain)
1276 {
1277 /*
1278 * The entire chain is dead. Mark the root line pointer LP_DEAD, and
1279 * fully remove the other tuples in the chain.
1280 */
1281 heap_prune_record_dead_or_unused(prstate, rootoffnum, ItemIdIsNormal(rootlp));
1282 for (int i = 1; i < nchain; i++)
1283 heap_prune_record_unused(prstate, chainitems[i], true);
1284 }
1285 else
1286 {
1287 /*
1288 * We found a DEAD tuple in the chain. Redirect the root line pointer
1289 * to the first non-DEAD tuple, and mark as unused each intermediate
1290 * item that we are able to remove from the chain.
1291 */
1292 heap_prune_record_redirect(prstate, rootoffnum, chainitems[ndeadchain],
1293 ItemIdIsNormal(rootlp));
1294 for (int i = 1; i < ndeadchain; i++)
1295 heap_prune_record_unused(prstate, chainitems[i], true);
1296
1297 /* the rest of tuples in the chain are normal, unchanged tuples */
1298 for (int i = ndeadchain; i < nchain; i++)
1299 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1300 }
1301}
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition: heapam.c:8018
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static HTSV_Result htsv_get_valid_status(int status)
Definition: pruneheap.c:1064
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1402
static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
Definition: pruneheap.c:1319
static void heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1435
static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1385
static void heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1646
bool processed[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:87
int8 htsv[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:99

References Assert(), elog, ERROR, FirstOffsetNumber, heap_prune_record_dead_or_unused(), heap_prune_record_redirect(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_redirect(), heap_prune_record_unused(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, htsv_get_valid_status(), i, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), PruneState::latest_xid_removed, MaxHeapTuplesPerPage, PageGetItem(), PageGetItemId(), PruneState::processed, HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by prune_freeze_plan().

◆ heap_prune_record_dead()

static void heap_prune_record_dead ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1350 of file pruneheap.c.

1352{
1353 Assert(!prstate->processed[offnum]);
1354 prstate->processed[offnum] = true;
1355
1356 Assert(prstate->ndead < MaxHeapTuplesPerPage);
1357 prstate->nowdead[prstate->ndead] = offnum;
1358 prstate->ndead++;
1359
1360 /*
1361 * Deliberately delay unsetting all_visible and all_frozen until later
1362 * during pruning. Removable dead tuples shouldn't preclude freezing the
1363 * page.
1364 */
1365
1366 /* Record the dead offset for vacuum */
1367 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1368
1369 /*
1370 * If the root entry had been a normal tuple, we are deleting it, so count
1371 * it in the result. But changing a redirect (even to DEAD state) doesn't
1372 * count.
1373 */
1374 if (was_normal)
1375 prstate->ndeleted++;
1376}
OffsetNumber * deadoffsets
Definition: pruneheap.c:130

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, MaxHeapTuplesPerPage, PruneState::ndead, PruneState::ndeleted, PruneState::nowdead, and PruneState::processed.

Referenced by heap_prune_record_dead_or_unused().

◆ heap_prune_record_dead_or_unused()

static void heap_prune_record_dead_or_unused ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1385 of file pruneheap.c.

1387{
1388 /*
1389 * If the caller set mark_unused_now to true, we can remove dead tuples
1390 * during pruning instead of marking their line pointers dead. Set this
1391 * tuple's line pointer LP_UNUSED. We hint that this option is less
1392 * likely.
1393 */
1394 if (unlikely(prstate->mark_unused_now))
1395 heap_prune_record_unused(prstate, offnum, was_normal);
1396 else
1397 heap_prune_record_dead(prstate, offnum, was_normal);
1398}
#define unlikely(x)
Definition: c.h:418
static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1350
bool mark_unused_now
Definition: pruneheap.c:44

References heap_prune_record_dead(), heap_prune_record_unused(), PruneState::mark_unused_now, and unlikely.

Referenced by heap_prune_chain().

◆ heap_prune_record_prunable()

static void heap_prune_record_prunable ( PruneState prstate,
TransactionId  xid 
)
static

Definition at line 1305 of file pruneheap.c.

1306{
/* Fold xid into the working copy of the page's prune hint: keep the oldest valid xid seen. */
1307 /*
1308 * This should exactly match the PageSetPrunable macro. We can't store
1309 * directly into the page header yet, so we update working state.
1310 */
/* NOTE(review): line 1311 is missing from this rendered listing; per the reference list it is an Assert involving TransactionIdIsNormal(xid). */
1312 if (!TransactionIdIsValid(prstate->new_prune_xid) ||
1313 TransactionIdPrecedes(xid, prstate->new_prune_xid))
1314 prstate->new_prune_xid = xid;
1315}
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.h:263

References Assert(), PruneState::new_prune_xid, TransactionIdIsNormal, TransactionIdIsValid, and TransactionIdPrecedes().

Referenced by heap_prune_record_unchanged_lp_normal().

◆ heap_prune_record_redirect()

static void heap_prune_record_redirect ( PruneState prstate,
OffsetNumber  offnum,
OffsetNumber  rdoffnum,
bool  was_normal 
)
static

Definition at line 1319 of file pruneheap.c.

1322{
/* Record that line pointer offnum is to be turned into a redirect pointing at rdoffnum; pairs are appended to prstate->redirected. */
1323 Assert(!prstate->processed[offnum]);
1324 prstate->processed[offnum] = true;
1325
1326 /*
1327 * Do not mark the redirect target here. It needs to be counted
1328 * separately as an unchanged tuple.
1329 */
1330
/* NOTE(review): line 1331 is missing from this rendered listing; per the reference list it is an Assert bounding nredirected by MaxHeapTuplesPerPage. */
1332 prstate->redirected[prstate->nredirected * 2] = offnum;
1333 prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
1334
1335 prstate->nredirected++;
1336
1337 /*
1338 * If the root entry had been a normal tuple, we are deleting it, so count
1339 * it in the result. But changing a redirect (even to DEAD state) doesn't
1340 * count.
1341 */
1342 if (was_normal)
1343 prstate->ndeleted++;
1344
1345 prstate->hastup = true;
1346}

References Assert(), PruneState::hastup, MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nredirected, PruneState::processed, and PruneState::redirected.

Referenced by heap_prune_chain().

◆ heap_prune_record_unchanged_lp_dead()

static void heap_prune_record_unchanged_lp_dead ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1618 of file pruneheap.c.

1619{
1620 Assert(!prstate->processed[offnum]);
1621 prstate->processed[offnum] = true;
1622
1623 /*
1624 * Deliberately don't set hastup for LP_DEAD items. We make the soft
1625 * assumption that any LP_DEAD items encountered here will become
1626 * LP_UNUSED later on, before count_nondeletable_pages is reached. If we
1627 * don't make this assumption then rel truncation will only happen every
1628 * other VACUUM, at most. Besides, VACUUM must treat
1629 * hastup/nonempty_pages as provisional no matter how LP_DEAD items are
1630 * handled (handled here, or handled later on).
1631 *
1632 * Similarly, don't unset all_visible and all_frozen until later, at the
1633 * end of heap_page_prune_and_freeze(). This will allow us to attempt to
1634 * freeze the page after pruning. As long as we unset it before updating
1635 * the visibility map, this will be correct.
1636 */
1637
1638 /* Record the dead offset for vacuum */
1639 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1640}

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, and PruneState::processed.

Referenced by prune_freeze_plan().

◆ heap_prune_record_unchanged_lp_normal()

static void heap_prune_record_unchanged_lp_normal ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1435 of file pruneheap.c.

1436{
/* Account for a normal tuple that pruning leaves in place: update live/recently-dead counters, page all-visible/all-frozen tracking, the prune hint, and (optionally) prepare a freeze plan. */
1437 HeapTupleHeader htup;
1438
1439 Assert(!prstate->processed[offnum]);
1440 prstate->processed[offnum] = true;
1441
1442 prstate->hastup = true; /* the page is not empty */
1443
1444 /*
1445 * The criteria for counting a tuple as live in this block need to match
1446 * what analyze.c's acquire_sample_rows() does, otherwise VACUUM and
1447 * ANALYZE may produce wildly different reltuples values, e.g. when there
1448 * are many recently-dead tuples.
1449 *
1450 * The logic here is a bit simpler than acquire_sample_rows(), as VACUUM
1451 * can't run inside a transaction block, which makes some cases impossible
1452 * (e.g. in-progress insert from the same transaction).
1453 *
1454 * HEAPTUPLE_DEAD are handled by the other heap_prune_record_*()
1455 * subroutines. They don't count dead items like acquire_sample_rows()
1456 * does, because we assume that all dead items will become LP_UNUSED
1457 * before VACUUM finishes. This difference is only superficial. VACUUM
1458 * effectively agrees with ANALYZE about DEAD items, in the end. VACUUM
1459 * won't remember LP_DEAD items, but only because they're not supposed to
1460 * be left behind when it is done. (Cases where we bypass index vacuuming
1461 * will violate this optimistic assumption, but the overall impact of that
1462 * should be negligible.)
1463 */
1464 htup = (HeapTupleHeader) PageGetItem(page, PageGetItemId(page, offnum));
1465
1466 switch (prstate->htsv[offnum])
1467 {
1468 case HEAPTUPLE_LIVE:
1469
1470 /*
1471 * Count it as live. Not only is this natural, but it's also what
1472 * acquire_sample_rows() does.
1473 */
1474 prstate->live_tuples++;
1475
1476 /*
1477 * Is the tuple definitely visible to all transactions?
1478 *
1479 * NB: Like with per-tuple hint bits, we can't set the
1480 * PD_ALL_VISIBLE flag if the inserter committed asynchronously.
1481 * See SetHintBits for more info. Check that the tuple is hinted
1482 * xmin-committed because of that.
1483 */
1484 if (prstate->all_visible)
1485 {
1486 TransactionId xmin;
1487
/* NOTE(review): line 1488 is missing from this rendered listing; per the reference list it is the !HeapTupleHeaderXminCommitted(htup) test that guards this block. */
1489 {
1490 prstate->all_visible = false;
1491 prstate->all_frozen = false;
1492 break;
1493 }
1494
1495 /*
1496 * The inserter definitely committed. But is it old enough
1497 * that everyone sees it as committed? A FrozenTransactionId
1498 * is seen as committed to everyone. Otherwise, we check if
1499 * there is a snapshot that considers this xid to still be
1500 * running, and if so, we don't consider the page all-visible.
1501 */
1502 xmin = HeapTupleHeaderGetXmin(htup);
1503
1504 /*
1505 * For now always use prstate->cutoffs for this test, because
1506 * we only update 'all_visible' and 'all_frozen' when freezing
1507 * is requested. We could use GlobalVisTestIsRemovableXid
1508 * instead, if a non-freezing caller wanted to set the VM bit.
1509 */
1510 Assert(prstate->cutoffs);
1511 if (!TransactionIdPrecedes(xmin, prstate->cutoffs->OldestXmin))
1512 {
1513 prstate->all_visible = false;
1514 prstate->all_frozen = false;
1515 break;
1516 }
1517
1518 /* Track newest xmin on page. */
1519 if (TransactionIdFollows(xmin, prstate->visibility_cutoff_xid) &&
/* NOTE(review): line 1520, the second conjunct of this condition (a TransactionIdIsNormal(xmin) test per the reference list), was dropped by the renderer. */
1521 prstate->visibility_cutoff_xid = xmin;
1522 }
1523 break;
1524
/* NOTE(review): the case HEAPTUPLE_RECENTLY_DEAD: label on line 1525 was dropped by the renderer. */
1526 prstate->recently_dead_tuples++;
1527 prstate->all_visible = false;
1528 prstate->all_frozen = false;
1529
1530 /*
1531 * This tuple will soon become DEAD. Update the hint field so
1532 * that the page is reconsidered for pruning in future.
1533 */
/* NOTE(review): lines 1534-1535, a heap_prune_record_prunable() call per the comment above and the reference list, were dropped by the renderer. */
1536 break;
1537
/* NOTE(review): the case HEAPTUPLE_INSERT_IN_PROGRESS: label on line 1538 was dropped by the renderer. */
1539
1540 /*
1541 * We do not count these rows as live, because we expect the
1542 * inserting transaction to update the counters at commit, and we
1543 * assume that will happen only after we report our results. This
1544 * assumption is a bit shaky, but it is what acquire_sample_rows()
1545 * does, so be consistent.
1546 */
1547 prstate->all_visible = false;
1548 prstate->all_frozen = false;
1549
1550 /*
1551 * If we wanted to optimize for aborts, we might consider marking
1552 * the page prunable when we see INSERT_IN_PROGRESS. But we
1553 * don't. See related decisions about when to mark the page
1554 * prunable in heapam.c.
1555 */
1556 break;
1557
/* NOTE(review): the case HEAPTUPLE_DELETE_IN_PROGRESS: label on line 1558 was dropped by the renderer. */
1559
1560 /*
1561 * This is an expected case during concurrent vacuum. Count such
1562 * rows as live. As above, we assume the deleting transaction
1563 * will commit and update the counters after we report.
1564 */
1565 prstate->live_tuples++;
1566 prstate->all_visible = false;
1567 prstate->all_frozen = false;
1568
1569 /*
1570 * This tuple may soon become DEAD. Update the hint field so that
1571 * the page is reconsidered for pruning in future.
1572 */
/* NOTE(review): lines 1573-1574, a heap_prune_record_prunable() call using HeapTupleHeaderGetUpdateXid per the reference list, were dropped by the renderer. */
1575 break;
1576
1577 default:
1578
1579 /*
1580 * DEAD tuples should've been passed to heap_prune_record_dead()
1581 * or heap_prune_record_unused() instead.
1582 */
1583 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result %d",
1584 prstate->htsv[offnum]);
1585 break;
1586 }
1587
1588 /* Consider freezing any normal tuples which will not be removed */
1589 if (prstate->attempt_freeze)
1590 {
1591 bool totally_frozen;
1592
1593 if ((heap_prepare_freeze_tuple(htup,
1594 prstate->cutoffs,
1595 &prstate->pagefrz,
1596 &prstate->frozen[prstate->nfrozen],
1597 &totally_frozen)))
1598 {
1599 /* Save prepared freeze plan for later */
1600 prstate->frozen[prstate->nfrozen++].offset = offnum;
1601 }
1602
1603 /*
1604 * If any tuple isn't either totally frozen already or eligible to
1605 * become totally frozen (according to its freeze plan), then the page
1606 * definitely cannot be set all-frozen in the visibility map later on.
1607 */
1608 if (!totally_frozen)
1609 prstate->all_frozen = false;
1610 }
1611}
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, const struct VacuumCutoffs *cutoffs, HeapPageFreeze *pagefrz, HeapTupleFreeze *frz, bool *totally_frozen)
Definition: heapam.c:7099
static bool HeapTupleHeaderXminCommitted(const HeapTupleHeaderData *tup)
Definition: htup_details.h:337
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
Definition: pruneheap.c:1305

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, elog, ERROR, PruneState::frozen, PruneState::hastup, heap_prepare_freeze_tuple(), heap_prune_record_prunable(), HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), PruneState::htsv, PruneState::live_tuples, PruneState::nfrozen, HeapTupleFreeze::offset, VacuumCutoffs::OldestXmin, PruneState::pagefrz, PageGetItem(), PageGetItemId(), PruneState::processed, PruneState::recently_dead_tuples, TransactionIdFollows(), TransactionIdIsNormal, TransactionIdPrecedes(), and PruneState::visibility_cutoff_xid.

Referenced by heap_prune_chain(), and prune_freeze_plan().

◆ heap_prune_record_unchanged_lp_redirect()

static void heap_prune_record_unchanged_lp_redirect ( PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1646 of file pruneheap.c.

1647{
1648 /*
1649 * A redirect line pointer doesn't count as a live tuple.
1650 *
1651 * If we leave a redirect line pointer in place, there will be another
1652 * tuple on the page that it points to. We will do the bookkeeping for
1653 * that separately. So we have nothing to do here, except remember that
1654 * we processed this item.
1655 */
1656 Assert(!prstate->processed[offnum]);
1657 prstate->processed[offnum] = true;
1658}

References Assert(), and PruneState::processed.

Referenced by heap_prune_chain().

◆ heap_prune_record_unchanged_lp_unused()

static void heap_prune_record_unchanged_lp_unused ( Page  page,
PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 1424 of file pruneheap.c.

1425{
/* An LP_UNUSED item needs no state change; just record that this offset has been visited so chain traversal will skip it. */
1426 Assert(!prstate->processed[offnum]);
1427 prstate->processed[offnum] = true;
1428}

References Assert(), and PruneState::processed.

Referenced by prune_freeze_plan().

◆ heap_prune_record_unused()

static void heap_prune_record_unused ( PruneState prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1402 of file pruneheap.c.

1403{
/* Record that line pointer offnum is to be set LP_UNUSED; appended to prstate->nowunused. */
1404 Assert(!prstate->processed[offnum]);
1405 prstate->processed[offnum] = true;
1406
/* NOTE(review): line 1407 is missing from this rendered listing; per the reference list it is an Assert bounding nunused by MaxHeapTuplesPerPage. */
1408 prstate->nowunused[prstate->nunused] = offnum;
1409 prstate->nunused++;
1410
1411 /*
1412 * If the root entry had been a normal tuple, we are deleting it, so count
1413 * it in the result. But changing a redirect (even to DEAD state) doesn't
1414 * count.
1415 */
1416 if (was_normal)
1417 prstate->ndeleted++;
1418}

References Assert(), MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nowunused, PruneState::nunused, and PruneState::processed.

Referenced by heap_prune_chain(), heap_prune_record_dead_or_unused(), and prune_freeze_plan().

◆ heap_prune_satisfies_vacuum()

static HTSV_Result heap_prune_satisfies_vacuum ( PruneState prstate,
HeapTuple  tup,
Buffer  buffer 
)
static

Definition at line 1021 of file pruneheap.c.

1022{
1023 HTSV_Result res;
1024 TransactionId dead_after;
1025
1026 res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
1027
1028 if (res != HEAPTUPLE_RECENTLY_DEAD)
1029 return res;
1030
1031 /*
1032 * For VACUUM, we must be sure to prune tuples with xmax older than
1033 * OldestXmin -- a visibility cutoff determined at the beginning of
1034 * vacuuming the relation. OldestXmin is used for freezing determination
1035 * and we cannot freeze dead tuples' xmaxes.
1036 */
 1037 if (prstate->cutoffs &&
 1038 TransactionIdIsValid(prstate->cutoffs->OldestXmin) &&
 1039 NormalTransactionIdPrecedes(dead_after, prstate->cutoffs->OldestXmin))
1040 return HEAPTUPLE_DEAD;
1041
1042 /*
1043 * Determine whether or not the tuple is considered dead when compared
1044 * with the provided GlobalVisState. On-access pruning does not provide
1045 * VacuumCutoffs. And for vacuum, even if the tuple's xmax is not older
1046 * than OldestXmin, GlobalVisTestIsRemovableXid() could find the row dead
1047 * if the GlobalVisState has been updated since the beginning of vacuuming
1048 * the relation.
1049 */
1050 if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
1051 return HEAPTUPLE_DEAD;
1052
1053 return res;
1054}
HTSV_Result
Definition: heapam.h:125
HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer, TransactionId *dead_after)
GlobalVisState * vistest
Definition: pruneheap.c:42
#define NormalTransactionIdPrecedes(id1, id2)
Definition: transam.h:147

References PruneState::cutoffs, GlobalVisTestIsRemovableXid(), HEAPTUPLE_DEAD, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuumHorizon(), NormalTransactionIdPrecedes, VacuumCutoffs::OldestXmin, TransactionIdIsValid, and PruneState::vistest.

Referenced by prune_freeze_plan().

◆ htsv_get_valid_status()

static HTSV_Result htsv_get_valid_status ( int  status)
inlinestatic

Definition at line 1064 of file pruneheap.c.

1065{
1066 Assert(status >= HEAPTUPLE_DEAD &&
 1067        status <= HEAPTUPLE_DELETE_IN_PROGRESS);
 1068 return (HTSV_Result) status;
1069}

References Assert(), HEAPTUPLE_DEAD, and HEAPTUPLE_DELETE_IN_PROGRESS.

Referenced by heap_prune_chain().

◆ log_heap_prune_and_freeze()

void log_heap_prune_and_freeze ( Relation  relation,
Buffer  buffer,
Buffer  vmbuffer,
uint8  vmflags,
TransactionId  conflict_xid,
bool  cleanup_lock,
PruneReason  reason,
HeapTupleFreeze frozen,
int  nfrozen,
OffsetNumber redirected,
int  nredirected,
OffsetNumber dead,
int  ndead,
OffsetNumber unused,
int  nunused 
)

Definition at line 2167 of file pruneheap.c.

2176{
2177 xl_heap_prune xlrec;
2178 XLogRecPtr recptr;
2179 uint8 info;
2180 uint8 regbuf_flags_heap;
2181
2182 /* The following local variables hold data registered in the WAL record: */
 2183 xlhp_freeze_plan plans[MaxHeapTuplesPerPage];
 2184 xlhp_freeze_plans freeze_plans;
2185 xlhp_prune_items redirect_items;
2186 xlhp_prune_items dead_items;
2187 xlhp_prune_items unused_items;
 2188 OffsetNumber frz_offsets[MaxHeapTuplesPerPage];
 2189 bool do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
2190 bool do_set_vm = vmflags & VISIBILITYMAP_VALID_BITS;
2191
2192 Assert((vmflags & VISIBILITYMAP_VALID_BITS) == vmflags);
2193
2194 xlrec.flags = 0;
2195 regbuf_flags_heap = REGBUF_STANDARD;
2196
2197 /*
2198 * We can avoid an FPI of the heap page if the only modification we are
2199 * making to it is to set PD_ALL_VISIBLE and checksums/wal_log_hints are
2200 * disabled. Note that if we explicitly skip an FPI, we must not stamp the
2201 * heap page with this record's LSN. Recovery skips records <= the stamped
2202 * LSN, so this could lead to skipping an earlier FPI needed to repair a
2203 * torn page.
2204 */
2205 if (!do_prune &&
2206 nfrozen == 0 &&
2207 (!do_set_vm || !XLogHintBitIsNeeded()))
2208 regbuf_flags_heap |= REGBUF_NO_IMAGE;
2209
2210 /*
2211 * Prepare data for the buffer. The arrays are not actually in the
2212 * buffer, but we pretend that they are. When XLogInsert stores a full
2213 * page image, the arrays can be omitted.
2214 */
 2215 XLogBeginInsert();
 2216 XLogRegisterBuffer(0, buffer, regbuf_flags_heap);
2217
2218 if (do_set_vm)
2219 XLogRegisterBuffer(1, vmbuffer, 0);
2220
2221 if (nfrozen > 0)
2222 {
2223 int nplans;
2224
 2225 xlrec.flags |= XLHP_HAS_FREEZE_PLANS;
 2226
2227 /*
2228 * Prepare deduplicated representation for use in the WAL record. This
2229 * destructively sorts frozen tuples array in-place.
2230 */
2231 nplans = heap_log_freeze_plan(frozen, nfrozen, plans, frz_offsets);
2232
2233 freeze_plans.nplans = nplans;
2234 XLogRegisterBufData(0, &freeze_plans,
2235 offsetof(xlhp_freeze_plans, plans));
2236 XLogRegisterBufData(0, plans,
2237 sizeof(xlhp_freeze_plan) * nplans);
2238 }
2239 if (nredirected > 0)
2240 {
 2241 xlrec.flags |= XLHP_HAS_REDIRECTIONS;
 2242
2243 redirect_items.ntargets = nredirected;
2244 XLogRegisterBufData(0, &redirect_items,
2245 offsetof(xlhp_prune_items, data));
2246 XLogRegisterBufData(0, redirected,
2247 sizeof(OffsetNumber[2]) * nredirected);
2248 }
2249 if (ndead > 0)
2250 {
2251 xlrec.flags |= XLHP_HAS_DEAD_ITEMS;
2252
2253 dead_items.ntargets = ndead;
2254 XLogRegisterBufData(0, &dead_items,
2255 offsetof(xlhp_prune_items, data));
2256 XLogRegisterBufData(0, dead,
2257 sizeof(OffsetNumber) * ndead);
2258 }
2259 if (nunused > 0)
2260 {
 2261 xlrec.flags |= XLHP_HAS_NOW_UNUSED_ITEMS;
 2262
2263 unused_items.ntargets = nunused;
2264 XLogRegisterBufData(0, &unused_items,
2265 offsetof(xlhp_prune_items, data));
2266 XLogRegisterBufData(0, unused,
2267 sizeof(OffsetNumber) * nunused);
2268 }
2269 if (nfrozen > 0)
2270 XLogRegisterBufData(0, frz_offsets,
2271 sizeof(OffsetNumber) * nfrozen);
2272
2273 /*
2274 * Prepare the main xl_heap_prune record. We already set the XLHP_HAS_*
2275 * flag above.
2276 */
2277 if (vmflags & VISIBILITYMAP_ALL_VISIBLE)
2278 {
2279 xlrec.flags |= XLHP_VM_ALL_VISIBLE;
2280 if (vmflags & VISIBILITYMAP_ALL_FROZEN)
2281 xlrec.flags |= XLHP_VM_ALL_FROZEN;
2282 }
 2283 if (RelationIsAccessibleInLogicalDecoding(relation))
 2284 xlrec.flags |= XLHP_IS_CATALOG_REL;
 2285 if (TransactionIdIsValid(conflict_xid))
 2286 xlrec.flags |= XLHP_HAS_CONFLICT_HORIZON;
 2287 if (cleanup_lock)
2288 xlrec.flags |= XLHP_CLEANUP_LOCK;
2289 else
2290 {
2291 Assert(nredirected == 0 && ndead == 0);
2292 /* also, any items in 'unused' must've been LP_DEAD previously */
2293 }
 2294 XLogRegisterData(&xlrec, SizeOfHeapPrune);
 2295 if (TransactionIdIsValid(conflict_xid))
2296 XLogRegisterData(&conflict_xid, sizeof(TransactionId));
2297
2298 switch (reason)
2299 {
 2300 case PRUNE_ON_ACCESS:
 2301 info = XLOG_HEAP2_PRUNE_ON_ACCESS;
 2302 break;
 2303 case PRUNE_VACUUM_SCAN:
 2304 info = XLOG_HEAP2_PRUNE_VACUUM_SCAN;
 2305 break;
 2306 case PRUNE_VACUUM_CLEANUP:
 2307 info = XLOG_HEAP2_PRUNE_VACUUM_CLEANUP;
 2308 break;
 2309 default:
2310 elog(ERROR, "unrecognized prune reason: %d", (int) reason);
2311 break;
2312 }
2313 recptr = XLogInsert(RM_HEAP2_ID, info);
2314
2315 if (do_set_vm)
2316 {
2317 Assert(BufferIsDirty(vmbuffer));
2318 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2319 }
2320
2321 /*
2322 * See comment at the top of the function about regbuf_flags_heap for
2323 * details on when we can advance the page LSN.
2324 */
2325 if (do_prune || nfrozen > 0 || (do_set_vm && XLogHintBitIsNeeded()))
2326 {
2327 Assert(BufferIsDirty(buffer));
2328 PageSetLSN(BufferGetPage(buffer), recptr);
2329 }
2330}
bool BufferIsDirty(Buffer buffer)
Definition: bufmgr.c:3005
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:390
uint8_t uint8
Definition: c.h:550
@ PRUNE_VACUUM_CLEANUP
Definition: heapam.h:230
@ PRUNE_VACUUM_SCAN
Definition: heapam.h:229
#define XLHP_HAS_CONFLICT_HORIZON
Definition: heapam_xlog.h:316
#define XLHP_HAS_FREEZE_PLANS
Definition: heapam_xlog.h:322
#define XLHP_VM_ALL_VISIBLE
Definition: heapam_xlog.h:339
#define SizeOfHeapPrune
Definition: heapam_xlog.h:295
#define XLHP_HAS_NOW_UNUSED_ITEMS
Definition: heapam_xlog.h:331
#define XLHP_VM_ALL_FROZEN
Definition: heapam_xlog.h:340
#define XLHP_HAS_REDIRECTIONS
Definition: heapam_xlog.h:329
#define XLOG_HEAP2_PRUNE_VACUUM_SCAN
Definition: heapam_xlog.h:61
#define XLOG_HEAP2_PRUNE_ON_ACCESS
Definition: heapam_xlog.h:60
#define XLHP_CLEANUP_LOCK
Definition: heapam_xlog.h:308
#define XLHP_HAS_DEAD_ITEMS
Definition: heapam_xlog.h:330
#define XLOG_HEAP2_PRUNE_VACUUM_CLEANUP
Definition: heapam_xlog.h:62
#define XLHP_IS_CATALOG_REL
Definition: heapam_xlog.h:298
const void * data
static int heap_log_freeze_plan(HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)
Definition: pruneheap.c:2088
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:694
#define VISIBILITYMAP_VALID_BITS
#define VISIBILITYMAP_ALL_FROZEN
#define VISIBILITYMAP_ALL_VISIBLE
uint64 XLogRecPtr
Definition: xlogdefs.h:21
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:478
void XLogRegisterBufData(uint8 block_id, const void *data, uint32 len)
Definition: xloginsert.c:409
void XLogRegisterData(const void *data, uint32 len)
Definition: xloginsert.c:368
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:245
void XLogBeginInsert(void)
Definition: xloginsert.c:152
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define REGBUF_NO_IMAGE
Definition: xloginsert.h:33

References Assert(), BufferGetPage(), BufferIsDirty(), data, elog, ERROR, xl_heap_prune::flags, heap_log_freeze_plan(), MaxHeapTuplesPerPage, xlhp_freeze_plans::nplans, xlhp_prune_items::ntargets, PageSetLSN(), PRUNE_ON_ACCESS, PRUNE_VACUUM_CLEANUP, PRUNE_VACUUM_SCAN, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationIsAccessibleInLogicalDecoding, SizeOfHeapPrune, TransactionIdIsValid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_FREEZE_PLANS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLogBeginInsert(), XLogHintBitIsNeeded, XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by heap_page_prune_and_freeze(), and lazy_vacuum_heap_page().

◆ page_verify_redirects()

static void page_verify_redirects ( Page  page)
static

Definition at line 1847 of file pruneheap.c.

1848{
1849#ifdef USE_ASSERT_CHECKING
1850 OffsetNumber offnum;
1851 OffsetNumber maxoff;
1852
1853 maxoff = PageGetMaxOffsetNumber(page);
1854 for (offnum = FirstOffsetNumber;
1855 offnum <= maxoff;
1856 offnum = OffsetNumberNext(offnum))
1857 {
1858 ItemId itemid = PageGetItemId(page, offnum);
1859 OffsetNumber targoff;
1860 ItemId targitem;
1861 HeapTupleHeader htup;
1862
1863 if (!ItemIdIsRedirected(itemid))
1864 continue;
1865
1866 targoff = ItemIdGetRedirect(itemid);
1867 targitem = PageGetItemId(page, targoff);
1868
1869 Assert(ItemIdIsUsed(targitem));
1870 Assert(ItemIdIsNormal(targitem));
1871 Assert(ItemIdHasStorage(targitem));
1872 htup = (HeapTupleHeader) PageGetItem(page, targitem);
 1873 Assert(HeapTupleHeaderIsHeapOnly(htup));
 1874 }
1875#endif
1876}

References Assert(), FirstOffsetNumber, HeapTupleHeaderIsHeapOnly(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, OffsetNumberNext, PageGetItem(), PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by heap_page_prune_execute().

◆ prune_freeze_plan()

static void prune_freeze_plan ( Oid  reloid,
Buffer  buffer,
PruneState prstate,
OffsetNumber off_loc 
)
static

Definition at line 458 of file pruneheap.c.

460{
461 Page page = BufferGetPage(buffer);
462 BlockNumber blockno = BufferGetBlockNumber(buffer);
 463 OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
 464 OffsetNumber offnum;
465 HeapTupleData tup;
466
467 tup.t_tableOid = reloid;
468
469 /*
470 * Determine HTSV for all tuples, and queue them up for processing as HOT
471 * chain roots or as heap-only items.
472 *
473 * Determining HTSV only once for each tuple is required for correctness,
474 * to deal with cases where running HTSV twice could result in different
475 * results. For example, RECENTLY_DEAD can turn to DEAD if another
476 * checked item causes GlobalVisTestIsRemovableFullXid() to update the
477 * horizon, or INSERT_IN_PROGRESS can change to DEAD if the inserting
478 * transaction aborts.
479 *
480 * It's also good for performance. Most commonly tuples within a page are
481 * stored at decreasing offsets (while the items are stored at increasing
482 * offsets). When processing all tuples on a page this leads to reading
483 * memory at decreasing offsets within a page, with a variable stride.
484 * That's hard for CPU prefetchers to deal with. Processing the items in
485 * reverse order (and thus the tuples in increasing order) increases
486 * prefetching efficiency significantly / decreases the number of cache
487 * misses.
488 */
489 for (offnum = maxoff;
490 offnum >= FirstOffsetNumber;
491 offnum = OffsetNumberPrev(offnum))
492 {
493 ItemId itemid = PageGetItemId(page, offnum);
494 HeapTupleHeader htup;
495
496 /*
497 * Set the offset number so that we can display it along with any
498 * error that occurred while processing this tuple.
499 */
500 *off_loc = offnum;
501
502 prstate->processed[offnum] = false;
503 prstate->htsv[offnum] = -1;
504
505 /* Nothing to do if slot doesn't contain a tuple */
506 if (!ItemIdIsUsed(itemid))
507 {
508 heap_prune_record_unchanged_lp_unused(page, prstate, offnum);
509 continue;
510 }
511
512 if (ItemIdIsDead(itemid))
513 {
514 /*
515 * If the caller set mark_unused_now true, we can set dead line
516 * pointers LP_UNUSED now.
517 */
518 if (unlikely(prstate->mark_unused_now))
519 heap_prune_record_unused(prstate, offnum, false);
520 else
521 heap_prune_record_unchanged_lp_dead(page, prstate, offnum);
522 continue;
523 }
524
525 if (ItemIdIsRedirected(itemid))
526 {
527 /* This is the start of a HOT chain */
528 prstate->root_items[prstate->nroot_items++] = offnum;
529 continue;
530 }
531
532 Assert(ItemIdIsNormal(itemid));
533
534 /*
535 * Get the tuple's visibility status and queue it up for processing.
536 */
537 htup = (HeapTupleHeader) PageGetItem(page, itemid);
538 tup.t_data = htup;
539 tup.t_len = ItemIdGetLength(itemid);
540 ItemPointerSet(&tup.t_self, blockno, offnum);
541
542 prstate->htsv[offnum] = heap_prune_satisfies_vacuum(prstate, &tup,
543 buffer);
544
545 if (!HeapTupleHeaderIsHeapOnly(htup))
546 prstate->root_items[prstate->nroot_items++] = offnum;
547 else
548 prstate->heaponly_items[prstate->nheaponly_items++] = offnum;
549 }
550
551 /*
552 * Process HOT chains.
553 *
554 * We added the items to the array starting from 'maxoff', so by
555 * processing the array in reverse order, we process the items in
556 * ascending offset number order. The order doesn't matter for
557 * correctness, but some quick micro-benchmarking suggests that this is
558 * faster. (Earlier PostgreSQL versions, which scanned all the items on
559 * the page instead of using the root_items array, also did it in
560 * ascending offset number order.)
561 */
562 for (int i = prstate->nroot_items - 1; i >= 0; i--)
563 {
564 offnum = prstate->root_items[i];
565
566 /* Ignore items already processed as part of an earlier chain */
567 if (prstate->processed[offnum])
568 continue;
569
570 /* see preceding loop */
571 *off_loc = offnum;
572
573 /* Process this item or chain of items */
574 heap_prune_chain(page, blockno, maxoff, offnum, prstate);
575 }
576
577 /*
578 * Process any heap-only tuples that were not already processed as part of
579 * a HOT chain.
580 */
581 for (int i = prstate->nheaponly_items - 1; i >= 0; i--)
582 {
583 offnum = prstate->heaponly_items[i];
584
585 if (prstate->processed[offnum])
586 continue;
587
588 /* see preceding loop */
589 *off_loc = offnum;
590
591 /*
592 * If the tuple is DEAD and doesn't chain to anything else, mark it
593 * unused. (If it does chain, we can only remove it as part of
594 * pruning its chain.)
595 *
596 * We need this primarily to handle aborted HOT updates, that is,
597 * XMIN_INVALID heap-only tuples. Those might not be linked to by any
598 * chain, since the parent tuple might be re-updated before any
599 * pruning occurs. So we have to be able to reap them separately from
600 * chain-pruning. (Note that HeapTupleHeaderIsHotUpdated will never
601 * return true for an XMIN_INVALID tuple, so this code will work even
602 * when there were sequential updates within the aborted transaction.)
603 */
604 if (prstate->htsv[offnum] == HEAPTUPLE_DEAD)
605 {
606 ItemId itemid = PageGetItemId(page, offnum);
607 HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(page, itemid);
608
610 {
612 &prstate->latest_xid_removed);
613 heap_prune_record_unused(prstate, offnum, true);
614 }
615 else
616 {
617 /*
618 * This tuple should've been processed and removed as part of
619 * a HOT chain, so something's wrong. To preserve evidence,
620 * we don't dare to remove it. We cannot leave behind a DEAD
621 * tuple either, because that will cause VACUUM to error out.
622 * Throwing an error with a distinct error message seems like
623 * the least bad option.
624 */
625 elog(ERROR, "dead heap-only tuple (%u, %d) is not linked to from any HOT chain",
626 blockno, offnum);
627 }
628 }
629 else
630 heap_prune_record_unchanged_lp_normal(page, prstate, offnum);
631 }
632
633 /* We should now have processed every tuple exactly once */
634#ifdef USE_ASSERT_CHECKING
635 for (offnum = FirstOffsetNumber;
636 offnum <= maxoff;
637 offnum = OffsetNumberNext(offnum))
638 {
639 *off_loc = offnum;
640
641 Assert(prstate->processed[offnum]);
642 }
643#endif
644
645 /* Clear the offset information once we have processed the given page. */
646 *off_loc = InvalidOffsetNumber;
647}
uint32 BlockNumber
Definition: block.h:31
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:4318
#define likely(x)
Definition: c.h:417
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
static void heap_prune_chain(Page page, BlockNumber blockno, OffsetNumber maxoff, OffsetNumber rootoffnum, PruneState *prstate)
Definition: pruneheap.c:1103
static void heap_prune_record_unchanged_lp_dead(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1618
static void heap_prune_record_unchanged_lp_unused(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1424
static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
Definition: pruneheap.c:1021
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
OffsetNumber heaponly_items[MaxHeapTuplesPerPage]
Definition: pruneheap.c:79
int nroot_items
Definition: pruneheap.c:76
int nheaponly_items
Definition: pruneheap.c:78
OffsetNumber root_items[MaxHeapTuplesPerPage]
Definition: pruneheap.c:77

References Assert(), BufferGetBlockNumber(), BufferGetPage(), elog, ERROR, FirstOffsetNumber, heap_prune_chain(), heap_prune_record_unchanged_lp_dead(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_unused(), heap_prune_record_unused(), heap_prune_satisfies_vacuum(), PruneState::heaponly_items, HEAPTUPLE_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, i, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), PruneState::latest_xid_removed, likely, PruneState::mark_unused_now, PruneState::nheaponly_items, PruneState::nroot_items, OffsetNumberNext, OffsetNumberPrev, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PruneState::processed, PruneState::root_items, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and unlikely.

Referenced by heap_page_prune_and_freeze().

◆ prune_freeze_setup()

static void prune_freeze_setup ( PruneFreezeParams params,
TransactionId new_relfrozen_xid,
MultiXactId new_relmin_mxid,
PruneFreezeResult presult,
PruneState prstate 
)
static

Definition at line 330 of file pruneheap.c.

335{
336 /* Copy parameters to prstate */
337 prstate->vistest = params->vistest;
338 prstate->mark_unused_now =
 339 (params->options & HEAP_PAGE_PRUNE_MARK_UNUSED_NOW) != 0;
 340
341 /* cutoffs must be provided if we will attempt freezing */
342 Assert(!(params->options & HEAP_PAGE_PRUNE_FREEZE) || params->cutoffs);
343 prstate->attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0;
344 prstate->cutoffs = params->cutoffs;
345
346 /*
347 * Our strategy is to scan the page and make lists of items to change,
348 * then apply the changes within a critical section. This keeps as much
349 * logic as possible out of the critical section, and also ensures that
350 * WAL replay will work the same as the normal case.
351 *
352 * First, initialize the new pd_prune_xid value to zero (indicating no
353 * prunable tuples). If we find any tuples which may soon become
354 * prunable, we will save the lowest relevant XID in new_prune_xid. Also
355 * initialize the rest of our working state.
356 */
 357 prstate->new_prune_xid = InvalidTransactionId;
 358 prstate->latest_xid_removed = InvalidTransactionId;
 359 prstate->nredirected = prstate->ndead = prstate->nunused = 0;
360 prstate->nfrozen = 0;
361 prstate->nroot_items = 0;
362 prstate->nheaponly_items = 0;
363
364 /* initialize page freezing working state */
365 prstate->pagefrz.freeze_required = false;
366 if (prstate->attempt_freeze)
367 {
368 Assert(new_relfrozen_xid && new_relmin_mxid);
369 prstate->pagefrz.FreezePageRelfrozenXid = *new_relfrozen_xid;
370 prstate->pagefrz.NoFreezePageRelfrozenXid = *new_relfrozen_xid;
371 prstate->pagefrz.FreezePageRelminMxid = *new_relmin_mxid;
372 prstate->pagefrz.NoFreezePageRelminMxid = *new_relmin_mxid;
373 }
374 else
375 {
376 Assert(!new_relfrozen_xid && !new_relmin_mxid);
 377 prstate->pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
 378 prstate->pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
 379 prstate->pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
 380 prstate->pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
 381 }
382
383 prstate->ndeleted = 0;
384 prstate->live_tuples = 0;
385 prstate->recently_dead_tuples = 0;
386 prstate->hastup = false;
387 prstate->lpdead_items = 0;
388
389 /*
390 * deadoffsets are filled in during pruning but are only used to populate
391 * PruneFreezeResult->deadoffsets. To avoid needing two copies of the
392 * array, just save a pointer to the result offsets array in the
393 * PruneState.
394 */
395 prstate->deadoffsets = presult->deadoffsets;
 396 prstate->frz_conflict_horizon = InvalidTransactionId;
 397
398 /*
399 * Vacuum may update the VM after we're done. We can keep track of
400 * whether the page will be all-visible and all-frozen after pruning and
401 * freezing to help the caller to do that.
402 *
403 * Currently, only VACUUM sets the VM bits. To save the effort, only do
404 * the bookkeeping if the caller needs it. Currently, that's tied to
405 * HEAP_PAGE_PRUNE_FREEZE, but it could be a separate flag if you wanted
406 * to update the VM bits without also freezing or freeze without also
407 * setting the VM bits.
408 *
409 * In addition to telling the caller whether it can set the VM bit, we
410 * also use 'all_visible' and 'all_frozen' for our own decision-making. If
411 * the whole page would become frozen, we consider opportunistically
412 * freezing tuples. We will not be able to freeze the whole page if there
413 * are tuples present that are not visible to everyone or if there are
414 * dead tuples which are not yet removable. However, dead tuples which
415 * will be removed by the end of vacuuming should not preclude us from
416 * opportunistically freezing. Because of that, we do not immediately
417 * clear all_visible and all_frozen when we see LP_DEAD items. We fix
418 * that after scanning the line pointers. We must correct all_visible and
419 * all_frozen before we return them to the caller, so that the caller
420 * doesn't set the VM bits incorrectly.
421 */
422 if (prstate->attempt_freeze)
423 {
424 prstate->all_visible = true;
425 prstate->all_frozen = true;
426 }
427 else
428 {
429 /*
430 * Initializing to false allows skipping the work to update them in
431 * heap_prune_record_unchanged_lp_normal().
432 */
433 prstate->all_visible = false;
434 prstate->all_frozen = false;
435 }
436
437 /*
438 * The visibility cutoff xid is the newest xmin of live tuples on the
439 * page. In the common case, this will be set as the conflict horizon the
440 * caller can use for updating the VM. If, at the end of freezing and
441 * pruning, the page is all-frozen, there is no possibility that any
442 * running transaction on the standby does not see tuples on the page as
443 * all-visible, so the conflict horizon remains InvalidTransactionId.
444 */
 445 prstate->visibility_cutoff_xid = InvalidTransactionId;
 446}
#define HEAP_PAGE_PRUNE_FREEZE
Definition: heapam.h:44
#define HEAP_PAGE_PRUNE_MARK_UNUSED_NOW
Definition: heapam.h:43
#define InvalidMultiXactId
Definition: multixact.h:25
GlobalVisState * vistest
Definition: heapam.h:262
struct VacuumCutoffs * cutoffs
Definition: heapam.h:271
OffsetNumber deadoffsets[MaxHeapTuplesPerPage]
Definition: heapam.h:314

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, PruneFreezeParams::cutoffs, PruneState::deadoffsets, PruneFreezeResult::deadoffsets, HeapPageFreeze::freeze_required, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, PruneState::frz_conflict_horizon, PruneState::hastup, HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, InvalidMultiXactId, InvalidTransactionId, PruneState::latest_xid_removed, PruneState::live_tuples, PruneState::lpdead_items, PruneState::mark_unused_now, PruneState::ndead, PruneState::ndeleted, PruneState::new_prune_xid, PruneState::nfrozen, PruneState::nheaponly_items, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, PruneState::nredirected, PruneState::nroot_items, PruneState::nunused, PruneFreezeParams::options, PruneState::pagefrz, PruneState::recently_dead_tuples, PruneState::visibility_cutoff_xid, PruneState::vistest, and PruneFreezeParams::vistest.

Referenced by heap_page_prune_and_freeze().