PostgreSQL Source Code git master
pruneheap.c File Reference
#include "postgres.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/multixact.h"
#include "access/transam.h"
#include "access/visibilitymapdefs.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "commands/vacuum.h"
#include "executor/instrument.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/rel.h"
#include "utils/snapmgr.h"

Data Structures

struct  PruneState
 

Functions

static void prune_freeze_setup (PruneFreezeParams *params, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid, const PruneFreezeResult *presult, PruneState *prstate)
 
static void prune_freeze_plan (Oid reloid, Buffer buffer, PruneState *prstate, OffsetNumber *off_loc)
 
static HTSV_Result heap_prune_satisfies_vacuum (PruneState *prstate, HeapTuple tup, Buffer buffer)
 
static HTSV_Result htsv_get_valid_status (int status)
 
static void heap_prune_chain (Page page, BlockNumber blockno, OffsetNumber maxoff, OffsetNumber rootoffnum, PruneState *prstate)
 
static void heap_prune_record_prunable (PruneState *prstate, TransactionId xid)
 
static void heap_prune_record_redirect (PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
 
static void heap_prune_record_dead (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_dead_or_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unused (PruneState *prstate, OffsetNumber offnum, bool was_normal)
 
static void heap_prune_record_unchanged_lp_unused (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_normal (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_dead (Page page, PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unchanged_lp_redirect (PruneState *prstate, OffsetNumber offnum)
 
static void page_verify_redirects (Page page)
 
static bool heap_page_will_freeze (Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
 
void heap_page_prune_opt (Relation relation, Buffer buffer)
 
void heap_page_prune_and_freeze (PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
 
void heap_page_prune_execute (Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
 
void heap_get_root_tuples (Page page, OffsetNumber *root_offsets)
 
static bool heap_log_freeze_eq (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_cmp (const void *arg1, const void *arg2)
 
static void heap_log_freeze_new_plan (xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
 
static int heap_log_freeze_plan (HeapTupleFreeze *tuples, int ntuples, xlhp_freeze_plan *plans_out, OffsetNumber *offsets_out)
 
void log_heap_prune_and_freeze (Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
 

Function Documentation

◆ heap_get_root_tuples()

void heap_get_root_tuples ( Page  page,
OffsetNumber *  root_offsets 
)

Definition at line 1885 of file pruneheap.c.

1886{
1887 OffsetNumber offnum,
1888 maxoff;
1889
1890 MemSet(root_offsets, InvalidOffsetNumber,
1891 MaxHeapTuplesPerPage * sizeof(OffsetNumber));
1892
1893 maxoff = PageGetMaxOffsetNumber(page);
1894 for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
1895 {
1896 ItemId lp = PageGetItemId(page, offnum);
1897 HeapTupleHeader htup;
1898 OffsetNumber nextoffnum;
1899 TransactionId priorXmax;
1900
1901 /* skip unused and dead items */
1902 if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
1903 continue;
1904
1905 if (ItemIdIsNormal(lp))
1906 {
1907 htup = (HeapTupleHeader) PageGetItem(page, lp);
1908
1909 /*
1910 * Check if this tuple is part of a HOT-chain rooted at some other
1911 * tuple. If so, skip it for now; we'll process it when we find
1912 * its root.
1913 */
1914 if (HeapTupleHeaderIsHeapOnly(htup))
1915 continue;
1916
1917 /*
1918 * This is either a plain tuple or the root of a HOT-chain.
1919 * Remember it in the mapping.
1920 */
1921 root_offsets[offnum - 1] = offnum;
1922
1923 /* If it's not the start of a HOT-chain, we're done with it */
1924 if (!HeapTupleHeaderIsHotUpdated(htup))
1925 continue;
1926
1927 /* Set up to scan the HOT-chain */
1928 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1929 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1930 }
1931 else
1932 {
1933 /* Must be a redirect item. We do not set its root_offsets entry */
1934 Assert(ItemIdIsRedirected(lp));
1935 /* Set up to scan the HOT-chain */
1936 nextoffnum = ItemIdGetRedirect(lp);
1937 priorXmax = InvalidTransactionId;
1938 }
1939
1940 /*
1941 * Now follow the HOT-chain and collect other tuples in the chain.
1942 *
1943 * Note: Even though this is a nested loop, the complexity of the
1944 * function is O(N) because each tuple on the page is visited at most
1945 * twice, once in the outer loop and once in HOT-chain chases.
1946 * chases.
1947 */
1948 for (;;)
1949 {
1950 /* Sanity check (pure paranoia) */
1951 if (offnum < FirstOffsetNumber)
1952 break;
1953
1954 /*
1955 * An offset past the end of page's line pointer array is possible
1956 * when the array was truncated
1957 */
1958 if (offnum > maxoff)
1959 break;
1960
1961 lp = PageGetItemId(page, nextoffnum);
1962
1963 /* Check for broken chains */
1964 if (!ItemIdIsNormal(lp))
1965 break;
1966
1967 htup = (HeapTupleHeader) PageGetItem(page, lp);
1968
1969 if (TransactionIdIsValid(priorXmax) &&
1970 !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
1971 break;
1972
1973 /* Remember the root line pointer for this item */
1974 root_offsets[nextoffnum - 1] = offnum;
1975
1976 /* Advance to next chain member, if any */
1977 if (!HeapTupleHeaderIsHotUpdated(htup))
1978 break;
1979
1980 /* HOT implies it can't have moved to different partition */
1981 Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1982
1983 nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1984 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1985 }
1986 }
1987}
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define MemSet(start, val, len)
Definition: c.h:1022
uint32 TransactionId
Definition: c.h:660
Assert(PointerIsAligned(start, uint64))
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool HeapTupleHeaderIsHeapOnly(const HeapTupleHeaderData *tup)
Definition: htup_details.h:555
static TransactionId HeapTupleHeaderGetXmin(const HeapTupleHeaderData *tup)
Definition: htup_details.h:324
static bool HeapTupleHeaderIndicatesMovedPartitions(const HeapTupleHeaderData *tup)
Definition: htup_details.h:480
static bool HeapTupleHeaderIsHotUpdated(const HeapTupleHeaderData *tup)
Definition: htup_details.h:534
static TransactionId HeapTupleHeaderGetUpdateXid(const HeapTupleHeaderData *tup)
Definition: htup_details.h:397
#define MaxHeapTuplesPerPage
Definition: htup_details.h:624
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
ItemPointerData t_ctid
Definition: htup_details.h:161
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References Assert(), FirstOffsetNumber, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetOffsetNumber(), MaxHeapTuplesPerPage, MemSet, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_build_range_scan(), and heapam_index_validate_scan().
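
A minimal usage sketch (hypothetical: buffer and offnum are assumed to come from the surrounding scan, and the caller must hold at least a shared content lock on the buffer). The mapping is indexed by offset - 1, and entries that were never assigned remain InvalidOffsetNumber:

    OffsetNumber root_offsets[MaxHeapTuplesPerPage];
    Page         page = BufferGetPage(buffer);

    heap_get_root_tuples(page, root_offsets);

    /* Index entries for a heap-only tuple point at its HOT-chain root. */
    if (OffsetNumberIsValid(root_offsets[offnum - 1]))
    {
        ItemPointerData root_tid;

        ItemPointerSet(&root_tid, BufferGetBlockNumber(buffer),
                       root_offsets[offnum - 1]);
        /* ... compare root_tid against the TID stored in the index ... */
    }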

◆ heap_log_freeze_cmp()

static int heap_log_freeze_cmp ( const void *  arg1,
const void *  arg2 
)
static

Definition at line 2012 of file pruneheap.c.

2013{
2014 HeapTupleFreeze *frz1 = (HeapTupleFreeze *) arg1;
2015 HeapTupleFreeze *frz2 = (HeapTupleFreeze *) arg2;
2016
2017 if (frz1->xmax < frz2->xmax)
2018 return -1;
2019 else if (frz1->xmax > frz2->xmax)
2020 return 1;
2021
2022 if (frz1->t_infomask2 < frz2->t_infomask2)
2023 return -1;
2024 else if (frz1->t_infomask2 > frz2->t_infomask2)
2025 return 1;
2026
2027 if (frz1->t_infomask < frz2->t_infomask)
2028 return -1;
2029 else if (frz1->t_infomask > frz2->t_infomask)
2030 return 1;
2031
2032 if (frz1->frzflags < frz2->frzflags)
2033 return -1;
2034 else if (frz1->frzflags > frz2->frzflags)
2035 return 1;
2036
2037 /*
2038 * heap_log_freeze_eq would consider these tuple-wise plans to be equal.
2039 * (So the tuples will share a single canonical freeze plan.)
2040 *
2041 * We tiebreak on page offset number to keep each freeze plan's page
2042 * offset number array individually sorted. (Unnecessary, but be tidy.)
2043 */
2044 if (frz1->offset < frz2->offset)
2045 return -1;
2046 else if (frz1->offset > frz2->offset)
2047 return 1;
2048
2049 Assert(false);
2050 return 0;
2051}
uint8 frzflags
Definition: heapam.h:147
uint16 t_infomask2
Definition: heapam.h:145
TransactionId xmax
Definition: heapam.h:144
OffsetNumber offset
Definition: heapam.h:152
uint16 t_infomask
Definition: heapam.h:146

References Assert(), HeapTupleFreeze::frzflags, HeapTupleFreeze::offset, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_eq()

static bool heap_log_freeze_eq ( xlhp_freeze_plan *  plan,
HeapTupleFreeze *  frz 
)
inlinestatic

Definition at line 1996 of file pruneheap.c.

1997{
1998 if (plan->xmax == frz->xmax &&
1999 plan->t_infomask2 == frz->t_infomask2 &&
2000 plan->t_infomask == frz->t_infomask &&
2001 plan->frzflags == frz->frzflags)
2002 return true;
2003
2004 /* Caller must call heap_log_freeze_new_plan again for frz */
2005 return false;
2006}
#define plan(x)
Definition: pg_regress.c:161

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_new_plan()

static void heap_log_freeze_new_plan ( xlhp_freeze_plan *  plan,
HeapTupleFreeze *  frz 
)
inlinestatic

Definition at line 2058 of file pruneheap.c.

2059{
2060 plan->xmax = frz->xmax;
2061 plan->t_infomask2 = frz->t_infomask2;
2062 plan->t_infomask = frz->t_infomask;
2063 plan->frzflags = frz->frzflags;
2064 plan->ntuples = 1; /* for now */
2065}

References HeapTupleFreeze::frzflags, plan, HeapTupleFreeze::t_infomask, HeapTupleFreeze::t_infomask2, and HeapTupleFreeze::xmax.

Referenced by heap_log_freeze_plan().

◆ heap_log_freeze_plan()

static int heap_log_freeze_plan ( HeapTupleFreeze *  tuples,
int  ntuples,
xlhp_freeze_plan *  plans_out,
OffsetNumber *  offsets_out 
)
static

Definition at line 2078 of file pruneheap.c.

2081{
2082 int nplans = 0;
2083
2084 /* Sort tuple-based freeze plans in the order required to deduplicate */
2085 qsort(tuples, ntuples, sizeof(HeapTupleFreeze), heap_log_freeze_cmp);
2086
2087 for (int i = 0; i < ntuples; i++)
2088 {
2089 HeapTupleFreeze *frz = tuples + i;
2090
2091 if (i == 0)
2092 {
2093 /* New canonical freeze plan starting with first tup */
2094 heap_log_freeze_new_plan(plans_out, frz);
2095 nplans++;
2096 }
2097 else if (heap_log_freeze_eq(plans_out, frz))
2098 {
2099 /* tup matches open canonical plan -- include tup in it */
2100 Assert(offsets_out[i - 1] < frz->offset);
2101 plans_out->ntuples++;
2102 }
2103 else
2104 {
2105 /* Tup doesn't match current plan -- done with it now */
2106 plans_out++;
2107
2108 /* New canonical freeze plan starting with this tup */
2109 heap_log_freeze_new_plan(plans_out, frz);
2110 nplans++;
2111 }
2112
2113 /*
2114 * Save page offset number in dedicated buffer in passing.
2115 *
2116 * REDO routine relies on the record's offset numbers array grouping
2117 * offset numbers by freeze plan. The sort order within each grouping
2118 * is ascending offset number order, just to keep things tidy.
2119 */
2120 offsets_out[i] = frz->offset;
2121 }
2122
2123 Assert(nplans > 0 && nplans <= ntuples);
2124
2125 return nplans;
2126}
int i
Definition: isn.c:77
#define qsort(a, b, c, d)
Definition: port.h:500
static int heap_log_freeze_cmp(const void *arg1, const void *arg2)
Definition: pruneheap.c:2012
static bool heap_log_freeze_eq(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:1996
static void heap_log_freeze_new_plan(xlhp_freeze_plan *plan, HeapTupleFreeze *frz)
Definition: pruneheap.c:2058

References Assert(), heap_log_freeze_cmp(), heap_log_freeze_eq(), heap_log_freeze_new_plan(), i, xlhp_freeze_plan::ntuples, HeapTupleFreeze::offset, and qsort.

Referenced by log_heap_prune_and_freeze().
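
To make the deduplication concrete, here is an illustrative sketch with made-up field values (heap_log_freeze_plan() is static to pruneheap.c, so this cannot actually be called from outside the file). Two of the three tuples share identical freeze metadata and collapse into one canonical plan after sorting:

    HeapTupleFreeze tuples[] = {
        {.xmax = 200, .t_infomask = 0x0100, .t_infomask2 = 3, .frzflags = 0, .offset = 7},
        {.xmax = 100, .t_infomask = 0x0100, .t_infomask2 = 3, .frzflags = 0, .offset = 2},
        {.xmax = 100, .t_infomask = 0x0100, .t_infomask2 = 3, .frzflags = 0, .offset = 5},
    };
    xlhp_freeze_plan plans[3];
    OffsetNumber offsets[3];

    int nplans = heap_log_freeze_plan(tuples, 3, plans, offsets);

    /* nplans == 2: plans[0] (xmax 100) has ntuples == 2 and covers offsets
     * {2, 5}; plans[1] (xmax 200) covers {7}. offsets[] is {2, 5, 7},
     * grouped by plan and ascending within each group. */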

◆ heap_page_prune_and_freeze()

void heap_page_prune_and_freeze ( PruneFreezeParams *  params,
PruneFreezeResult *  presult,
OffsetNumber *  off_loc,
TransactionId *  new_relfrozen_xid,
MultiXactId *  new_relmin_mxid 
)

Definition at line 809 of file pruneheap.c.

814{
815 Buffer buffer = params->buffer;
816 Page page = BufferGetPage(buffer);
817 PruneState prstate;
818 bool do_freeze;
819 bool do_prune;
820 bool do_hint_prune;
821 bool did_tuple_hint_fpi;
822 int64 fpi_before = pgWalUsage.wal_fpi;
823
824 /* Initialize prstate */
825 prune_freeze_setup(params,
826 new_relfrozen_xid, new_relmin_mxid,
827 presult, &prstate);
828
829 /*
830 * Examine all line pointers and tuple visibility information to determine
831 * which line pointers should change state and which tuples may be frozen.
832 * Prepare queue of state changes to later be executed in a critical
833 * section.
834 */
835 prune_freeze_plan(RelationGetRelid(params->relation),
836 buffer, &prstate, off_loc);
837
838 /*
839 * If checksums are enabled, calling heap_prune_satisfies_vacuum() while
840 * checking tuple visibility information in prune_freeze_plan() may have
841 * caused an FPI to be emitted.
842 */
843 did_tuple_hint_fpi = fpi_before != pgWalUsage.wal_fpi;
844
845 do_prune = prstate.nredirected > 0 ||
846 prstate.ndead > 0 ||
847 prstate.nunused > 0;
848
849 /*
850 * Even if we don't prune anything, if we found a new value for the
851 * pd_prune_xid field or the page was marked full, we will update the hint
852 * bit.
853 */
854 do_hint_prune = ((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
855 PageIsFull(page);
856
857 /*
858 * Decide if we want to go ahead with freezing according to the freeze
859 * plans we prepared, or not.
860 */
861 do_freeze = heap_page_will_freeze(params->relation, buffer,
862 did_tuple_hint_fpi,
863 do_prune,
864 do_hint_prune,
865 &prstate);
866
867 /*
868 * While scanning the line pointers, we did not clear
869 * all_visible/all_frozen when encountering LP_DEAD items because we
870 * wanted the decision whether or not to freeze the page to be unaffected
871 * by the short-term presence of LP_DEAD items. These LP_DEAD items are
872 * effectively assumed to be LP_UNUSED items in the making. It doesn't
873 * matter which vacuum heap pass (initial pass or final pass) ends up
874 * setting the page all-frozen, as long as the ongoing VACUUM does it.
875 *
876 * Now that we finished determining whether or not to freeze the page,
877 * update all_visible and all_frozen so that they reflect the true state
878 * of the page for setting PD_ALL_VISIBLE and VM bits.
879 */
880 if (prstate.lpdead_items > 0)
881 prstate.all_visible = prstate.all_frozen = false;
882
883 Assert(!prstate.all_frozen || prstate.all_visible);
884
885 /* Any error while applying the changes is critical */
886 START_CRIT_SECTION();
887
888 if (do_hint_prune)
889 {
890 /*
891 * Update the page's pd_prune_xid field to either zero, or the lowest
892 * XID of any soon-prunable tuple.
893 */
894 ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
895
896 /*
897 * Also clear the "page is full" flag, since there's no point in
898 * repeating the prune/defrag process until something else happens to
899 * the page.
900 */
901 PageClearFull(page);
902
903 /*
904 * If that's all we had to do to the page, this is a non-WAL-logged
905 * hint. If we are going to freeze or prune the page, we will mark
906 * the buffer dirty below.
907 */
908 if (!do_freeze && !do_prune)
909 MarkBufferDirtyHint(buffer, true);
910 }
911
912 if (do_prune || do_freeze)
913 {
914 /* Apply the planned item changes and repair page fragmentation. */
915 if (do_prune)
916 {
917 heap_page_prune_execute(buffer, false,
918 prstate.redirected, prstate.nredirected,
919 prstate.nowdead, prstate.ndead,
920 prstate.nowunused, prstate.nunused);
921 }
922
923 if (do_freeze)
924 heap_freeze_prepared_tuples(buffer, prstate.frozen, prstate.nfrozen);
925
926 MarkBufferDirty(buffer);
927
928 /*
929 * Emit a WAL XLOG_HEAP2_PRUNE* record showing what we did
930 */
931 if (RelationNeedsWAL(params->relation))
932 {
933 /*
934 * The snapshotConflictHorizon for the whole record should be the
935 * most conservative of all the horizons calculated for any of the
936 * possible modifications. If this record will prune tuples, any
937 * transactions on the standby older than the youngest xmax of the
938 * most recently removed tuple this record will prune will
939 * conflict. If this record will freeze tuples, any transactions
940 * on the standby with xids older than the youngest tuple this
941 * record will freeze will conflict.
942 */
943 TransactionId conflict_xid;
944
945 if (TransactionIdFollows(prstate.frz_conflict_horizon,
946 prstate.latest_xid_removed))
947 conflict_xid = prstate.frz_conflict_horizon;
948 else
949 conflict_xid = prstate.latest_xid_removed;
950
951 log_heap_prune_and_freeze(params->relation, buffer,
952 InvalidBuffer, /* vmbuffer */
953 0, /* vmflags */
954 conflict_xid,
955 true, params->reason,
956 prstate.frozen, prstate.nfrozen,
957 prstate.redirected, prstate.nredirected,
958 prstate.nowdead, prstate.ndead,
959 prstate.nowunused, prstate.nunused);
960 }
961 }
962
963 END_CRIT_SECTION();
964
965 /* Copy information back for caller */
966 presult->ndeleted = prstate.ndeleted;
967 presult->nnewlpdead = prstate.ndead;
968 presult->nfrozen = prstate.nfrozen;
969 presult->live_tuples = prstate.live_tuples;
970 presult->recently_dead_tuples = prstate.recently_dead_tuples;
971 presult->all_visible = prstate.all_visible;
972 presult->all_frozen = prstate.all_frozen;
973 presult->hastup = prstate.hastup;
974
975 /*
976 * For callers planning to update the visibility map, the conflict horizon
977 * for that record must be the newest xmin on the page. However, if the
978 * page is completely frozen, there can be no conflict and the
979 * vm_conflict_horizon should remain InvalidTransactionId. This includes
980 * the case that we just froze all the tuples; the prune-freeze record
981 * included the conflict XID already so the caller doesn't need it.
982 */
983 if (presult->all_frozen)
984 presult->vm_conflict_horizon = InvalidTransactionId;
985 else
986 presult->vm_conflict_horizon = prstate.visibility_cutoff_xid;
987
988 presult->lpdead_items = prstate.lpdead_items;
989 /* the presult->deadoffsets array was already filled in */
990
991 if (prstate.attempt_freeze)
992 {
993 if (presult->nfrozen > 0)
994 {
995 *new_relfrozen_xid = prstate.pagefrz.FreezePageRelfrozenXid;
996 *new_relmin_mxid = prstate.pagefrz.FreezePageRelminMxid;
997 }
998 else
999 {
1000 *new_relfrozen_xid = prstate.pagefrz.NoFreezePageRelfrozenXid;
1001 *new_relmin_mxid = prstate.pagefrz.NoFreezePageRelminMxid;
1002 }
1003 }
1004}
int Buffer
Definition: buf.h:23
#define InvalidBuffer
Definition: buf.h:25
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2943
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:5430
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:436
PageHeaderData * PageHeader
Definition: bufpage.h:173
static void PageClearFull(Page page)
Definition: bufpage.h:422
PageData * Page
Definition: bufpage.h:81
static bool PageIsFull(const PageData *page)
Definition: bufpage.h:412
int64_t int64
Definition: c.h:538
void heap_freeze_prepared_tuples(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7406
WalUsage pgWalUsage
Definition: instrument.c:22
#define START_CRIT_SECTION()
Definition: miscadmin.h:150
#define END_CRIT_SECTION()
Definition: miscadmin.h:152
static bool heap_page_will_freeze(Relation relation, Buffer buffer, bool did_tuple_hint_fpi, bool do_prune, bool do_hint_prune, PruneState *prstate)
Definition: pruneheap.c:653
static void prune_freeze_plan(Oid reloid, Buffer buffer, PruneState *prstate, OffsetNumber *off_loc)
Definition: pruneheap.c:448
void log_heap_prune_and_freeze(Relation relation, Buffer buffer, Buffer vmbuffer, uint8 vmflags, TransactionId conflict_xid, bool cleanup_lock, PruneReason reason, HeapTupleFreeze *frozen, int nfrozen, OffsetNumber *redirected, int nredirected, OffsetNumber *dead, int ndead, OffsetNumber *unused, int nunused)
Definition: pruneheap.c:2157
static void prune_freeze_setup(PruneFreezeParams *params, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid, const PruneFreezeResult *presult, PruneState *prstate)
Definition: pruneheap.c:327
void heap_page_prune_execute(Buffer buffer, bool lp_truncate_only, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition: pruneheap.c:1661
#define RelationGetRelid(relation)
Definition: rel.h:515
#define RelationNeedsWAL(relation)
Definition: rel.h:638
MultiXactId NoFreezePageRelminMxid
Definition: heapam.h:220
TransactionId FreezePageRelfrozenXid
Definition: heapam.h:208
MultiXactId FreezePageRelminMxid
Definition: heapam.h:209
TransactionId NoFreezePageRelfrozenXid
Definition: heapam.h:219
PruneReason reason
Definition: heapam.h:245
Buffer buffer
Definition: heapam.h:239
Relation relation
Definition: heapam.h:238
int recently_dead_tuples
Definition: heapam.h:285
TransactionId vm_conflict_horizon
Definition: heapam.h:300
bool all_visible
Definition: heapam.h:298
HeapPageFreeze pagefrz
Definition: pruneheap.c:104
bool all_visible
Definition: pruneheap.c:154
int ndead
Definition: pruneheap.c:56
TransactionId new_prune_xid
Definition: pruneheap.c:53
bool attempt_freeze
Definition: pruneheap.c:46
bool hastup
Definition: pruneheap.c:123
int recently_dead_tuples
Definition: pruneheap.c:120
OffsetNumber nowdead[MaxHeapTuplesPerPage]
Definition: pruneheap.c:61
TransactionId frz_conflict_horizon
Definition: pruneheap.c:137
OffsetNumber nowunused[MaxHeapTuplesPerPage]
Definition: pruneheap.c:62
int live_tuples
Definition: pruneheap.c:119
TransactionId visibility_cutoff_xid
Definition: pruneheap.c:156
bool all_frozen
Definition: pruneheap.c:155
HeapTupleFreeze frozen[MaxHeapTuplesPerPage]
Definition: pruneheap.c:63
int lpdead_items
Definition: pruneheap.c:129
int nfrozen
Definition: pruneheap.c:58
OffsetNumber redirected[MaxHeapTuplesPerPage *2]
Definition: pruneheap.c:60
int ndeleted
Definition: pruneheap.c:116
int nredirected
Definition: pruneheap.c:55
TransactionId latest_xid_removed
Definition: pruneheap.c:54
int nunused
Definition: pruneheap.c:57
int64 wal_fpi
Definition: instrument.h:54
static bool TransactionIdFollows(TransactionId id1, TransactionId id2)
Definition: transam.h:297

References PruneState::all_frozen, PruneFreezeResult::all_frozen, PruneState::all_visible, PruneFreezeResult::all_visible, Assert(), PruneState::attempt_freeze, PruneFreezeParams::buffer, BufferGetPage(), END_CRIT_SECTION, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, PruneState::frozen, PruneState::frz_conflict_horizon, PruneState::hastup, PruneFreezeResult::hastup, heap_freeze_prepared_tuples(), heap_page_prune_execute(), heap_page_will_freeze(), InvalidBuffer, InvalidTransactionId, PruneState::latest_xid_removed, PruneState::live_tuples, PruneFreezeResult::live_tuples, log_heap_prune_and_freeze(), PruneState::lpdead_items, PruneFreezeResult::lpdead_items, MarkBufferDirty(), MarkBufferDirtyHint(), PruneState::ndead, PruneState::ndeleted, PruneFreezeResult::ndeleted, PruneState::new_prune_xid, PruneState::nfrozen, PruneFreezeResult::nfrozen, PruneFreezeResult::nnewlpdead, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, PruneState::nowdead, PruneState::nowunused, PruneState::nredirected, PruneState::nunused, PageClearFull(), PruneState::pagefrz, PageIsFull(), pgWalUsage, prune_freeze_plan(), prune_freeze_setup(), PruneFreezeParams::reason, PruneState::recently_dead_tuples, PruneFreezeResult::recently_dead_tuples, PruneState::redirected, PruneFreezeParams::relation, RelationGetRelid, RelationNeedsWAL, START_CRIT_SECTION, TransactionIdFollows(), PruneState::visibility_cutoff_xid, PruneFreezeResult::vm_conflict_horizon, and WalUsage::wal_fpi.

Referenced by heap_page_prune_opt(), and lazy_scan_prune().
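
For contrast with the on-access call shown under heap_page_prune_opt(), a VACUUM-style invocation looks roughly like the hedged sketch below, modeled on lazy_scan_prune(). The rel, buf, vistest, and cutoffs variables are stand-ins for the caller's own state, the HEAP_PAGE_PRUNE_FREEZE option bit is assumed from heapam.h, and the relfrozenxid/relminmxid trackers must be seeded by the caller (the seeds below are hypothetical); a cleanup lock on the buffer is required:

    PruneFreezeParams params = {
        .relation = rel,
        .buffer = buf,
        .reason = PRUNE_VACUUM_SCAN,
        .options = HEAP_PAGE_PRUNE_FREEZE,   /* also prepare freeze plans */
        .vistest = vistest,
        .cutoffs = &cutoffs,
    };
    PruneFreezeResult presult;
    OffsetNumber off_loc;                    /* current offset, for error context */
    TransactionId new_relfrozen_xid = cutoffs.OldestXmin;  /* hypothetical seed */
    MultiXactId   new_relmin_mxid = cutoffs.OldestMxact;   /* hypothetical seed */

    heap_page_prune_and_freeze(&params, &presult, &off_loc,
                               &new_relfrozen_xid, &new_relmin_mxid);

    /* presult now reports ndeleted, nfrozen, lpdead_items (with deadoffsets),
     * all_visible/all_frozen, and vm_conflict_horizon for the caller. */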

◆ heap_page_prune_execute()

void heap_page_prune_execute ( Buffer  buffer,
bool  lp_truncate_only,
OffsetNumber *  redirected,
int  nredirected,
OffsetNumber *  nowdead,
int  ndead,
OffsetNumber *  nowunused,
int  nunused 
)

Definition at line 1661 of file pruneheap.c.

1665{
1666 Page page = BufferGetPage(buffer);
1667 OffsetNumber *offnum;
1668 HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
1669
1670 /* Shouldn't be called unless there's something to do */
1671 Assert(nredirected > 0 || ndead > 0 || nunused > 0);
1672
1673 /* If 'lp_truncate_only', we can only remove already-dead line pointers */
1674 Assert(!lp_truncate_only || (nredirected == 0 && ndead == 0));
1675
1676 /* Update all redirected line pointers */
1677 offnum = redirected;
1678 for (int i = 0; i < nredirected; i++)
1679 {
1680 OffsetNumber fromoff = *offnum++;
1681 OffsetNumber tooff = *offnum++;
1682 ItemId fromlp = PageGetItemId(page, fromoff);
1683 ItemId tolp PG_USED_FOR_ASSERTS_ONLY;
1684
1685#ifdef USE_ASSERT_CHECKING
1686
1687 /*
1688 * Any existing item that we set as an LP_REDIRECT (any 'from' item)
1689 * must be the first item from a HOT chain. If the item has tuple
1690 * storage then it can't be a heap-only tuple. Otherwise we are just
1691 * maintaining an existing LP_REDIRECT from an existing HOT chain that
1692 * has been pruned at least once before now.
1693 */
1694 if (!ItemIdIsRedirected(fromlp))
1695 {
1696 Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
1697
1698 htup = (HeapTupleHeader) PageGetItem(page, fromlp);
1699 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1700 }
1701 else
1702 {
1703 /* We shouldn't need to redundantly set the redirect */
1704 Assert(ItemIdGetRedirect(fromlp) != tooff);
1705 }
1706
1707 /*
1708 * The item that we're about to set as an LP_REDIRECT (the 'from'
1709 * item) will point to an existing item (the 'to' item) that is
1710 * already a heap-only tuple. There can be at most one LP_REDIRECT
1711 * item per HOT chain.
1712 *
1713 * We need to keep around an LP_REDIRECT item (after original
1714 * non-heap-only root tuple gets pruned away) so that it's always
1715 * possible for VACUUM to easily figure out what TID to delete from
1716 * indexes when an entire HOT chain becomes dead. A heap-only tuple
1717 * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
1718 * tuple can.
1719 *
1720 * This check may miss problems, e.g. the target of a redirect could
1721 * be marked as unused subsequently. The page_verify_redirects() check
1722 * below will catch such problems.
1723 */
1724 tolp = PageGetItemId(page, tooff);
1725 Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
1726 htup = (HeapTupleHeader) PageGetItem(page, tolp);
1727 Assert(HeapTupleHeaderIsHeapOnly(htup));
1728#endif
1729
1730 ItemIdSetRedirect(fromlp, tooff);
1731 }
1732
1733 /* Update all now-dead line pointers */
1734 offnum = nowdead;
1735 for (int i = 0; i < ndead; i++)
1736 {
1737 OffsetNumber off = *offnum++;
1738 ItemId lp = PageGetItemId(page, off);
1739
1740#ifdef USE_ASSERT_CHECKING
1741
1742 /*
1743 * An LP_DEAD line pointer must be left behind when the original item
1744 * (which is dead to everybody) could still be referenced by a TID in
1745 * an index. This should never be necessary with any individual
1746 * heap-only tuple item, though. (It's not clear how much of a problem
1747 * that would be, but there is no reason to allow it.)
1748 */
1749 if (ItemIdHasStorage(lp))
1750 {
1751 Assert(ItemIdIsNormal(lp));
1752 htup = (HeapTupleHeader) PageGetItem(page, lp);
1753 Assert(!HeapTupleHeaderIsHeapOnly(htup));
1754 }
1755 else
1756 {
1757 /* Whole HOT chain becomes dead */
1758 Assert(ItemIdIsRedirected(lp));
1759 }
1760#endif
1761
1762 ItemIdSetDead(lp);
1763 }
1764
1765 /* Update all now-unused line pointers */
1766 offnum = nowunused;
1767 for (int i = 0; i < nunused; i++)
1768 {
1769 OffsetNumber off = *offnum++;
1770 ItemId lp = PageGetItemId(page, off);
1771
1772#ifdef USE_ASSERT_CHECKING
1773
1774 if (lp_truncate_only)
1775 {
1776 /* Setting LP_DEAD to LP_UNUSED in vacuum's second pass */
1777 Assert(ItemIdIsDead(lp) && !ItemIdHasStorage(lp));
1778 }
1779 else
1780 {
1781 /*
1782 * When heap_page_prune_and_freeze() was called, mark_unused_now
1783 * may have been passed as true, which allows would-be LP_DEAD
1784 * items to be made LP_UNUSED instead. This is only possible if
1785 * the relation has no indexes. If there are any dead items, then
1786 * mark_unused_now was not true and every item being marked
1787 * LP_UNUSED must refer to a heap-only tuple.
1788 */
1789 if (ndead > 0)
1790 {
1791 Assert(ItemIdIsNormal(lp));
1792 htup = (HeapTupleHeader) PageGetItem(page, lp);
1793 Assert(HeapTupleHeaderIsHeapOnly(htup));
1794 }
1795 else
1796 Assert(ItemIdIsUsed(lp));
1797 }
1798
1799#endif
1800
1801 ItemIdSetUnused(lp);
1802 }
1803
1804 if (lp_truncate_only)
1805 PageTruncateLinePointerArray(page);
1806 else
1807 {
1808 /*
1809 * Finally, repair any fragmentation, and update the page's hint bit
1810 * about whether it has free pointers.
1811 */
1812 PageRepairFragmentation(page);
1813
1814 /*
1815 * Now that the page has been modified, assert that redirect items
1816 * still point to valid targets.
1817 */
1818 page_verify_redirects(page);
1819 }
1820}
void PageRepairFragmentation(Page page)
Definition: bufpage.c:698
void PageTruncateLinePointerArray(Page page)
Definition: bufpage.c:834
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:228
#define ItemIdSetRedirect(itemId, link)
Definition: itemid.h:152
#define ItemIdSetDead(itemId)
Definition: itemid.h:164
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static void page_verify_redirects(Page page)
Definition: pruneheap.c:1837

References Assert(), BufferGetPage(), HeapTupleHeaderIsHeapOnly(), i, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemIdSetDead, ItemIdSetRedirect, ItemIdSetUnused, page_verify_redirects(), PageGetItem(), PageGetItemId(), PageRepairFragmentation(), PageTruncateLinePointerArray(), and PG_USED_FOR_ASSERTS_ONLY.

Referenced by heap_page_prune_and_freeze(), and heap_xlog_prune_freeze().
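
Note the array layout: redirected holds nredirected (from, to) pairs, i.e. 2 * nredirected offsets, while nowdead and nowunused are flat lists. A hypothetical call with invented offsets (the caller must hold an exclusive lock on the buffer and, as in heap_page_prune_and_freeze(), make the call inside a critical section together with the WAL logging):

    OffsetNumber redirected[] = {1, 4};   /* item 1 becomes LP_REDIRECT to item 4 */
    OffsetNumber nowdead[]    = {2};      /* item 2 becomes LP_DEAD */
    OffsetNumber nowunused[]  = {3};      /* item 3 becomes LP_UNUSED */

    heap_page_prune_execute(buffer, false,  /* not lp_truncate_only */
                            redirected, 1,
                            nowdead, 1,
                            nowunused, 1);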

◆ heap_page_prune_opt()

void heap_page_prune_opt ( Relation  relation,
Buffer  buffer 
)

Definition at line 209 of file pruneheap.c.

210{
211 Page page = BufferGetPage(buffer);
212 TransactionId prune_xid;
213 GlobalVisState *vistest;
214 Size minfree;
215
216 /*
217 * We can't write WAL in recovery mode, so there's no point trying to
218 * clean the page. The primary will likely issue a cleaning WAL record
219 * soon anyway, so this is no particular loss.
220 */
221 if (RecoveryInProgress())
222 return;
223
224 /*
225 * First check whether there's any chance there's something to prune;
226 * determining the appropriate horizon is a waste if there's no prune_xid
227 * (i.e. no updates/deletes left potentially dead tuples around).
228 */
229 prune_xid = ((PageHeader) page)->pd_prune_xid;
230 if (!TransactionIdIsValid(prune_xid))
231 return;
232
233 /*
234 * Check whether prune_xid indicates that there may be dead rows that can
235 * be cleaned up.
236 */
237 vistest = GlobalVisTestFor(relation);
238
239 if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
240 return;
241
242 /*
243 * We prune when a previous UPDATE failed to find enough space on the page
244 * for a new tuple version, or when free space falls below the relation's
245 * fill-factor target (but not less than 10%).
246 *
247 * Checking free space here is questionable since we aren't holding any
248 * lock on the buffer; in the worst case we could get a bogus answer. It's
249 * unlikely to be *seriously* wrong, though, since reading either pd_lower
250 * or pd_upper is probably atomic. Avoiding taking a lock seems more
251 * important than sometimes getting a wrong answer in what is after all
252 * just a heuristic estimate.
253 */
254 minfree = RelationGetTargetPageFreeSpace(relation,
255 HEAP_DEFAULT_FILLFACTOR);
256 minfree = Max(minfree, BLCKSZ / 10);
257
258 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
259 {
260 /* OK, try to get exclusive buffer lock */
261 if (!ConditionalLockBufferForCleanup(buffer))
262 return;
263
264 /*
265 * Now that we have buffer lock, get accurate information about the
266 * page's free space, and recheck the heuristic about whether to
267 * prune.
268 */
269 if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
270 {
271 OffsetNumber dummy_off_loc;
272 PruneFreezeResult presult;
273
274 /*
275 * We don't pass the HEAP_PAGE_PRUNE_MARK_UNUSED_NOW option
276 * regardless of whether or not the relation has indexes, since we
277 * cannot safely determine that during on-access pruning with the
278 * current implementation.
279 */
280 PruneFreezeParams params = {
281 .relation = relation,
282 .buffer = buffer,
283 .reason = PRUNE_ON_ACCESS,
284 .options = 0,
285 .vistest = vistest,
286 .cutoffs = NULL,
287 };
288
289 heap_page_prune_and_freeze(&params, &presult, &dummy_off_loc,
290 NULL, NULL);
291
292 /*
293 * Report the number of tuples reclaimed to pgstats. This is
294 * presult.ndeleted minus the number of newly-LP_DEAD-set items.
295 *
296 * We derive the number of dead tuples like this to avoid totally
297 * forgetting about items that were set to LP_DEAD, since they
298 * still need to be cleaned up by VACUUM. We only want to count
299 * heap-only tuples that just became LP_UNUSED in our report,
300 * which don't.
301 *
302 * VACUUM doesn't have to compensate in the same way when it
303 * tracks ndeleted, since it will set the same LP_DEAD items to
304 * LP_UNUSED separately.
305 */
306 if (presult.ndeleted > presult.nnewlpdead)
307 pgstat_update_heap_dead_tuples(relation,
308 presult.ndeleted - presult.nnewlpdead);
309 }
310
311 /* And release buffer lock */
312 LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
313
314 /*
315 * We avoid reuse of any free space created on the page by unrelated
316 * UPDATEs/INSERTs by opting to not update the FSM at this point. The
317 * free space should be reused by UPDATEs to *this* page.
318 */
319 }
320}
void LockBuffer(Buffer buffer, BufferLockMode mode)
Definition: bufmgr.c:5604
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5857
@ BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:205
Size PageGetHeapFreeSpace(const PageData *page)
Definition: bufpage.c:990
#define Max(x, y)
Definition: c.h:1000
size_t Size
Definition: c.h:613
@ PRUNE_ON_ACCESS
Definition: heapam.h:228
void pgstat_update_heap_dead_tuples(Relation rel, int delta)
bool GlobalVisTestIsRemovableXid(GlobalVisState *state, TransactionId xid)
Definition: procarray.c:4226
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4069
void heap_page_prune_and_freeze(PruneFreezeParams *params, PruneFreezeResult *presult, OffsetNumber *off_loc, TransactionId *new_relfrozen_xid, MultiXactId *new_relmin_mxid)
Definition: pruneheap.c:809
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition: rel.h:390
#define HEAP_DEFAULT_FILLFACTOR
Definition: rel.h:361
bool RecoveryInProgress(void)
Definition: xlog.c:6406

References BUFFER_LOCK_UNLOCK, BufferGetPage(), ConditionalLockBufferForCleanup(), GlobalVisTestFor(), GlobalVisTestIsRemovableXid(), HEAP_DEFAULT_FILLFACTOR, heap_page_prune_and_freeze(), LockBuffer(), Max, PruneFreezeResult::ndeleted, PruneFreezeResult::nnewlpdead, PageGetHeapFreeSpace(), PageIsFull(), pgstat_update_heap_dead_tuples(), PRUNE_ON_ACCESS, RecoveryInProgress(), PruneFreezeParams::relation, RelationGetTargetPageFreeSpace, and TransactionIdIsValid.

Referenced by BitmapHeapScanNextBlock(), heap_prepare_pagescan(), and heapam_index_fetch_tuple().
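
The callers above invoke this with only a pin held: the function takes its own conditional cleanup lock and silently does nothing if that lock is unavailable. A hedged sketch of the on-access pattern, with rel and blkno as hypothetical stand-ins:

    Buffer buf = ReadBuffer(rel, blkno);

    heap_page_prune_opt(rel, buf);          /* may prune, may do nothing */

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    /* ... examine tuples on the page ... */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    ReleaseBuffer(buf);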

◆ heap_page_will_freeze()

static bool heap_page_will_freeze ( Relation  relation,
Buffer  buffer,
bool  did_tuple_hint_fpi,
bool  do_prune,
bool  do_hint_prune,
PruneState *  prstate 
)
static

Definition at line 653 of file pruneheap.c.

658{
659 bool do_freeze = false;
660
661 /*
662 * If the caller specified we should not attempt to freeze any tuples,
663 * validate that everything is in the right state and return.
664 */
665 if (!prstate->attempt_freeze)
666 {
667 Assert(!prstate->all_frozen && prstate->nfrozen == 0);
668 Assert(prstate->lpdead_items == 0 || !prstate->all_visible);
669 return false;
670 }
671
672 if (prstate->pagefrz.freeze_required)
673 {
674 /*
675 * heap_prepare_freeze_tuple indicated that at least one XID/MXID from
676 * before FreezeLimit/MultiXactCutoff is present. Must freeze to
677 * advance relfrozenxid/relminmxid.
678 */
679 do_freeze = true;
680 }
681 else
682 {
683 /*
684 * Opportunistically freeze the page if we are generating an FPI
685 * anyway and if doing so means that we can set the page all-frozen
686 * afterwards (might not happen until VACUUM's final heap pass).
687 *
688 * XXX: Previously, we knew if pruning emitted an FPI by checking
689 * pgWalUsage.wal_fpi before and after pruning. Once the freeze and
690 * prune records were combined, this heuristic couldn't be used
691 * anymore. The opportunistic freeze heuristic must be improved;
692 * however, for now, try to approximate the old logic.
693 */
694 if (prstate->all_frozen && prstate->nfrozen > 0)
695 {
696 Assert(prstate->all_visible);
697
698 /*
699 * Freezing would make the page all-frozen. Have we already emitted
700 * an FPI, or will we do so anyway?
701 */
702 if (RelationNeedsWAL(relation))
703 {
704 if (did_tuple_hint_fpi)
705 do_freeze = true;
706 else if (do_prune)
707 {
708 if (XLogCheckBufferNeedsBackup(buffer))
709 do_freeze = true;
710 }
711 else if (do_hint_prune)
712 {
713 if (XLogHintBitIsNeeded())
714 do_freeze = true;
715 }
716 }
717 }
718 }
719
720 if (do_freeze)
721 {
722 /*
723 * Validate the tuples we will be freezing before entering the
724 * critical section.
725 */
726 heap_pre_freeze_checks(buffer, prstate->frozen, prstate->nfrozen);
727
728 /*
729 * Calculate what the snapshot conflict horizon should be for a record
730 * freezing tuples. We can use the visibility_cutoff_xid as our cutoff
731 * for conflicts when the whole page is eligible to become all-frozen
732 * in the VM once we're done with it. Otherwise, we generate a
733 * conservative cutoff by stepping back from OldestXmin.
734 */
735 if (prstate->all_frozen)
736 prstate->frz_conflict_horizon = prstate->visibility_cutoff_xid;
737 else
738 {
739 /* Avoids false conflicts when hot_standby_feedback in use */
740 prstate->frz_conflict_horizon = prstate->cutoffs->OldestXmin;
741 TransactionIdRetreat(prstate->frz_conflict_horizon);
742 }
743 }
744 else if (prstate->nfrozen > 0)
745 {
746 /*
747 * The page contained some tuples that were not already frozen, and we
748 * chose not to freeze them now. The page won't be all-frozen then.
749 */
750 Assert(!prstate->pagefrz.freeze_required);
751
752 prstate->all_frozen = false;
753 prstate->nfrozen = 0; /* avoid miscounts in instrumentation */
754 }
755 else
756 {
757 /*
758 * We have no freeze plans to execute. The page might already be
759 * all-frozen (perhaps only following pruning), though. Such pages
760 * can be marked all-frozen in the VM by our caller, even though none
761 * of its tuples were newly frozen here.
762 */
763 }
764
765 return do_freeze;
766}
void heap_pre_freeze_checks(Buffer buffer, HeapTupleFreeze *tuples, int ntuples)
Definition: heapam.c:7353
bool freeze_required
Definition: heapam.h:182
struct VacuumCutoffs * cutoffs
Definition: pruneheap.c:47
TransactionId OldestXmin
Definition: vacuum.h:279
#define TransactionIdRetreat(dest)
Definition: transam.h:141
#define XLogHintBitIsNeeded()
Definition: xlog.h:120
bool XLogCheckBufferNeedsBackup(Buffer buffer)
Definition: xloginsert.c:1049

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, HeapPageFreeze::freeze_required, PruneState::frozen, PruneState::frz_conflict_horizon, heap_pre_freeze_checks(), PruneState::lpdead_items, PruneState::nfrozen, VacuumCutoffs::OldestXmin, PruneState::pagefrz, RelationNeedsWAL, TransactionIdRetreat, PruneState::visibility_cutoff_xid, XLogCheckBufferNeedsBackup(), and XLogHintBitIsNeeded.

Referenced by heap_page_prune_and_freeze().
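
Condensed into a single expression, the decision implemented above is approximately (a restatement for readability, not code from the file):

    do_freeze = prstate->pagefrz.freeze_required ||
        (prstate->all_frozen && prstate->nfrozen > 0 &&
         RelationNeedsWAL(relation) &&
         (did_tuple_hint_fpi ||
          (do_prune && XLogCheckBufferNeedsBackup(buffer)) ||
          (do_hint_prune && XLogHintBitIsNeeded())));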

◆ heap_prune_chain()

static void heap_prune_chain ( Page  page,
BlockNumber  blockno,
OffsetNumber  maxoff,
OffsetNumber  rootoffnum,
PruneState *  prstate 
)
static

Definition at line 1093 of file pruneheap.c.

1095{
1096 TransactionId priorXmax = InvalidTransactionId;
1097 ItemId rootlp;
1098 OffsetNumber offnum;
1099 OffsetNumber chainitems[MaxHeapTuplesPerPage];
1100
1101 /*
1102 * After traversing the HOT chain, ndeadchain is the index in chainitems
1103 * of the first live successor after the last dead item.
1104 */
1105 int ndeadchain = 0,
1106 nchain = 0;
1107
1108 rootlp = PageGetItemId(page, rootoffnum);
1109
1110 /* Start from the root tuple */
1111 offnum = rootoffnum;
1112
1113 /* while not end of the chain */
1114 for (;;)
1115 {
1116 HeapTupleHeader htup;
1117 ItemId lp;
1118
1119 /* Sanity check (pure paranoia) */
1120 if (offnum < FirstOffsetNumber)
1121 break;
1122
1123 /*
1124 * An offset past the end of page's line pointer array is possible
1125 * when the array was truncated (original item must have been unused)
1126 */
1127 if (offnum > maxoff)
1128 break;
1129
1130 /* If item is already processed, stop --- it must not be same chain */
1131 if (prstate->processed[offnum])
1132 break;
1133
1134 lp = PageGetItemId(page, offnum);
1135
1136 /*
1137 * Unused item obviously isn't part of the chain. Likewise, a dead
1138 * line pointer can't be part of the chain. Both of those cases were
1139 * already marked as processed.
1140 */
1141 Assert(ItemIdIsUsed(lp));
1142 Assert(!ItemIdIsDead(lp));
1143
1144 /*
1145 * If we are looking at the redirected root line pointer, jump to the
1146 * first normal tuple in the chain. If we find a redirect somewhere
1147 * else, stop --- it must not be same chain.
1148 */
1149 if (ItemIdIsRedirected(lp))
1150 {
1151 if (nchain > 0)
1152 break; /* not at start of chain */
1153 chainitems[nchain++] = offnum;
1154 offnum = ItemIdGetRedirect(rootlp);
1155 continue;
1156 }
1157
1158 Assert(ItemIdIsNormal(lp));
1159
1160 htup = (HeapTupleHeader) PageGetItem(page, lp);
1161
1162 /*
1163 * Check the tuple XMIN against prior XMAX, if any
1164 */
1165 if (TransactionIdIsValid(priorXmax) &&
1166 !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
1167 break;
1168
1169 /*
1170 * OK, this tuple is indeed a member of the chain.
1171 */
1172 chainitems[nchain++] = offnum;
1173
1174 switch (htsv_get_valid_status(prstate->htsv[offnum]))
1175 {
1176 case HEAPTUPLE_DEAD:
1177
1178 /* Remember the last DEAD tuple seen */
1179 ndeadchain = nchain;
1180 HeapTupleHeaderAdvanceConflictHorizon(htup,
1181 &prstate->latest_xid_removed);
1182 /* Advance to next chain member */
1183 break;
1184
1185 case HEAPTUPLE_RECENTLY_DEAD:
1186
1187 /*
1188 * We don't need to advance the conflict horizon for
1189 * RECENTLY_DEAD tuples, even if we are removing them. This
1190 * is because we only remove RECENTLY_DEAD tuples if they
1191 * precede a DEAD tuple, and the DEAD tuple must have been
1192 * inserted by a newer transaction than the RECENTLY_DEAD
1193 * tuple by virtue of being later in the chain. We will have
1194 * advanced the conflict horizon for the DEAD tuple.
1195 */
1196
1197 /*
1198 * Advance past RECENTLY_DEAD tuples just in case there's a
1199 * DEAD one after them. We have to make sure that we don't
1200 * miss any DEAD tuples, since DEAD tuples that still have
1201 * tuple storage after pruning will confuse VACUUM.
1202 */
1203 break;
1204
1205 case HEAPTUPLE_DELETE_IN_PROGRESS:
1206 case HEAPTUPLE_LIVE:
1207 case HEAPTUPLE_INSERT_IN_PROGRESS:
1208 goto process_chain;
1209
1210 default:
1211 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
1212 goto process_chain;
1213 }
1214
1215 /*
1216 * If the tuple is not HOT-updated, then we are at the end of this
1217 * HOT-update chain.
1218 */
1219 if (!HeapTupleHeaderIsHotUpdated(htup))
1220 goto process_chain;
1221
1222 /* HOT implies it can't have moved to different partition */
1223 Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1224
1225 /*
1226 * Advance to next chain member.
1227 */
1228 Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blockno);
1229 offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1230 priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1231 }
1232
1233 if (ItemIdIsRedirected(rootlp) && nchain < 2)
1234 {
1235 /*
1236 * We found a redirect item that doesn't point to a valid follow-on
1237 * item. This can happen if the loop in heap_page_prune_and_freeze()
1238 * caused us to visit the dead successor of a redirect item before
1239 * visiting the redirect item. We can clean up by setting the
1240 * redirect item to LP_DEAD state or LP_UNUSED if the caller
1241 * indicated.
1242 */
1243 heap_prune_record_dead_or_unused(prstate, rootoffnum, false);
1244 return;
1245 }
1246
1247process_chain:
1248
1249 if (ndeadchain == 0)
1250 {
1251 /*
1252 * No DEAD tuple was found, so the chain is entirely composed of
1253 * normal, unchanged tuples. Leave it alone.
1254 */
1255 int i = 0;
1256
1257 if (ItemIdIsRedirected(rootlp))
1258 {
1259 heap_prune_record_unchanged_lp_redirect(prstate, rootoffnum);
1260 i++;
1261 }
1262 for (; i < nchain; i++)
1263 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1264 }
1265 else if (ndeadchain == nchain)
1266 {
1267 /*
1268 * The entire chain is dead. Mark the root line pointer LP_DEAD, and
1269 * fully remove the other tuples in the chain.
1270 */
1271 heap_prune_record_dead_or_unused(prstate, rootoffnum, ItemIdIsNormal(rootlp));
1272 for (int i = 1; i < nchain; i++)
1273 heap_prune_record_unused(prstate, chainitems[i], true);
1274 }
1275 else
1276 {
1277 /*
1278 * We found a DEAD tuple in the chain. Redirect the root line pointer
1279 * to the first non-DEAD tuple, and mark as unused each intermediate
1280 * item that we are able to remove from the chain.
1281 */
1282 heap_prune_record_redirect(prstate, rootoffnum, chainitems[ndeadchain],
1283 ItemIdIsNormal(rootlp));
1284 for (int i = 1; i < ndeadchain; i++)
1285 heap_prune_record_unused(prstate, chainitems[i], true);
1286
1287 /* the rest of tuples in the chain are normal, unchanged tuples */
1288 for (int i = ndeadchain; i < nchain; i++)
1289 heap_prune_record_unchanged_lp_normal(page, prstate, chainitems[i]);
1290 }
1291}
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition: heapam.c:7999
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:128
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:129
@ HEAPTUPLE_LIVE
Definition: heapam.h:127
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:130
@ HEAPTUPLE_DEAD
Definition: heapam.h:126
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static HTSV_Result htsv_get_valid_status(int status)
Definition: pruneheap.c:1054
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1392
static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum, bool was_normal)
Definition: pruneheap.c:1309
static void heap_prune_record_unchanged_lp_normal(Page page, PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1425
static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1375
static void heap_prune_record_unchanged_lp_redirect(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:1636
bool processed[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:87
int8 htsv[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:99

References Assert(), elog, ERROR, FirstOffsetNumber, heap_prune_record_dead_or_unused(), heap_prune_record_redirect(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_redirect(), heap_prune_record_unused(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderIndicatesMovedPartitions(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, htsv_get_valid_status(), i, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), PruneState::latest_xid_removed, MaxHeapTuplesPerPage, PageGetItem(), PageGetItemId(), PruneState::processed, HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by prune_freeze_plan().
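
A worked example of the three process_chain outcomes (hypothetical page state):

    before: [1] LP_REDIRECT -> [2] RECENTLY_DEAD -> [3] DEAD -> [4] LIVE

Traversal leaves ndeadchain == 3, the chainitems index just past the last DEAD member. The third branch therefore applies: item [1] is redirected straight to [4], items [2] and [3] are marked LP_UNUSED, and [4] is recorded as unchanged. Had the chain ended at the DEAD tuple [3], ndeadchain would equal nchain and the whole chain would collapse to an LP_DEAD (or, with mark_unused_now, LP_UNUSED) root. Had no member been DEAD, ndeadchain would remain 0 and every item would be recorded as unchanged.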

◆ heap_prune_record_dead()

static void heap_prune_record_dead ( PruneState *  prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1340 of file pruneheap.c.

1342{
1343 Assert(!prstate->processed[offnum]);
1344 prstate->processed[offnum] = true;
1345
1346 Assert(prstate->ndead < MaxHeapTuplesPerPage);
1347 prstate->nowdead[prstate->ndead] = offnum;
1348 prstate->ndead++;
1349
1350 /*
1351 * Deliberately delay unsetting all_visible and all_frozen until later
1352 * during pruning. Removable dead tuples shouldn't preclude freezing the
1353 * page.
1354 */
1355
1356 /* Record the dead offset for vacuum */
1357 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1358
1359 /*
1360 * If the root entry had been a normal tuple, we are deleting it, so count
1361 * it in the result. But changing a redirect (even to DEAD state) doesn't
1362 * count.
1363 */
1364 if (was_normal)
1365 prstate->ndeleted++;
1366}
OffsetNumber * deadoffsets
Definition: pruneheap.c:130

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, MaxHeapTuplesPerPage, PruneState::ndead, PruneState::ndeleted, PruneState::nowdead, and PruneState::processed.

Referenced by heap_prune_record_dead_or_unused().

◆ heap_prune_record_dead_or_unused()

static void heap_prune_record_dead_or_unused ( PruneState *  prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1375 of file pruneheap.c.

1377{
1378 /*
1379 * If the caller set mark_unused_now to true, we can remove dead tuples
1380 * during pruning instead of marking their line pointers dead. Set this
1381 * tuple's line pointer LP_UNUSED. We hint that this option is less
1382 * likely.
1383 */
1384 if (unlikely(prstate->mark_unused_now))
1385 heap_prune_record_unused(prstate, offnum, was_normal);
1386 else
1387 heap_prune_record_dead(prstate, offnum, was_normal);
1388}
#define unlikely(x)
Definition: c.h:407
static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum, bool was_normal)
Definition: pruneheap.c:1340
bool mark_unused_now
Definition: pruneheap.c:44

References heap_prune_record_dead(), heap_prune_record_unused(), PruneState::mark_unused_now, and unlikely.

Referenced by heap_prune_chain().

◆ heap_prune_record_prunable()

static void heap_prune_record_prunable ( PruneState *  prstate,
TransactionId  xid 
)
static

Definition at line 1295 of file pruneheap.c.

1296{
1297 /*
1298 * This should exactly match the PageSetPrunable macro. We can't store
1299 * directly into the page header yet, so we update working state.
1300 */
1301 Assert(TransactionIdIsNormal(xid));
1302 if (!TransactionIdIsValid(prstate->new_prune_xid) ||
1303 TransactionIdPrecedes(xid, prstate->new_prune_xid))
1304 prstate->new_prune_xid = xid;
1305}
#define TransactionIdIsNormal(xid)
Definition: transam.h:42
static bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.h:263

References Assert(), PruneState::new_prune_xid, TransactionIdIsNormal, TransactionIdIsValid, and TransactionIdPrecedes().

Referenced by heap_prune_record_unchanged_lp_normal().
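
For comparison, a paraphrase of the PageSetPrunable macro from bufpage.h that this function mirrors (reproduced here for illustration; consult the header for the authoritative definition):

    #define PageSetPrunable(page, xid) \
    do { \
        Assert(TransactionIdIsNormal(xid)); \
        if (!TransactionIdIsValid(((PageHeader) (page))->pd_prune_xid) || \
            TransactionIdPrecedes(xid, ((PageHeader) (page))->pd_prune_xid)) \
            ((PageHeader) (page))->pd_prune_xid = (xid); \
    } while (0)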

◆ heap_prune_record_redirect()

static void heap_prune_record_redirect ( PruneState *  prstate,
OffsetNumber  offnum,
OffsetNumber  rdoffnum,
bool  was_normal 
)
static

Definition at line 1309 of file pruneheap.c.

1312{
1313 Assert(!prstate->processed[offnum]);
1314 prstate->processed[offnum] = true;
1315
1316 /*
1317 * Do not mark the redirect target here. It needs to be counted
1318 * separately as an unchanged tuple.
1319 */
1320
1320 Assert(prstate->nredirected < MaxHeapTuplesPerPage);
1321 prstate->redirected[prstate->nredirected * 2] = offnum;
1323 prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
1324
1325 prstate->nredirected++;
1326
1327 /*
1328 * If the root entry had been a normal tuple, we are deleting it, so count
1329 * it in the result. But changing a redirect (even to DEAD state) doesn't
1330 * count.
1331 */
1332 if (was_normal)
1333 prstate->ndeleted++;
1334
1335 prstate->hastup = true;
1336}

References Assert(), PruneState::hastup, MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nredirected, PruneState::processed, and PruneState::redirected.

Referenced by heap_prune_chain().

◆ heap_prune_record_unchanged_lp_dead()

static void heap_prune_record_unchanged_lp_dead ( Page  page,
PruneState *  prstate,
OffsetNumber  offnum 
)
static

Definition at line 1608 of file pruneheap.c.

1609{
1610 Assert(!prstate->processed[offnum]);
1611 prstate->processed[offnum] = true;
1612
1613 /*
1614 * Deliberately don't set hastup for LP_DEAD items. We make the soft
1615 * assumption that any LP_DEAD items encountered here will become
1616 * LP_UNUSED later on, before count_nondeletable_pages is reached. If we
1617 * don't make this assumption then rel truncation will only happen every
1618 * other VACUUM, at most. Besides, VACUUM must treat
1619 * hastup/nonempty_pages as provisional no matter how LP_DEAD items are
1620 * handled (handled here, or handled later on).
1621 *
1622 * Similarly, don't unset all_visible and all_frozen until later, at the
1623 * end of heap_page_prune_and_freeze(). This will allow us to attempt to
1624 * freeze the page after pruning. As long as we unset it before updating
1625 * the visibility map, this will be correct.
1626 */
1627
1628 /* Record the dead offset for vacuum */
1629 prstate->deadoffsets[prstate->lpdead_items++] = offnum;
1630}

References Assert(), PruneState::deadoffsets, PruneState::lpdead_items, and PruneState::processed.

Referenced by prune_freeze_plan().

◆ heap_prune_record_unchanged_lp_normal()

static void heap_prune_record_unchanged_lp_normal ( Page  page,
PruneState *  prstate,
OffsetNumber  offnum 
)
static

Definition at line 1425 of file pruneheap.c.

1426{
1427 HeapTupleHeader htup;
1428
1429 Assert(!prstate->processed[offnum]);
1430 prstate->processed[offnum] = true;
1431
1432 prstate->hastup = true; /* the page is not empty */
1433
1434 /*
1435 * The criteria for counting a tuple as live in this block need to match
1436 * what analyze.c's acquire_sample_rows() does, otherwise VACUUM and
1437 * ANALYZE may produce wildly different reltuples values, e.g. when there
1438 * are many recently-dead tuples.
1439 *
1440 * The logic here is a bit simpler than acquire_sample_rows(), as VACUUM
1441 * can't run inside a transaction block, which makes some cases impossible
1442 * (e.g. in-progress insert from the same transaction).
1443 *
1444 * HEAPTUPLE_DEAD are handled by the other heap_prune_record_*()
1445 * subroutines. They don't count dead items like acquire_sample_rows()
1446 * does, because we assume that all dead items will become LP_UNUSED
1447 * before VACUUM finishes. This difference is only superficial. VACUUM
1448 * effectively agrees with ANALYZE about DEAD items, in the end. VACUUM
1449 * won't remember LP_DEAD items, but only because they're not supposed to
1450 * be left behind when it is done. (Cases where we bypass index vacuuming
1451 * will violate this optimistic assumption, but the overall impact of that
1452 * should be negligible.)
1453 */
1454 htup = (HeapTupleHeader) PageGetItem(page, PageGetItemId(page, offnum));
1455
1456 switch (prstate->htsv[offnum])
1457 {
1458 case HEAPTUPLE_LIVE:
1459
1460 /*
1461 * Count it as live. Not only is this natural, but it's also what
1462 * acquire_sample_rows() does.
1463 */
1464 prstate->live_tuples++;
1465
1466 /*
1467 * Is the tuple definitely visible to all transactions?
1468 *
1469 * NB: Like with per-tuple hint bits, we can't set the
1470 * PD_ALL_VISIBLE flag if the inserter committed asynchronously.
1471 * See SetHintBits for more info. Check that the tuple is hinted
1472 * xmin-committed because of that.
1473 */
1474 if (prstate->all_visible)
1475 {
1476 TransactionId xmin;
1477
1478 if (!HeapTupleHeaderXminCommitted(htup))
1479 {
1480 prstate->all_visible = false;
1481 prstate->all_frozen = false;
1482 break;
1483 }
1484
1485 /*
1486 * The inserter definitely committed. But is it old enough
1487 * that everyone sees it as committed? A FrozenTransactionId
1488 * is seen as committed to everyone. Otherwise, we check if
1489 * there is a snapshot that considers this xid to still be
1490 * running, and if so, we don't consider the page all-visible.
1491 */
1492 xmin = HeapTupleHeaderGetXmin(htup);
1493
1494 /*
1495 * For now always use prstate->cutoffs for this test, because
1496 * we only update 'all_visible' and 'all_frozen' when freezing
1497 * is requested. We could use GlobalVisTestIsRemovableXid
1498 * instead, if a non-freezing caller wanted to set the VM bit.
1499 */
1500 Assert(prstate->cutoffs);
1501 if (!TransactionIdPrecedes(xmin, prstate->cutoffs->OldestXmin))
1502 {
1503 prstate->all_visible = false;
1504 prstate->all_frozen = false;
1505 break;
1506 }
1507
1508 /* Track newest xmin on page. */
1509 if (TransactionIdFollows(xmin, prstate->visibility_cutoff_xid) &&
1510 TransactionIdIsNormal(xmin))
1511 prstate->visibility_cutoff_xid = xmin;
1512 }
1513 break;
1514
1515 case HEAPTUPLE_RECENTLY_DEAD:
1516 prstate->recently_dead_tuples++;
1517 prstate->all_visible = false;
1518 prstate->all_frozen = false;
1519
1520 /*
1521 * This tuple will soon become DEAD. Update the hint field so
1522 * that the page is reconsidered for pruning in future.
1523 */
1524 heap_prune_record_prunable(prstate,
1525 HeapTupleHeaderGetUpdateXid(htup));
1526 break;
1527
1528 case HEAPTUPLE_INSERT_IN_PROGRESS:
1529
1530 /*
1531 * We do not count these rows as live, because we expect the
1532 * inserting transaction to update the counters at commit, and we
1533 * assume that will happen only after we report our results. This
1534 * assumption is a bit shaky, but it is what acquire_sample_rows()
1535 * does, so be consistent.
1536 */
1537 prstate->all_visible = false;
1538 prstate->all_frozen = false;
1539
1540 /*
1541 * If we wanted to optimize for aborts, we might consider marking
1542 * the page prunable when we see INSERT_IN_PROGRESS. But we
1543 * don't. See related decisions about when to mark the page
1544 * prunable in heapam.c.
1545 */
1546 break;
1547
1548 case HEAPTUPLE_DELETE_IN_PROGRESS:
1549
1550 /*
1551 * This is an expected case during concurrent vacuum. Count such
1552 * rows as live. As above, we assume the deleting transaction
1553 * will commit and update the counters after we report.
1554 */
1555 prstate->live_tuples++;
1556 prstate->all_visible = false;
1557 prstate->all_frozen = false;
1558
1559 /*
1560 * This tuple may soon become DEAD. Update the hint field so that
1561 * the page is reconsidered for pruning in future.
1562 */
1563 heap_prune_record_prunable(prstate,
1564 HeapTupleHeaderGetUpdateXid(htup));
1565 break;
1566
1567 default:
1568
1569 /*
1570 * DEAD tuples should've been passed to heap_prune_record_dead()
1571 * or heap_prune_record_unused() instead.
1572 */
1573 elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result %d",
1574 prstate->htsv[offnum]);
1575 break;
1576 }
1577
1578 /* Consider freezing any normal tuples which will not be removed */
1579 if (prstate->attempt_freeze)
1580 {
1581 bool totally_frozen;
1582
1583 if ((heap_prepare_freeze_tuple(htup,
1584 prstate->cutoffs,
1585 &prstate->pagefrz,
1586 &prstate->frozen[prstate->nfrozen],
1587 &totally_frozen)))
1588 {
1589 /* Save prepared freeze plan for later */
1590 prstate->frozen[prstate->nfrozen++].offset = offnum;
1591 }
1592
1593 /*
1594 * If any tuple isn't either totally frozen already or eligible to
1595 * become totally frozen (according to its freeze plan), then the page
1596 * definitely cannot be set all-frozen in the visibility map later on.
1597 */
1598 if (!totally_frozen)
1599 prstate->all_frozen = false;
1600 }
1601}

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, elog, ERROR, PruneState::frozen, PruneState::hastup, heap_prepare_freeze_tuple(), heap_prune_record_prunable(), HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderGetUpdateXid(), HeapTupleHeaderGetXmin(), HeapTupleHeaderXminCommitted(), PruneState::htsv, PruneState::live_tuples, PruneState::nfrozen, HeapTupleFreeze::offset, VacuumCutoffs::OldestXmin, PruneState::pagefrz, PageGetItem(), PageGetItemId(), PruneState::processed, PruneState::recently_dead_tuples, TransactionIdFollows(), TransactionIdIsNormal, TransactionIdPrecedes(), and PruneState::visibility_cutoff_xid.

Referenced by heap_prune_chain(), and prune_freeze_plan().
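
A rough standalone model of the accounting rules in the switch above may help. The enum values and MiniCounters below are simplified stand-ins for HTSV_Result and the PruneState counters, and the xmin_old_enough flag abstracts the TransactionIdPrecedes(xmin, OldestXmin) test:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the HTSV_Result values handled by the switch */
typedef enum
{
    TUP_LIVE,
    TUP_RECENTLY_DEAD,
    TUP_INSERT_IN_PROGRESS,
    TUP_DELETE_IN_PROGRESS
} MiniHTSV;

typedef struct
{
    int  live_tuples;
    int  recently_dead_tuples;
    bool all_visible;
    bool all_frozen;
} MiniCounters;

static void
account_tuple(MiniCounters *c, MiniHTSV status, bool xmin_old_enough)
{
    switch (status)
    {
        case TUP_LIVE:
            c->live_tuples++;
            if (!xmin_old_enough)       /* inserter not yet old enough */
            {
                c->all_visible = false;
                c->all_frozen = false;
            }
            break;
        case TUP_RECENTLY_DEAD:
            c->recently_dead_tuples++;
            c->all_visible = false;
            c->all_frozen = false;
            break;
        case TUP_INSERT_IN_PROGRESS:
            /* not counted: the inserter updates the stats at commit */
            c->all_visible = false;
            c->all_frozen = false;
            break;
        case TUP_DELETE_IN_PROGRESS:
            c->live_tuples++;   /* deleter updates the stats at commit */
            c->all_visible = false;
            c->all_frozen = false;
            break;
    }
}

int
main(void)
{
    MiniCounters c = {.all_visible = true, .all_frozen = true};

    account_tuple(&c, TUP_LIVE, true);
    account_tuple(&c, TUP_RECENTLY_DEAD, true);
    printf("live=%d recently_dead=%d all_visible=%d\n",
           c.live_tuples, c.recently_dead_tuples, (int) c.all_visible);
    return 0;
}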

◆ heap_prune_record_unchanged_lp_redirect()

static void heap_prune_record_unchanged_lp_redirect ( PruneState *  prstate,
OffsetNumber  offnum 
)
static

Definition at line 1636 of file pruneheap.c.

1637{
1638 /*
1639 * A redirect line pointer doesn't count as a live tuple.
1640 *
1641 * If we leave a redirect line pointer in place, there will be another
1642 * tuple on the page that it points to. We will do the bookkeeping for
1643 * that separately. So we have nothing to do here, except remember that
1644 * we processed this item.
1645 */
1646 Assert(!prstate->processed[offnum]);
1647 prstate->processed[offnum] = true;
1648}

References Assert(), and PruneState::processed.

Referenced by heap_prune_chain().

◆ heap_prune_record_unchanged_lp_unused()

static void heap_prune_record_unchanged_lp_unused ( Page  page,
PruneState *  prstate,
OffsetNumber  offnum 
)
static

Definition at line 1414 of file pruneheap.c.

1415{
1416 Assert(!prstate->processed[offnum]);
1417 prstate->processed[offnum] = true;
1418}

References Assert(), and PruneState::processed.

Referenced by prune_freeze_plan().

◆ heap_prune_record_unused()

static void heap_prune_record_unused ( PruneState *  prstate,
OffsetNumber  offnum,
bool  was_normal 
)
static

Definition at line 1392 of file pruneheap.c.

1393{
1394 Assert(!prstate->processed[offnum]);
1395 prstate->processed[offnum] = true;
1396
1397 Assert(prstate->nunused < MaxHeapTuplesPerPage);
1398 prstate->nowunused[prstate->nunused] = offnum;
1399 prstate->nunused++;
1400
1401 /*
1402 * If the root entry had been a normal tuple, we are deleting it, so count
1403 * it in the result. But changing a redirect (even to DEAD state) doesn't
1404 * count.
1405 */
1406 if (was_normal)
1407 prstate->ndeleted++;
1408}

References Assert(), MaxHeapTuplesPerPage, PruneState::ndeleted, PruneState::nowunused, PruneState::nunused, and PruneState::processed.

Referenced by heap_prune_chain(), heap_prune_record_dead_or_unused(), and prune_freeze_plan().
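
The was_normal flag drives the ndeleted arithmetic; the following tiny hypothetical model (MiniPrune is not the real struct) shows the asymmetry:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    int nunused;
    int ndeleted;
} MiniPrune;

static void
record_unused(MiniPrune *p, bool was_normal)
{
    p->nunused++;
    if (was_normal)
        p->ndeleted++;          /* removing a redirect doesn't count */
}

int
main(void)
{
    MiniPrune p = {0};

    record_unused(&p, true);    /* a normal tuple became LP_UNUSED */
    record_unused(&p, false);   /* a redirect became LP_UNUSED */
    printf("nunused=%d ndeleted=%d\n", p.nunused, p.ndeleted);
    return 0;
}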

◆ heap_prune_satisfies_vacuum()

static HTSV_Result heap_prune_satisfies_vacuum ( PruneState *  prstate,
HeapTuple  tup,
Buffer  buffer 
)
static

Definition at line 1011 of file pruneheap.c.

1012{
1013 HTSV_Result res;
1014 TransactionId dead_after;
1015
1016 res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
1017
1018 if (res != HEAPTUPLE_RECENTLY_DEAD)
1019 return res;
1020
1021 /*
1022 * For VACUUM, we must be sure to prune tuples with xmax older than
1023 * OldestXmin -- a visibility cutoff determined at the beginning of
1024 * vacuuming the relation. OldestXmin is used for freezing determination
1025 * and we cannot freeze dead tuples' xmaxes.
1026 */
1027 if (prstate->cutoffs &&
1028 TransactionIdIsValid(prstate->cutoffs->OldestXmin) &&
1029 NormalTransactionIdPrecedes(dead_after, prstate->cutoffs->OldestXmin))
1030 return HEAPTUPLE_DEAD;
1031
1032 /*
1033 * Determine whether or not the tuple is considered dead when compared
1034 * with the provided GlobalVisState. On-access pruning does not provide
1035 * VacuumCutoffs. And for vacuum, even if the tuple's xmax is not older
1036 * than OldestXmin, GlobalVisTestIsRemovableXid() could find the row dead
1037 * if the GlobalVisState has been updated since the beginning of vacuuming
1038 * the relation.
1039 */
1040 if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
1041 return HEAPTUPLE_DEAD;
1042
1043 return res;
1044}

References PruneState::cutoffs, GlobalVisTestIsRemovableXid(), HEAPTUPLE_DEAD, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuumHorizon(), NormalTransactionIdPrecedes, VacuumCutoffs::OldestXmin, TransactionIdIsValid, and PruneState::vistest.

Referenced by prune_freeze_plan().
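
The control flow amounts to a two-tier removability test for RECENTLY_DEAD tuples. Here is a self-contained sketch with invented names (Xid, is_removable), where a NULL oldest_xmin stands in for the on-access case with no VacuumCutoffs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t Xid;

/* Modulo-2^32 XID comparison, in the spirit of TransactionIdPrecedes() */
static bool
xid_precedes(Xid a, Xid b)
{
    return (int32_t) (a - b) < 0;
}

/*
 * Tier 1: a fixed OldestXmin cutoff when VACUUM supplied one.
 * Tier 2: a global visibility horizon that may have advanced since
 * VACUUM (or the on-access pruner) started.
 */
static bool
is_removable(Xid dead_after, const Xid *oldest_xmin, Xid vis_horizon)
{
    if (oldest_xmin != NULL && xid_precedes(dead_after, *oldest_xmin))
        return true;
    return xid_precedes(dead_after, vis_horizon);
}

int
main(void)
{
    Xid oldest_xmin = 1000;

    printf("%d\n", is_removable(900, &oldest_xmin, 950));   /* 1: < OldestXmin */
    printf("%d\n", is_removable(1100, &oldest_xmin, 1200)); /* 1: horizon moved */
    printf("%d\n", is_removable(1100, NULL, 1050));         /* 0: still visible */
    return 0;
}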

◆ htsv_get_valid_status()

static HTSV_Result htsv_get_valid_status ( int  status)
inline static

Definition at line 1054 of file pruneheap.c.

1055{
1056 Assert(status >= HEAPTUPLE_DEAD &&
1057 status <= HEAPTUPLE_DELETE_IN_PROGRESS);
1058 return (HTSV_Result) status;
1059}

References Assert(), HEAPTUPLE_DEAD, and HEAPTUPLE_DELETE_IN_PROGRESS.

Referenced by heap_prune_chain().
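
The helper exists because PruneState caches HTSV results in a narrow integer array, using -1 for slots that never held a tuple. A hypothetical standalone illustration (MiniStatus and get_valid_status are stand-ins, not the real types):

#include <assert.h>
#include <stdint.h>

/* Stand-in for HTSV_Result; the declaration order matters for the range check */
typedef enum
{
    S_DEAD,
    S_LIVE,
    S_RECENTLY_DEAD,
    S_INSERT_IN_PROGRESS,
    S_DELETE_IN_PROGRESS
} MiniStatus;

/* Checked narrowing back to the enum; traps reads of unfilled slots */
static inline MiniStatus
get_valid_status(int8_t status)
{
    assert(status >= S_DEAD && status <= S_DELETE_IN_PROGRESS);
    return (MiniStatus) status;
}

int
main(void)
{
    int8_t htsv[4] = {-1, -1, -1, -1};  /* -1 == "no tuple at this offset" */

    htsv[2] = S_LIVE;                   /* filled during the first pass */
    return get_valid_status(htsv[2]) == S_LIVE ? 0 : 1;
}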

◆ log_heap_prune_and_freeze()

void log_heap_prune_and_freeze ( Relation  relation,
Buffer  buffer,
Buffer  vmbuffer,
uint8  vmflags,
TransactionId  conflict_xid,
bool  cleanup_lock,
PruneReason  reason,
HeapTupleFreeze *  frozen,
int  nfrozen,
OffsetNumber *  redirected,
int  nredirected,
OffsetNumber *  dead,
int  ndead,
OffsetNumber *  unused,
int  nunused 
)

Definition at line 2157 of file pruneheap.c.

2166{
2167 xl_heap_prune xlrec;
2168 XLogRecPtr recptr;
2169 uint8 info;
2170 uint8 regbuf_flags_heap;
2171
2172 /* The following local variables hold data registered in the WAL record: */
2173 xlhp_freeze_plan plans[MaxHeapTuplesPerPage];
2174 xlhp_freeze_plans freeze_plans;
2175 xlhp_prune_items redirect_items;
2176 xlhp_prune_items dead_items;
2177 xlhp_prune_items unused_items;
2178 OffsetNumber frz_offsets[MaxHeapTuplesPerPage];
2179 bool do_prune = nredirected > 0 || ndead > 0 || nunused > 0;
2180 bool do_set_vm = vmflags & VISIBILITYMAP_VALID_BITS;
2181
2182 Assert((vmflags & VISIBILITYMAP_VALID_BITS) == vmflags);
2183
2184 xlrec.flags = 0;
2185 regbuf_flags_heap = REGBUF_STANDARD;
2186
2187 /*
2188 * We can avoid an FPI of the heap page if the only modification we are
2189 * making to it is to set PD_ALL_VISIBLE and checksums/wal_log_hints are
2190 * disabled. Note that if we explicitly skip an FPI, we must not stamp the
2191 * heap page with this record's LSN. Recovery skips records <= the stamped
2192 * LSN, so this could lead to skipping an earlier FPI needed to repair a
2193 * torn page.
2194 */
2195 if (!do_prune &&
2196 nfrozen == 0 &&
2197 (!do_set_vm || !XLogHintBitIsNeeded()))
2198 regbuf_flags_heap |= REGBUF_NO_IMAGE;
2199
2200 /*
2201 * Prepare data for the buffer. The arrays are not actually in the
2202 * buffer, but we pretend that they are. When XLogInsert stores a full
2203 * page image, the arrays can be omitted.
2204 */
2205 XLogBeginInsert();
2206 XLogRegisterBuffer(0, buffer, regbuf_flags_heap);
2207
2208 if (do_set_vm)
2209 XLogRegisterBuffer(1, vmbuffer, 0);
2210
2211 if (nfrozen > 0)
2212 {
2213 int nplans;
2214
2215 xlrec.flags |= XLHP_HAS_FREEZE_PLANS;
2216
2217 /*
2218 * Prepare deduplicated representation for use in the WAL record. This
2219 * destructively sorts frozen tuples array in-place.
2220 */
2221 nplans = heap_log_freeze_plan(frozen, nfrozen, plans, frz_offsets);
2222
2223 freeze_plans.nplans = nplans;
2224 XLogRegisterBufData(0, &freeze_plans,
2225 offsetof(xlhp_freeze_plans, plans));
2226 XLogRegisterBufData(0, plans,
2227 sizeof(xlhp_freeze_plan) * nplans);
2228 }
2229 if (nredirected > 0)
2230 {
2231 xlrec.flags |= XLHP_HAS_REDIRECTIONS;
2232
2233 redirect_items.ntargets = nredirected;
2234 XLogRegisterBufData(0, &redirect_items,
2235 offsetof(xlhp_prune_items, data));
2236 XLogRegisterBufData(0, redirected,
2237 sizeof(OffsetNumber[2]) * nredirected);
2238 }
2239 if (ndead > 0)
2240 {
2241 xlrec.flags |= XLHP_HAS_DEAD_ITEMS;
2242
2243 dead_items.ntargets = ndead;
2244 XLogRegisterBufData(0, &dead_items,
2245 offsetof(xlhp_prune_items, data));
2246 XLogRegisterBufData(0, dead,
2247 sizeof(OffsetNumber) * ndead);
2248 }
2249 if (nunused > 0)
2250 {
2251 xlrec.flags |= XLHP_HAS_NOW_UNUSED_ITEMS;
2252
2253 unused_items.ntargets = nunused;
2254 XLogRegisterBufData(0, &unused_items,
2255 offsetof(xlhp_prune_items, data));
2256 XLogRegisterBufData(0, unused,
2257 sizeof(OffsetNumber) * nunused);
2258 }
2259 if (nfrozen > 0)
2260 XLogRegisterBufData(0, frz_offsets,
2261 sizeof(OffsetNumber) * nfrozen);
2262
2263 /*
2264 * Prepare the main xl_heap_prune record. We already set the XLHP_HAS_*
2265 * flag above.
2266 */
2267 if (vmflags & VISIBILITYMAP_ALL_VISIBLE)
2268 {
2269 xlrec.flags |= XLHP_VM_ALL_VISIBLE;
2270 if (vmflags & VISIBILITYMAP_ALL_FROZEN)
2271 xlrec.flags |= XLHP_VM_ALL_FROZEN;
2272 }
2273 if (RelationIsAccessibleInLogicalDecoding(relation))
2274 xlrec.flags |= XLHP_IS_CATALOG_REL;
2275 if (TransactionIdIsValid(conflict_xid))
2276 xlrec.flags |= XLHP_HAS_CONFLICT_HORIZON;
2277 if (cleanup_lock)
2278 xlrec.flags |= XLHP_CLEANUP_LOCK;
2279 else
2280 {
2281 Assert(nredirected == 0 && ndead == 0);
2282 /* also, any items in 'unused' must've been LP_DEAD previously */
2283 }
2284 XLogRegisterData(&xlrec, SizeOfHeapPrune);
2285 if (TransactionIdIsValid(conflict_xid))
2286 XLogRegisterData(&conflict_xid, sizeof(TransactionId));
2287
2288 switch (reason)
2289 {
2290 case PRUNE_ON_ACCESS:
2291 info = XLOG_HEAP2_PRUNE_ON_ACCESS;
2292 break;
2293 case PRUNE_VACUUM_SCAN:
2294 info = XLOG_HEAP2_PRUNE_VACUUM_SCAN;
2295 break;
2296 case PRUNE_VACUUM_CLEANUP:
2297 info = XLOG_HEAP2_PRUNE_VACUUM_CLEANUP;
2298 break;
2299 default:
2300 elog(ERROR, "unrecognized prune reason: %d", (int) reason);
2301 break;
2302 }
2303 recptr = XLogInsert(RM_HEAP2_ID, info);
2304
2305 if (do_set_vm)
2306 {
2307 Assert(BufferIsDirty(vmbuffer));
2308 PageSetLSN(BufferGetPage(vmbuffer), recptr);
2309 }
2310
2311 /*
2312 * See comment at the top of the function about regbuf_flags_heap for
2313 * details on when we can advance the page LSN.
2314 */
2315 if (do_prune || nfrozen > 0 || (do_set_vm && XLogHintBitIsNeeded()))
2316 {
2317 Assert(BufferIsDirty(buffer));
2318 PageSetLSN(BufferGetPage(buffer), recptr);
2319 }
2320}

References Assert(), BufferGetPage(), BufferIsDirty(), data, elog, ERROR, xl_heap_prune::flags, heap_log_freeze_plan(), MaxHeapTuplesPerPage, xlhp_freeze_plans::nplans, xlhp_prune_items::ntargets, PageSetLSN(), PRUNE_ON_ACCESS, PRUNE_VACUUM_CLEANUP, PRUNE_VACUUM_SCAN, REGBUF_NO_IMAGE, REGBUF_STANDARD, RelationIsAccessibleInLogicalDecoding, SizeOfHeapPrune, TransactionIdIsValid, VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, VISIBILITYMAP_VALID_BITS, XLHP_CLEANUP_LOCK, XLHP_HAS_CONFLICT_HORIZON, XLHP_HAS_DEAD_ITEMS, XLHP_HAS_FREEZE_PLANS, XLHP_HAS_NOW_UNUSED_ITEMS, XLHP_HAS_REDIRECTIONS, XLHP_IS_CATALOG_REL, XLHP_VM_ALL_FROZEN, XLHP_VM_ALL_VISIBLE, XLOG_HEAP2_PRUNE_ON_ACCESS, XLOG_HEAP2_PRUNE_VACUUM_CLEANUP, XLOG_HEAP2_PRUNE_VACUUM_SCAN, XLogBeginInsert(), XLogHintBitIsNeeded, XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by heap_page_prune_and_freeze(), and lazy_vacuum_heap_page().
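
The record layout follows a common pattern: a fixed header whose flag bits announce which variable-size sub-records follow. Below is a simplified, self-contained sketch of that pattern; MiniHeader, build_record and the HAS_* bits are invented for illustration and are much cruder than the real xl_heap_prune format:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HAS_DEAD_ITEMS    0x01
#define HAS_UNUSED_ITEMS  0x02

typedef struct
{
    uint8_t flags;
} MiniHeader;

/* Append optional sub-records after the header, setting a flag for each
 * piece that is present, so a reader can parse the record unambiguously. */
static size_t
build_record(uint8_t *buf,
             const uint16_t *dead, int ndead,
             const uint16_t *unused, int nunused)
{
    MiniHeader hdr = {0};
    size_t     off = sizeof(hdr);

    if (ndead > 0)
    {
        hdr.flags |= HAS_DEAD_ITEMS;
        memcpy(buf + off, dead, sizeof(uint16_t) * (size_t) ndead);
        off += sizeof(uint16_t) * (size_t) ndead;
    }
    if (nunused > 0)
    {
        hdr.flags |= HAS_UNUSED_ITEMS;
        memcpy(buf + off, unused, sizeof(uint16_t) * (size_t) nunused);
        off += sizeof(uint16_t) * (size_t) nunused;
    }
    /* Write the header last, once all flag bits are final */
    memcpy(buf, &hdr, sizeof(hdr));
    return off;
}

int
main(void)
{
    uint8_t  buf[64];
    uint16_t dead[] = {3, 7};
    size_t   len = build_record(buf, dead, 2, NULL, 0);

    printf("record length %zu, flags 0x%02x\n", len, (unsigned) buf[0]);
    return 0;
}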

◆ page_verify_redirects()

static void page_verify_redirects ( Page  page)
static

Definition at line 1837 of file pruneheap.c.

1838{
1839#ifdef USE_ASSERT_CHECKING
1840 OffsetNumber offnum;
1841 OffsetNumber maxoff;
1842
1843 maxoff = PageGetMaxOffsetNumber(page);
1844 for (offnum = FirstOffsetNumber;
1845 offnum <= maxoff;
1846 offnum = OffsetNumberNext(offnum))
1847 {
1848 ItemId itemid = PageGetItemId(page, offnum);
1849 OffsetNumber targoff;
1850 ItemId targitem;
1851 HeapTupleHeader htup;
1852
1853 if (!ItemIdIsRedirected(itemid))
1854 continue;
1855
1856 targoff = ItemIdGetRedirect(itemid);
1857 targitem = PageGetItemId(page, targoff);
1858
1859 Assert(ItemIdIsUsed(targitem));
1860 Assert(ItemIdIsNormal(targitem));
1861 Assert(ItemIdHasStorage(targitem));
1862 htup = (HeapTupleHeader) PageGetItem(page, targitem);
1863 Assert(HeapTupleHeaderIsHeapOnly(htup));
1864 }
1865#endif
1866}

References Assert(), FirstOffsetNumber, HeapTupleHeaderIsHeapOnly(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, OffsetNumberNext, PageGetItem(), PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by heap_page_prune_execute().

◆ prune_freeze_plan()

static void prune_freeze_plan ( Oid  reloid,
Buffer  buffer,
PruneState *  prstate,
OffsetNumber *  off_loc 
)
static

Definition at line 448 of file pruneheap.c.

450{
451 Page page = BufferGetPage(buffer);
452 BlockNumber blockno = BufferGetBlockNumber(buffer);
453 OffsetNumber maxoff = PageGetMaxOffsetNumber(page);
454 OffsetNumber offnum;
455 HeapTupleData tup;
456
457 tup.t_tableOid = reloid;
458
459 /*
460 * Determine HTSV for all tuples, and queue them up for processing as HOT
461 * chain roots or as heap-only items.
462 *
463 * Determining HTSV only once for each tuple is required for correctness,
464 * to deal with cases where running HTSV twice could result in different
465 * results. For example, RECENTLY_DEAD can turn to DEAD if another
466 * checked item causes GlobalVisTestIsRemovableFullXid() to update the
467 * horizon, or INSERT_IN_PROGRESS can change to DEAD if the inserting
468 * transaction aborts.
469 *
470 * It's also good for performance. Most commonly tuples within a page are
471 * stored at decreasing offsets (while the items are stored at increasing
472 * offsets). When processing all tuples on a page this leads to reading
473 * memory at decreasing offsets within a page, with a variable stride.
474 * That's hard for CPU prefetchers to deal with. Processing the items in
475 * reverse order (and thus the tuples in increasing order) increases
476 * prefetching efficiency significantly / decreases the number of cache
477 * misses.
478 */
479 for (offnum = maxoff;
480 offnum >= FirstOffsetNumber;
481 offnum = OffsetNumberPrev(offnum))
482 {
483 ItemId itemid = PageGetItemId(page, offnum);
484 HeapTupleHeader htup;
485
486 /*
487 * Set the offset number so that we can display it along with any
488 * error that occurred while processing this tuple.
489 */
490 *off_loc = offnum;
491
492 prstate->processed[offnum] = false;
493 prstate->htsv[offnum] = -1;
494
495 /* Nothing to do if slot doesn't contain a tuple */
496 if (!ItemIdIsUsed(itemid))
497 {
498 heap_prune_record_unchanged_lp_unused(page, prstate, offnum);
499 continue;
500 }
501
502 if (ItemIdIsDead(itemid))
503 {
504 /*
505 * If the caller set mark_unused_now true, we can set dead line
506 * pointers LP_UNUSED now.
507 */
508 if (unlikely(prstate->mark_unused_now))
509 heap_prune_record_unused(prstate, offnum, false);
510 else
511 heap_prune_record_unchanged_lp_dead(page, prstate, offnum);
512 continue;
513 }
514
515 if (ItemIdIsRedirected(itemid))
516 {
517 /* This is the start of a HOT chain */
518 prstate->root_items[prstate->nroot_items++] = offnum;
519 continue;
520 }
521
522 Assert(ItemIdIsNormal(itemid));
523
524 /*
525 * Get the tuple's visibility status and queue it up for processing.
526 */
527 htup = (HeapTupleHeader) PageGetItem(page, itemid);
528 tup.t_data = htup;
529 tup.t_len = ItemIdGetLength(itemid);
530 ItemPointerSet(&tup.t_self, blockno, offnum);
531
532 prstate->htsv[offnum] = heap_prune_satisfies_vacuum(prstate, &tup,
533 buffer);
534
535 if (!HeapTupleHeaderIsHeapOnly(htup))
536 prstate->root_items[prstate->nroot_items++] = offnum;
537 else
538 prstate->heaponly_items[prstate->nheaponly_items++] = offnum;
539 }
540
541 /*
542 * Process HOT chains.
543 *
544 * We added the items to the array starting from 'maxoff', so by
545 * processing the array in reverse order, we process the items in
546 * ascending offset number order. The order doesn't matter for
547 * correctness, but some quick micro-benchmarking suggests that this is
548 * faster. (Earlier PostgreSQL versions, which scanned all the items on
549 * the page instead of using the root_items array, also did it in
550 * ascending offset number order.)
551 */
552 for (int i = prstate->nroot_items - 1; i >= 0; i--)
553 {
554 offnum = prstate->root_items[i];
555
556 /* Ignore items already processed as part of an earlier chain */
557 if (prstate->processed[offnum])
558 continue;
559
560 /* see preceding loop */
561 *off_loc = offnum;
562
563 /* Process this item or chain of items */
564 heap_prune_chain(page, blockno, maxoff, offnum, prstate);
565 }
566
567 /*
568 * Process any heap-only tuples that were not already processed as part of
569 * a HOT chain.
570 */
571 for (int i = prstate->nheaponly_items - 1; i >= 0; i--)
572 {
573 offnum = prstate->heaponly_items[i];
574
575 if (prstate->processed[offnum])
576 continue;
577
578 /* see preceding loop */
579 *off_loc = offnum;
580
581 /*
582 * If the tuple is DEAD and doesn't chain to anything else, mark it
583 * unused. (If it does chain, we can only remove it as part of
584 * pruning its chain.)
585 *
586 * We need this primarily to handle aborted HOT updates, that is,
587 * XMIN_INVALID heap-only tuples. Those might not be linked to by any
588 * chain, since the parent tuple might be re-updated before any
589 * pruning occurs. So we have to be able to reap them separately from
590 * chain-pruning. (Note that HeapTupleHeaderIsHotUpdated will never
591 * return true for an XMIN_INVALID tuple, so this code will work even
592 * when there were sequential updates within the aborted transaction.)
593 */
594 if (prstate->htsv[offnum] == HEAPTUPLE_DEAD)
595 {
596 ItemId itemid = PageGetItemId(page, offnum);
597 HeapTupleHeader htup = (HeapTupleHeader) PageGetItem(page, itemid);
598
599 if (likely(!HeapTupleHeaderIsHotUpdated(htup)))
600 {
601 HeapTupleHeaderAdvanceConflictHorizon(htup,
602 &prstate->latest_xid_removed);
603 heap_prune_record_unused(prstate, offnum, true);
604 }
605 else
606 {
607 /*
608 * This tuple should've been processed and removed as part of
609 * a HOT chain, so something's wrong. To preserve evidence,
610 * we don't dare to remove it. We cannot leave behind a DEAD
611 * tuple either, because that will cause VACUUM to error out.
612 * Throwing an error with a distinct error message seems like
613 * the least bad option.
614 */
615 elog(ERROR, "dead heap-only tuple (%u, %d) is not linked to from any HOT chain",
616 blockno, offnum);
617 }
618 }
619 else
620 heap_prune_record_unchanged_lp_normal(page, prstate, offnum);
621 }
622
623 /* We should now have processed every tuple exactly once */
624#ifdef USE_ASSERT_CHECKING
625 for (offnum = FirstOffsetNumber;
626 offnum <= maxoff;
627 offnum = OffsetNumberNext(offnum))
628 {
629 *off_loc = offnum;
630
631 Assert(prstate->processed[offnum]);
632 }
633#endif
634
635 /* Clear the offset information once we have processed the given page. */
636 *off_loc = InvalidOffsetNumber;
637}

References Assert(), BufferGetBlockNumber(), BufferGetPage(), elog, ERROR, FirstOffsetNumber, heap_prune_chain(), heap_prune_record_unchanged_lp_dead(), heap_prune_record_unchanged_lp_normal(), heap_prune_record_unchanged_lp_unused(), heap_prune_record_unused(), heap_prune_satisfies_vacuum(), PruneState::heaponly_items, HEAPTUPLE_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderIsHeapOnly(), HeapTupleHeaderIsHotUpdated(), PruneState::htsv, i, InvalidOffsetNumber, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerSet(), PruneState::latest_xid_removed, likely, PruneState::mark_unused_now, PruneState::nheaponly_items, PruneState::nroot_items, OffsetNumberNext, OffsetNumberPrev, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PruneState::processed, PruneState::root_items, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and unlikely.

Referenced by heap_page_prune_and_freeze().
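
The two-pass structure can be illustrated with a short standalone sketch; the offsets and the odd-offset "root" rule below are made up, and only the iteration order mirrors the real function:

#include <stdio.h>

#define FIRST_OFFSET 1
#define MAX_OFFSET   5

int
main(void)
{
    int root_items[MAX_OFFSET];
    int nroot = 0;

    /*
     * Pass 1: visit line pointers from the highest offset down.  Tuple
     * bodies grow from the end of the page backwards, so this touches
     * tuple memory at increasing addresses, which prefetches well.
     */
    for (int off = MAX_OFFSET; off >= FIRST_OFFSET; off--)
    {
        printf("classify item %d\n", off);
        if (off % 2 == 1)           /* pretend odd offsets are chain roots */
            root_items[nroot++] = off;
    }

    /* Pass 2: walk the array backwards, restoring ascending offset order */
    for (int i = nroot - 1; i >= 0; i--)
        printf("prune chain rooted at %d\n", root_items[i]);
    return 0;
}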

◆ prune_freeze_setup()

static void prune_freeze_setup ( PruneFreezeParams *  params,
TransactionId *  new_relfrozen_xid,
MultiXactId *  new_relmin_mxid,
const PruneFreezeResult *  presult,
PruneState *  prstate 
)
static

Definition at line 327 of file pruneheap.c.

332{
333 /* Copy parameters to prstate */
334 prstate->vistest = params->vistest;
335 prstate->mark_unused_now =
336 (params->options & HEAP_PAGE_PRUNE_MARK_UNUSED_NOW) != 0;
337
338 /* cutoffs must be provided if we will attempt freezing */
339 Assert(!(params->options & HEAP_PAGE_PRUNE_FREEZE) || params->cutoffs);
340 prstate->attempt_freeze = (params->options & HEAP_PAGE_PRUNE_FREEZE) != 0;
341 prstate->cutoffs = params->cutoffs;
342
343 /*
344 * Our strategy is to scan the page and make lists of items to change,
345 * then apply the changes within a critical section. This keeps as much
346 * logic as possible out of the critical section, and also ensures that
347 * WAL replay will work the same as the normal case.
348 *
349 * First, initialize the new pd_prune_xid value to zero (indicating no
350 * prunable tuples). If we find any tuples which may soon become
351 * prunable, we will save the lowest relevant XID in new_prune_xid. Also
352 * initialize the rest of our working state.
353 */
354 prstate->new_prune_xid = InvalidTransactionId;
355 prstate->latest_xid_removed = InvalidTransactionId;
356 prstate->nredirected = prstate->ndead = prstate->nunused = 0;
357 prstate->nfrozen = 0;
358 prstate->nroot_items = 0;
359 prstate->nheaponly_items = 0;
360
361 /* initialize page freezing working state */
362 prstate->pagefrz.freeze_required = false;
363 if (prstate->attempt_freeze)
364 {
365 Assert(new_relfrozen_xid && new_relmin_mxid);
366 prstate->pagefrz.FreezePageRelfrozenXid = *new_relfrozen_xid;
367 prstate->pagefrz.NoFreezePageRelfrozenXid = *new_relfrozen_xid;
368 prstate->pagefrz.FreezePageRelminMxid = *new_relmin_mxid;
369 prstate->pagefrz.NoFreezePageRelminMxid = *new_relmin_mxid;
370 }
371 else
372 {
373 Assert(!new_relfrozen_xid && !new_relmin_mxid);
374 prstate->pagefrz.FreezePageRelminMxid = InvalidMultiXactId;
375 prstate->pagefrz.NoFreezePageRelminMxid = InvalidMultiXactId;
376 prstate->pagefrz.FreezePageRelfrozenXid = InvalidTransactionId;
377 prstate->pagefrz.NoFreezePageRelfrozenXid = InvalidTransactionId;
378 }
379
380 prstate->ndeleted = 0;
381 prstate->live_tuples = 0;
382 prstate->recently_dead_tuples = 0;
383 prstate->hastup = false;
384 prstate->lpdead_items = 0;
385 prstate->deadoffsets = (OffsetNumber *) presult->deadoffsets;
386 prstate->frz_conflict_horizon = InvalidTransactionId;
387
388 /*
389 * Vacuum may update the VM after we're done. We can keep track of
390 * whether the page will be all-visible and all-frozen after pruning and
391 * freezing to help the caller to do that.
392 *
393 * Currently, only VACUUM sets the VM bits. To save the effort, only do
394 * the bookkeeping if the caller needs it. Currently, that's tied to
395 * HEAP_PAGE_PRUNE_FREEZE, but it could be a separate flag if you wanted
396 * to update the VM bits without also freezing or freeze without also
397 * setting the VM bits.
398 *
399 * In addition to telling the caller whether it can set the VM bit, we
400 * also use 'all_visible' and 'all_frozen' for our own decision-making. If
401 * the whole page would become frozen, we consider opportunistically
402 * freezing tuples. We will not be able to freeze the whole page if there
403 * are tuples present that are not visible to everyone or if there are
404 * dead tuples which are not yet removable. However, dead tuples which
405 * will be removed by the end of vacuuming should not preclude us from
406 * opportunistically freezing. Because of that, we do not immediately
407 * clear all_visible and all_frozen when we see LP_DEAD items. We fix
408 * that after scanning the line pointers. We must correct all_visible and
409 * all_frozen before we return them to the caller, so that the caller
410 * doesn't set the VM bits incorrectly.
411 */
412 if (prstate->attempt_freeze)
413 {
414 prstate->all_visible = true;
415 prstate->all_frozen = true;
416 }
417 else
418 {
419 /*
420 * Initializing to false allows skipping the work to update them in
421 * heap_prune_record_unchanged_lp_normal().
422 */
423 prstate->all_visible = false;
424 prstate->all_frozen = false;
425 }
426
427 /*
428 * The visibility cutoff xid is the newest xmin of live tuples on the
429 * page. In the common case, this will be set as the conflict horizon the
430 * caller can use for updating the VM. If, at the end of freezing and
431 * pruning, the page is all-frozen, there is no possibility that any
432 * running transaction on the standby does not see tuples on the page as
433 * all-visible, so the conflict horizon remains InvalidTransactionId.
434 */
435 prstate->visibility_cutoff_xid = InvalidTransactionId;
436}

References PruneState::all_frozen, PruneState::all_visible, Assert(), PruneState::attempt_freeze, PruneState::cutoffs, PruneFreezeParams::cutoffs, PruneState::deadoffsets, PruneFreezeResult::deadoffsets, HeapPageFreeze::freeze_required, HeapPageFreeze::FreezePageRelfrozenXid, HeapPageFreeze::FreezePageRelminMxid, PruneState::frz_conflict_horizon, PruneState::hastup, HEAP_PAGE_PRUNE_FREEZE, HEAP_PAGE_PRUNE_MARK_UNUSED_NOW, if(), InvalidMultiXactId, InvalidTransactionId, PruneState::latest_xid_removed, PruneState::live_tuples, PruneState::lpdead_items, PruneState::mark_unused_now, PruneState::ndead, PruneState::ndeleted, PruneState::new_prune_xid, PruneState::nfrozen, PruneState::nheaponly_items, HeapPageFreeze::NoFreezePageRelfrozenXid, HeapPageFreeze::NoFreezePageRelminMxid, PruneState::nredirected, PruneState::nroot_items, PruneState::nunused, PruneFreezeParams::options, PruneState::pagefrz, PruneState::recently_dead_tuples, PruneState::visibility_cutoff_xid, PruneState::vistest, and PruneFreezeParams::vistest.

Referenced by heap_page_prune_and_freeze().
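
As a closing illustration, a minimal sketch of the all_visible/all_frozen initialization policy described above; MiniState and setup_vm_tracking are hypothetical stand-ins, not the real setup code:

#include <stdbool.h>
#include <stdio.h>

typedef struct
{
    bool attempt_freeze;
    bool all_visible;
    bool all_frozen;
} MiniState;

/*
 * Start the page-level flags optimistic only when freezing (and hence VM
 * bookkeeping) was requested; otherwise start false so the per-tuple code
 * can skip maintaining them entirely.
 */
static void
setup_vm_tracking(MiniState *st, bool attempt_freeze)
{
    st->attempt_freeze = attempt_freeze;
    st->all_visible = attempt_freeze;
    st->all_frozen = attempt_freeze;
}

int
main(void)
{
    MiniState st;

    setup_vm_tracking(&st, true);
    printf("all_visible starts %s\n", st.all_visible ? "true" : "false");
    return 0;
}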