PostgreSQL Source Code (git master)
pruneheap.c File Reference
#include "postgres.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/snapmgr.h"
#include "utils/rel.h"
Include dependency graph for pruneheap.c:


Data Structures

struct  PruneState
 

Functions

static HTSV_Result heap_prune_satisfies_vacuum (PruneState *prstate, HeapTuple tup, Buffer buffer)
 
static int heap_prune_chain (Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate)
 
static void heap_prune_record_prunable (PruneState *prstate, TransactionId xid)
 
static void heap_prune_record_redirect (PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum)
 
static void heap_prune_record_dead (PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unused (PruneState *prstate, OffsetNumber offnum)
 
static void page_verify_redirects (Page page)
 
void heap_page_prune_opt (Relation relation, Buffer buffer)
 
int heap_page_prune (Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, int *nnewlpdead, OffsetNumber *off_loc)
 
void heap_page_prune_execute (Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
 
void heap_get_root_tuples (Page page, OffsetNumber *root_offsets)
 

Function Documentation

◆ heap_get_root_tuples()

void heap_get_root_tuples(Page page, OffsetNumber *root_offsets)

Definition at line 1111 of file pruneheap.c.

1112 {
1113  OffsetNumber offnum,
1114  maxoff;
1115 
1116  MemSet(root_offsets, InvalidOffsetNumber,
1117  MaxHeapTuplesPerPage * sizeof(OffsetNumber));
1118 
1119  maxoff = PageGetMaxOffsetNumber(page);
1120  for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
1121  {
1122  ItemId lp = PageGetItemId(page, offnum);
1123  HeapTupleHeader htup;
1124  OffsetNumber nextoffnum;
1125  TransactionId priorXmax;
1126 
1127  /* skip unused and dead items */
1128  if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
1129  continue;
1130 
1131  if (ItemIdIsNormal(lp))
1132  {
1133  htup = (HeapTupleHeader) PageGetItem(page, lp);
1134 
1135  /*
1136  * Check if this tuple is part of a HOT-chain rooted at some other
1137  * tuple. If so, skip it for now; we'll process it when we find
1138  * its root.
1139  */
1140  if (HeapTupleHeaderIsHeapOnly(htup))
1141  continue;
1142 
1143  /*
1144  * This is either a plain tuple or the root of a HOT-chain.
1145  * Remember it in the mapping.
1146  */
1147  root_offsets[offnum - 1] = offnum;
1148 
1149  /* If it's not the start of a HOT-chain, we're done with it */
1150  if (!HeapTupleHeaderIsHotUpdated(htup))
1151  continue;
1152 
1153  /* Set up to scan the HOT-chain */
1154  nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1155  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1156  }
1157  else
1158  {
1159  /* Must be a redirect item. We do not set its root_offsets entry */
1160  Assert(ItemIdIsRedirected(lp));
1161  /* Set up to scan the HOT-chain */
1162  nextoffnum = ItemIdGetRedirect(lp);
1163  priorXmax = InvalidTransactionId;
1164  }
1165 
1166  /*
1167  * Now follow the HOT-chain and collect other tuples in the chain.
1168  *
1169  * Note: Even though this is a nested loop, the complexity of the
1170  * function is O(N) because a tuple in the page should be visited not
1171  * more than twice, once in the outer loop and once in HOT-chain
1172  * chases.
1173  */
1174  for (;;)
1175  {
1176  /* Sanity check (pure paranoia) */
1177  if (offnum < FirstOffsetNumber)
1178  break;
1179 
1180  /*
1181  * An offset past the end of page's line pointer array is possible
1182  * when the array was truncated
1183  */
1184  if (offnum > maxoff)
1185  break;
1186 
1187  lp = PageGetItemId(page, nextoffnum);
1188 
1189  /* Check for broken chains */
1190  if (!ItemIdIsNormal(lp))
1191  break;
1192 
1193  htup = (HeapTupleHeader) PageGetItem(page, lp);
1194 
1195  if (TransactionIdIsValid(priorXmax) &&
1196  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
1197  break;
1198 
1199  /* Remember the root line pointer for this item */
1200  root_offsets[nextoffnum - 1] = offnum;
1201 
1202  /* Advance to next chain member, if any */
1203  if (!HeapTupleHeaderIsHotUpdated(htup))
1204  break;
1205 
1206  /* HOT implies it can't have moved to different partition */
1207  Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
1208 
1209  nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
1210  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
1211  }
1212  }
1213 }

References Assert(), FirstOffsetNumber, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsHotUpdated, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetOffsetNumber(), MaxHeapTuplesPerPage, MemSet, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_build_range_scan(), and heapam_index_validate_scan().
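
For illustration, here is a minimal sketch (not from pruneheap.c) of how a caller such as heapam_index_build_range_scan() can use this function to translate a heap-only tuple's offset into the root line pointer that index entries must reference. The helper name get_root_offset_for() is hypothetical, and the caller is assumed to hold a pin and lock on the buffer containing the page:

#include "postgres.h"
#include "access/heapam.h"
#include "access/htup_details.h"	/* MaxHeapTuplesPerPage */
#include "storage/bufpage.h"

/* Hypothetical helper; the page must belong to a pinned, locked buffer. */
static OffsetNumber
get_root_offset_for(Page page, OffsetNumber offnum)
{
	OffsetNumber root_offsets[MaxHeapTuplesPerPage];

	/* Fills root_offsets[i - 1] with the root offset of line pointer i */
	heap_get_root_tuples(page, root_offsets);

	/* InvalidOffsetNumber means no root was found for this item */
	return root_offsets[offnum - 1];
}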

◆ heap_page_prune()

int heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, TransactionId old_snap_xmin, TimestampTz old_snap_ts, int *nnewlpdead, OffsetNumber *off_loc)

Definition at line 266 of file pruneheap.c.

272 {
273  int ndeleted = 0;
274  Page page = BufferGetPage(buffer);
275  BlockNumber blockno = BufferGetBlockNumber(buffer);
276  OffsetNumber offnum,
277  maxoff;
278  PruneState prstate;
279  HeapTupleData tup;
280 
281  /*
282  * Our strategy is to scan the page and make lists of items to change,
283  * then apply the changes within a critical section. This keeps as much
284  * logic as possible out of the critical section, and also ensures that
285  * WAL replay will work the same as the normal case.
286  *
287  * First, initialize the new pd_prune_xid value to zero (indicating no
288  * prunable tuples). If we find any tuples which may soon become
289  * prunable, we will save the lowest relevant XID in new_prune_xid. Also
290  * initialize the rest of our working state.
291  */
292  prstate.new_prune_xid = InvalidTransactionId;
293  prstate.rel = relation;
294  prstate.vistest = vistest;
295  prstate.old_snap_xmin = old_snap_xmin;
296  prstate.old_snap_ts = old_snap_ts;
297  prstate.old_snap_used = false;
298  prstate.snapshotConflictHorizon = InvalidTransactionId;
299  prstate.nredirected = prstate.ndead = prstate.nunused = 0;
300  memset(prstate.marked, 0, sizeof(prstate.marked));
301 
302  maxoff = PageGetMaxOffsetNumber(page);
303  tup.t_tableOid = RelationGetRelid(prstate.rel);
304 
305  /*
306  * Determine HTSV for all tuples.
307  *
308  * This is required for correctness to deal with cases where running HTSV
309  * twice could result in different results (e.g. RECENTLY_DEAD can turn to
310  * DEAD if another checked item causes GlobalVisTestIsRemovableFullXid()
311  * to update the horizon, INSERT_IN_PROGRESS can change to DEAD if the
312  * inserting transaction aborts, ...). That in turn could cause
313  * heap_prune_chain() to behave incorrectly if a tuple is reached twice,
314  * once directly via a heap_prune_chain() and once following a HOT chain.
315  *
316  * It's also good for performance. Most commonly tuples within a page are
317  * stored at decreasing offsets (while the items are stored at increasing
318  * offsets). When processing all tuples on a page this leads to reading
319  * memory at decreasing offsets within a page, with a variable stride.
320  * That's hard for CPU prefetchers to deal with. Processing the items in
321  * reverse order (and thus the tuples in increasing order) increases
322  * prefetching efficiency significantly / decreases the number of cache
323  * misses.
324  */
325  for (offnum = maxoff;
326  offnum >= FirstOffsetNumber;
327  offnum = OffsetNumberPrev(offnum))
328  {
329  ItemId itemid = PageGetItemId(page, offnum);
330  HeapTupleHeader htup;
331 
332  /* Nothing to do if slot doesn't contain a tuple */
333  if (!ItemIdIsNormal(itemid))
334  {
335  prstate.htsv[offnum] = -1;
336  continue;
337  }
338 
339  htup = (HeapTupleHeader) PageGetItem(page, itemid);
340  tup.t_data = htup;
341  tup.t_len = ItemIdGetLength(itemid);
342  ItemPointerSet(&(tup.t_self), blockno, offnum);
343 
344  /*
345  * Set the offset number so that we can display it along with any
346  * error that occurred while processing this tuple.
347  */
348  if (off_loc)
349  *off_loc = offnum;
350 
351  prstate.htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
352  buffer);
353  }
354 
355  /* Scan the page */
356  for (offnum = FirstOffsetNumber;
357  offnum <= maxoff;
358  offnum = OffsetNumberNext(offnum))
359  {
360  ItemId itemid;
361 
362  /* Ignore items already processed as part of an earlier chain */
363  if (prstate.marked[offnum])
364  continue;
365 
366  /* see preceding loop */
367  if (off_loc)
368  *off_loc = offnum;
369 
370  /* Nothing to do if slot is empty or already dead */
371  itemid = PageGetItemId(page, offnum);
372  if (!ItemIdIsUsed(itemid) || ItemIdIsDead(itemid))
373  continue;
374 
375  /* Process this item or chain of items */
376  ndeleted += heap_prune_chain(buffer, offnum, &prstate);
377  }
378 
379  /* Clear the offset information once we have processed the given page. */
380  if (off_loc)
381  *off_loc = InvalidOffsetNumber;
382 
383  /* Any error while applying the changes is critical */
384  START_CRIT_SECTION();
385 
386  /* Have we found any prunable items? */
387  if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
388  {
389  /*
390  * Apply the planned item changes, then repair page fragmentation, and
391  * update the page's hint bit about whether it has free line pointers.
392  */
393  heap_page_prune_execute(buffer,
394  prstate.redirected, prstate.nredirected,
395  prstate.nowdead, prstate.ndead,
396  prstate.nowunused, prstate.nunused);
397 
398  /*
399  * Update the page's pd_prune_xid field to either zero, or the lowest
400  * XID of any soon-prunable tuple.
401  */
402  ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
403 
404  /*
405  * Also clear the "page is full" flag, since there's no point in
406  * repeating the prune/defrag process until something else happens to
407  * the page.
408  */
409  PageClearFull(page);
410 
411  MarkBufferDirty(buffer);
412 
413  /*
414  * Emit a WAL XLOG_HEAP2_PRUNE record showing what we did
415  */
416  if (RelationNeedsWAL(relation))
417  {
418  xl_heap_prune xlrec;
419  XLogRecPtr recptr;
420 
421  xlrec.snapshotConflictHorizon = prstate.snapshotConflictHorizon;
422  xlrec.nredirected = prstate.nredirected;
423  xlrec.ndead = prstate.ndead;
424 
425  XLogBeginInsert();
426  XLogRegisterData((char *) &xlrec, SizeOfHeapPrune);
427 
428  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
429 
430  /*
431  * The OffsetNumber arrays are not actually in the buffer, but we
432  * pretend that they are. When XLogInsert stores the whole
433  * buffer, the offset arrays need not be stored too.
434  */
435  if (prstate.nredirected > 0)
436  XLogRegisterBufData(0, (char *) prstate.redirected,
437  prstate.nredirected *
438  sizeof(OffsetNumber) * 2);
439 
440  if (prstate.ndead > 0)
441  XLogRegisterBufData(0, (char *) prstate.nowdead,
442  prstate.ndead * sizeof(OffsetNumber));
443 
444  if (prstate.nunused > 0)
445  XLogRegisterBufData(0, (char *) prstate.nowunused,
446  prstate.nunused * sizeof(OffsetNumber));
447 
448  recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_PRUNE);
449 
450  PageSetLSN(BufferGetPage(buffer), recptr);
451  }
452  }
453  else
454  {
455  /*
456  * If we didn't prune anything, but have found a new value for the
457  * pd_prune_xid field, update it and mark the buffer dirty. This is
458  * treated as a non-WAL-logged hint.
459  *
460  * Also clear the "page is full" flag if it is set, since there's no
461  * point in repeating the prune/defrag process until something else
462  * happens to the page.
463  */
464  if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
465  PageIsFull(page))
466  {
467  ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
468  PageClearFull(page);
469  MarkBufferDirtyHint(buffer, true);
470  }
471  }
472 
473  END_CRIT_SECTION();
474 
475  /* Record number of newly-set-LP_DEAD items for caller */
476  *nnewlpdead = prstate.ndead;
477 
478  return ndeleted;
479 }

References BufferGetBlockNumber(), BufferGetPage(), END_CRIT_SECTION, FirstOffsetNumber, heap_page_prune_execute(), heap_prune_chain(), heap_prune_satisfies_vacuum(), PruneState::htsv, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetLength, ItemIdIsDead, ItemIdIsNormal, ItemIdIsUsed, ItemPointerSet(), MarkBufferDirty(), MarkBufferDirtyHint(), PruneState::marked, PruneState::ndead, xl_heap_prune::ndead, PruneState::new_prune_xid, PruneState::nowdead, PruneState::nowunused, PruneState::nredirected, xl_heap_prune::nredirected, PruneState::nunused, OffsetNumberNext, OffsetNumberPrev, PruneState::old_snap_ts, PruneState::old_snap_used, PruneState::old_snap_xmin, PageClearFull(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIsFull(), PageSetLSN(), PruneState::redirected, REGBUF_STANDARD, PruneState::rel, RelationGetRelid, RelationNeedsWAL, SizeOfHeapPrune, PruneState::snapshotConflictHorizon, xl_heap_prune::snapshotConflictHorizon, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, PruneState::vistest, XLOG_HEAP2_PRUNE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by heap_page_prune_opt(), and lazy_scan_prune().
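
As a usage illustration, a minimal sketch of the calling convention, modeled on lazy_scan_prune(): the caller must already hold a pin and a cleanup lock on the buffer. Passing InvalidTransactionId and 0 for the old-snapshot arguments disables the old_snapshot_threshold path, and a NULL off_loc skips the error-context reporting. The wrapper name prune_one_page() is hypothetical:

#include "postgres.h"
#include "access/heapam.h"
#include "access/transam.h"	/* InvalidTransactionId */
#include "utils/snapmgr.h"	/* GlobalVisTestFor() */

/* Hypothetical wrapper; "buf" must be pinned and cleanup-locked. */
static int
prune_one_page(Relation rel, Buffer buf)
{
	GlobalVisState *vistest = GlobalVisTestFor(rel);
	int			nnewlpdead;

	/*
	 * Returns the number of tuples deleted; the count of items newly set
	 * LP_DEAD comes back separately in nnewlpdead.
	 */
	return heap_page_prune(rel, buf, vistest,
						   InvalidTransactionId, 0, /* no limited horizon */
						   &nnewlpdead, NULL);
}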

◆ heap_page_prune_execute()

void heap_page_prune_execute(Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)

Definition at line 912 of file pruneheap.c.

916 {
917  Page page = (Page) BufferGetPage(buffer);
918  OffsetNumber *offnum;
919  HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;
920 
921  /* Shouldn't be called unless there's something to do */
922  Assert(nredirected > 0 || ndead > 0 || nunused > 0);
923 
924  /* Update all redirected line pointers */
925  offnum = redirected;
926  for (int i = 0; i < nredirected; i++)
927  {
928  OffsetNumber fromoff = *offnum++;
929  OffsetNumber tooff = *offnum++;
930  ItemId fromlp = PageGetItemId(page, fromoff);
931  ItemId tolp PG_USED_FOR_ASSERTS_ONLY;
932 
933 #ifdef USE_ASSERT_CHECKING
934 
935  /*
936  * Any existing item that we set as an LP_REDIRECT (any 'from' item)
937  * must be the first item from a HOT chain. If the item has tuple
938  * storage then it can't be a heap-only tuple. Otherwise we are just
939  * maintaining an existing LP_REDIRECT from an existing HOT chain that
940  * has been pruned at least once before now.
941  */
942  if (!ItemIdIsRedirected(fromlp))
943  {
944  Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));
945 
946  htup = (HeapTupleHeader) PageGetItem(page, fromlp);
947  Assert(!HeapTupleHeaderIsHeapOnly(htup));
948  }
949  else
950  {
951  /* We shouldn't need to redundantly set the redirect */
952  Assert(ItemIdGetRedirect(fromlp) != tooff);
953  }
954 
955  /*
956  * The item that we're about to set as an LP_REDIRECT (the 'from'
957  * item) will point to an existing item (the 'to' item) that is
958  * already a heap-only tuple. There can be at most one LP_REDIRECT
959  * item per HOT chain.
960  *
961  * We need to keep around an LP_REDIRECT item (after original
962  * non-heap-only root tuple gets pruned away) so that it's always
963  * possible for VACUUM to easily figure out what TID to delete from
964  * indexes when an entire HOT chain becomes dead. A heap-only tuple
965  * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
966  * tuple can.
967  *
968  * This check may miss problems, e.g. the target of a redirect could
969  * be marked as unused subsequently. The page_verify_redirects() check
970  * below will catch such problems.
971  */
972  tolp = PageGetItemId(page, tooff);
973  Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
974  htup = (HeapTupleHeader) PageGetItem(page, tolp);
975  Assert(HeapTupleHeaderIsHeapOnly(htup));
976 #endif
977 
978  ItemIdSetRedirect(fromlp, tooff);
979  }
980 
981  /* Update all now-dead line pointers */
982  offnum = nowdead;
983  for (int i = 0; i < ndead; i++)
984  {
985  OffsetNumber off = *offnum++;
986  ItemId lp = PageGetItemId(page, off);
987 
988 #ifdef USE_ASSERT_CHECKING
989 
990  /*
991  * An LP_DEAD line pointer must be left behind when the original item
992  * (which is dead to everybody) could still be referenced by a TID in
993  * an index. This should never be necessary with any individual
994  * heap-only tuple item, though. (It's not clear how much of a problem
995  * that would be, but there is no reason to allow it.)
996  */
997  if (ItemIdHasStorage(lp))
998  {
999  Assert(ItemIdIsNormal(lp));
1000  htup = (HeapTupleHeader) PageGetItem(page, lp);
1001  Assert(!HeapTupleHeaderIsHeapOnly(htup));
1002  }
1003  else
1004  {
1005  /* Whole HOT chain becomes dead */
1006  Assert(ItemIdIsRedirected(lp));
1007  }
1008 #endif
1009 
1010  ItemIdSetDead(lp);
1011  }
1012 
1013  /* Update all now-unused line pointers */
1014  offnum = nowunused;
1015  for (int i = 0; i < nunused; i++)
1016  {
1017  OffsetNumber off = *offnum++;
1018  ItemId lp = PageGetItemId(page, off);
1019 
1020 #ifdef USE_ASSERT_CHECKING
1021 
1022  /*
1023  * Only heap-only tuples can become LP_UNUSED during pruning. They
1024  * don't need to be left in place as LP_DEAD items until VACUUM gets
1025  * around to doing index vacuuming.
1026  */
1027  Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
1028  htup = (HeapTupleHeader) PageGetItem(page, lp);
1029  Assert(HeapTupleHeaderIsHeapOnly(htup));
1030 #endif
1031 
1032  ItemIdSetUnused(lp);
1033  }
1034 
1035  /*
1036  * Finally, repair any fragmentation, and update the page's hint bit about
1037  * whether it has free pointers.
1038  */
1039  PageRepairFragmentation(page);
1040 
1041  /*
1042  * Now that the page has been modified, assert that redirect items still
1043  * point to valid targets.
1044  */
1045  page_verify_redirects(page);
1046 }

References Assert(), BufferGetPage(), HeapTupleHeaderIsHeapOnly, i, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdSetDead, ItemIdSetRedirect, ItemIdSetUnused, page_verify_redirects(), PageGetItem(), PageGetItemId(), PageRepairFragmentation(), and PG_USED_FOR_ASSERTS_ONLY.

Referenced by heap_page_prune(), and heap_xlog_prune().
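
A hedged sketch of the expected calling pattern, mirroring the caller in heap_page_prune(): the offset arrays are the caller's planned changes, the buffer must be exclusively locked, and the call belongs inside a critical section alongside marking the buffer dirty and any WAL logging. The wrapper name apply_prune_plan() is hypothetical:

#include "postgres.h"
#include "access/heapam.h"
#include "miscadmin.h"		/* START_CRIT_SECTION / END_CRIT_SECTION */
#include "storage/bufmgr.h"

/* Hypothetical fragment; the arrays hold a caller's planned changes. */
static void
apply_prune_plan(Buffer buffer,
				 OffsetNumber *redirected, int nredirected,
				 OffsetNumber *nowdead, int ndead,
				 OffsetNumber *nowunused, int nunused)
{
	START_CRIT_SECTION();

	heap_page_prune_execute(buffer,
							redirected, nredirected,
							nowdead, ndead,
							nowunused, nunused);
	MarkBufferDirty(buffer);

	/* WAL logging would go here when the relation needs it */

	END_CRIT_SECTION();
}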

◆ heap_page_prune_opt()

void heap_page_prune_opt(Relation relation, Buffer buffer)

Definition at line 108 of file pruneheap.c.

109 {
110  Page page = BufferGetPage(buffer);
111  TransactionId prune_xid;
112  GlobalVisState *vistest;
113  TransactionId limited_xmin = InvalidTransactionId;
114  TimestampTz limited_ts = 0;
115  Size minfree;
116 
117  /*
118  * We can't write WAL in recovery mode, so there's no point trying to
119  * clean the page. The primary will likely issue a cleaning WAL record
120  * soon anyway, so this is no particular loss.
121  */
122  if (RecoveryInProgress())
123  return;
124 
125  /*
126  * XXX: Magic to keep old_snapshot_threshold tests appear "working". They
127  * currently are broken, and discussion of what to do about them is
128  * ongoing. See
129  * https://www.postgresql.org/message-id/20200403001235.e6jfdll3gh2ygbuc%40alap3.anarazel.de
130  */
131  if (old_snapshot_threshold == 0)
132  SnapshotTooOldMagicForTest();
133 
134  /*
135  * First check whether there's any chance there's something to prune,
136  * determining the appropriate horizon is a waste if there's no prune_xid
137  * (i.e. no updates/deletes left potentially dead tuples around).
138  */
139  prune_xid = ((PageHeader) page)->pd_prune_xid;
140  if (!TransactionIdIsValid(prune_xid))
141  return;
142 
143  /*
144  * Check whether prune_xid indicates that there may be dead rows that can
145  * be cleaned up.
146  *
147  * It is OK to check the old snapshot limit before acquiring the cleanup
148  * lock because the worst that can happen is that we are not quite as
149  * aggressive about the cleanup (by however many transaction IDs are
150  * consumed between this point and acquiring the lock). This allows us to
151  * save significant overhead in the case where the page is found not to be
152  * prunable.
153  *
154  * Even if old_snapshot_threshold is set, we first check whether the page
155  * can be pruned without. Both because
156  * TransactionIdLimitedForOldSnapshots() is not cheap, and because not
157  * unnecessarily relying on old_snapshot_threshold avoids causing
158  * conflicts.
159  */
160  vistest = GlobalVisTestFor(relation);
161 
162  if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
163  {
164  if (!OldSnapshotThresholdActive())
165  return;
166 
167  if (!TransactionIdLimitedForOldSnapshots(GlobalVisTestNonRemovableHorizon(vistest),
168  relation,
169  &limited_xmin, &limited_ts))
170  return;
171 
172  if (!TransactionIdPrecedes(prune_xid, limited_xmin))
173  return;
174  }
175 
176  /*
177  * We prune when a previous UPDATE failed to find enough space on the page
178  * for a new tuple version, or when free space falls below the relation's
179  * fill-factor target (but not less than 10%).
180  *
181  * Checking free space here is questionable since we aren't holding any
182  * lock on the buffer; in the worst case we could get a bogus answer. It's
183  * unlikely to be *seriously* wrong, though, since reading either pd_lower
184  * or pd_upper is probably atomic. Avoiding taking a lock seems more
185  * important than sometimes getting a wrong answer in what is after all
186  * just a heuristic estimate.
187  */
188  minfree = RelationGetTargetPageFreeSpace(relation,
189  HEAP_DEFAULT_FILLFACTOR);
190  minfree = Max(minfree, BLCKSZ / 10);
191 
192  if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
193  {
194  /* OK, try to get exclusive buffer lock */
195  if (!ConditionalLockBufferForCleanup(buffer))
196  return;
197 
198  /*
199  * Now that we have buffer lock, get accurate information about the
200  * page's free space, and recheck the heuristic about whether to
201  * prune. (We needn't recheck PageIsPrunable, since no one else could
202  * have pruned while we hold pin.)
203  */
204  if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
205  {
206  int ndeleted,
207  nnewlpdead;
208 
209  ndeleted = heap_page_prune(relation, buffer, vistest, limited_xmin,
210  limited_ts, &nnewlpdead, NULL);
211 
212  /*
213  * Report the number of tuples reclaimed to pgstats. This is
214  * ndeleted minus the number of newly-LP_DEAD-set items.
215  *
216  * We derive the number of dead tuples like this to avoid totally
217  * forgetting about items that were set to LP_DEAD, since they
218  * still need to be cleaned up by VACUUM. We only want to count
219  * heap-only tuples that just became LP_UNUSED in our report,
220  * which don't.
221  *
222  * VACUUM doesn't have to compensate in the same way when it
223  * tracks ndeleted, since it will set the same LP_DEAD items to
224  * LP_UNUSED separately.
225  */
226  if (ndeleted > nnewlpdead)
227  pgstat_update_heap_dead_tuples(relation,
228  ndeleted - nnewlpdead);
229  }
230 
231  /* And release buffer lock */
232  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
233 
234  /*
235  * We avoid reuse of any free space created on the page by unrelated
236  * UPDATEs/INSERTs by opting to not update the FSM at this point. The
237  * free space should be reused by UPDATEs to *this* page.
238  */
239  }
240 }

References BUFFER_LOCK_UNLOCK, BufferGetPage(), ConditionalLockBufferForCleanup(), GlobalVisTestFor(), GlobalVisTestIsRemovableXid(), GlobalVisTestNonRemovableHorizon(), HEAP_DEFAULT_FILLFACTOR, heap_page_prune(), InvalidTransactionId, LockBuffer(), Max, old_snapshot_threshold, OldSnapshotThresholdActive(), PageGetHeapFreeSpace(), PageIsFull(), pgstat_update_heap_dead_tuples(), RecoveryInProgress(), RelationGetTargetPageFreeSpace, SnapshotTooOldMagicForTest(), TransactionIdIsValid, TransactionIdLimitedForOldSnapshots(), and TransactionIdPrecedes().

Referenced by heapam_index_fetch_tuple(), heapam_scan_bitmap_next_block(), and heapgetpage().
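
For context, a minimal sketch of the read-path pattern that the callers above follow (cf. heapgetpage()); the function name read_and_maybe_prune() is hypothetical. Only a pin is required before the call, because heap_page_prune_opt() takes and releases the cleanup lock itself, and silently does nothing if that lock is unavailable or the page does not look worth pruning:

#include "postgres.h"
#include "access/heapam.h"
#include "storage/bufmgr.h"

/* Hypothetical read path, assuming "rel" is open and locked by the caller. */
static void
read_and_maybe_prune(Relation rel, BlockNumber blkno)
{
	Buffer		buffer = ReadBuffer(rel, blkno);

	/* Opportunistically prune while we hold only a pin */
	heap_page_prune_opt(rel, buffer);

	LockBuffer(buffer, BUFFER_LOCK_SHARE);
	/* ... examine tuples ... */
	UnlockReleaseBuffer(buffer);
}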

◆ heap_prune_chain()

static int heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, PruneState *prstate)

Definition at line 591 of file pruneheap.c.

592 {
593  int ndeleted = 0;
594  Page dp = (Page) BufferGetPage(buffer);
595  TransactionId priorXmax = InvalidTransactionId;
596  ItemId rootlp;
597  HeapTupleHeader htup;
598  OffsetNumber latestdead = InvalidOffsetNumber,
599  maxoff = PageGetMaxOffsetNumber(dp),
600  offnum;
601  OffsetNumber chainitems[MaxHeapTuplesPerPage];
602  int nchain = 0,
603  i;
604 
605  rootlp = PageGetItemId(dp, rootoffnum);
606 
607  /*
608  * If it's a heap-only tuple, then it is not the start of a HOT chain.
609  */
610  if (ItemIdIsNormal(rootlp))
611  {
612  Assert(prstate->htsv[rootoffnum] != -1);
613  htup = (HeapTupleHeader) PageGetItem(dp, rootlp);
614 
615  if (HeapTupleHeaderIsHeapOnly(htup))
616  {
617  /*
618  * If the tuple is DEAD and doesn't chain to anything else, mark
619  * it unused immediately. (If it does chain, we can only remove
620  * it as part of pruning its chain.)
621  *
622  * We need this primarily to handle aborted HOT updates, that is,
623  * XMIN_INVALID heap-only tuples. Those might not be linked to by
624  * any chain, since the parent tuple might be re-updated before
625  * any pruning occurs. So we have to be able to reap them
626  * separately from chain-pruning. (Note that
627  * HeapTupleHeaderIsHotUpdated will never return true for an
628  * XMIN_INVALID tuple, so this code will work even when there were
629  * sequential updates within the aborted transaction.)
630  *
631  * Note that we might first arrive at a dead heap-only tuple
632  * either here or while following a chain below. Whichever path
633  * gets there first will mark the tuple unused.
634  */
635  if (prstate->htsv[rootoffnum] == HEAPTUPLE_DEAD &&
636  !HeapTupleHeaderIsHotUpdated(htup))
637  {
638  heap_prune_record_unused(prstate, rootoffnum);
639  HeapTupleHeaderAdvanceConflictHorizon(htup,
640  &prstate->snapshotConflictHorizon);
641  ndeleted++;
642  }
643 
644  /* Nothing more to do */
645  return ndeleted;
646  }
647  }
648 
649  /* Start from the root tuple */
650  offnum = rootoffnum;
651 
652  /* while not end of the chain */
653  for (;;)
654  {
655  ItemId lp;
656  bool tupdead,
657  recent_dead;
658 
659  /* Sanity check (pure paranoia) */
660  if (offnum < FirstOffsetNumber)
661  break;
662 
663  /*
664  * An offset past the end of page's line pointer array is possible
665  * when the array was truncated (original item must have been unused)
666  */
667  if (offnum > maxoff)
668  break;
669 
670  /* If item is already processed, stop --- it must not be same chain */
671  if (prstate->marked[offnum])
672  break;
673 
674  lp = PageGetItemId(dp, offnum);
675 
676  /* Unused item obviously isn't part of the chain */
677  if (!ItemIdIsUsed(lp))
678  break;
679 
680  /*
681  * If we are looking at the redirected root line pointer, jump to the
682  * first normal tuple in the chain. If we find a redirect somewhere
683  * else, stop --- it must not be same chain.
684  */
685  if (ItemIdIsRedirected(lp))
686  {
687  if (nchain > 0)
688  break; /* not at start of chain */
689  chainitems[nchain++] = offnum;
690  offnum = ItemIdGetRedirect(rootlp);
691  continue;
692  }
693 
694  /*
695  * Likewise, a dead line pointer can't be part of the chain. (We
696  * already eliminated the case of dead root tuple outside this
697  * function.)
698  */
699  if (ItemIdIsDead(lp))
700  break;
701 
702  Assert(ItemIdIsNormal(lp));
703  Assert(prstate->htsv[offnum] != -1);
704  htup = (HeapTupleHeader) PageGetItem(dp, lp);
705 
706  /*
707  * Check the tuple XMIN against prior XMAX, if any
708  */
709  if (TransactionIdIsValid(priorXmax) &&
710  !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
711  break;
712 
713  /*
714  * OK, this tuple is indeed a member of the chain.
715  */
716  chainitems[nchain++] = offnum;
717 
718  /*
719  * Check tuple's visibility status.
720  */
721  tupdead = recent_dead = false;
722 
723  switch ((HTSV_Result) prstate->htsv[offnum])
724  {
725  case HEAPTUPLE_DEAD:
726  tupdead = true;
727  break;
728 
729  case HEAPTUPLE_RECENTLY_DEAD:
730  recent_dead = true;
731 
732  /*
733  * This tuple may soon become DEAD. Update the hint field so
734  * that the page is reconsidered for pruning in future.
735  */
736  heap_prune_record_prunable(prstate,
737  HeapTupleHeaderGetUpdateXid(htup));
738  break;
739 
740  case HEAPTUPLE_DELETE_IN_PROGRESS:
741 
742  /*
743  * This tuple may soon become DEAD. Update the hint field so
744  * that the page is reconsidered for pruning in future.
745  */
746  heap_prune_record_prunable(prstate,
747  HeapTupleHeaderGetUpdateXid(htup));
748  break;
749 
750  case HEAPTUPLE_LIVE:
751  case HEAPTUPLE_INSERT_IN_PROGRESS:
752 
753  /*
754  * If we wanted to optimize for aborts, we might consider
755  * marking the page prunable when we see INSERT_IN_PROGRESS.
756  * But we don't. See related decisions about when to mark the
757  * page prunable in heapam.c.
758  */
759  break;
760 
761  default:
762  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
763  break;
764  }
765 
766  /*
767  * Remember the last DEAD tuple seen. We will advance past
768  * RECENTLY_DEAD tuples just in case there's a DEAD one after them;
769  * but we can't advance past anything else. We have to make sure that
770  * we don't miss any DEAD tuples, since DEAD tuples that still have
771  * tuple storage after pruning will confuse VACUUM.
772  */
773  if (tupdead)
774  {
775  latestdead = offnum;
776  HeapTupleHeaderAdvanceConflictHorizon(htup,
777  &prstate->snapshotConflictHorizon);
778  }
779  else if (!recent_dead)
780  break;
781 
782  /*
783  * If the tuple is not HOT-updated, then we are at the end of this
784  * HOT-update chain.
785  */
786  if (!HeapTupleHeaderIsHotUpdated(htup))
787  break;
788 
789  /* HOT implies it can't have moved to different partition */
790  Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
791 
792  /*
793  * Advance to next chain member.
794  */
795  Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
796  BufferGetBlockNumber(buffer));
797  offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
798  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
799  }
800 
801  /*
802  * If we found a DEAD tuple in the chain, adjust the HOT chain so that all
803  * the DEAD tuples at the start of the chain are removed and the root line
804  * pointer is appropriately redirected.
805  */
806  if (OffsetNumberIsValid(latestdead))
807  {
808  /*
809  * Mark as unused each intermediate item that we are able to remove
810  * from the chain.
811  *
812  * When the previous item is the last dead tuple seen, we are at the
813  * right candidate for redirection.
814  */
815  for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
816  {
817  heap_prune_record_unused(prstate, chainitems[i]);
818  ndeleted++;
819  }
820 
821  /*
822  * If the root entry had been a normal tuple, we are deleting it, so
823  * count it in the result. But changing a redirect (even to DEAD
824  * state) doesn't count.
825  */
826  if (ItemIdIsNormal(rootlp))
827  ndeleted++;
828 
829  /*
830  * If the DEAD tuple is at the end of the chain, the entire chain is
831  * dead and the root line pointer can be marked dead. Otherwise just
832  * redirect the root to the correct chain member.
833  */
834  if (i >= nchain)
835  heap_prune_record_dead(prstate, rootoffnum);
836  else
837  heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
838  }
839  else if (nchain < 2 && ItemIdIsRedirected(rootlp))
840  {
841  /*
842  * We found a redirect item that doesn't point to a valid follow-on
843  * item. This can happen if the loop in heap_page_prune caused us to
844  * visit the dead successor of a redirect item before visiting the
845  * redirect item. We can clean up by setting the redirect item to
846  * DEAD state.
847  */
848  heap_prune_record_dead(prstate, rootoffnum);
849  }
850 
851  return ndeleted;
852 }

References Assert(), BufferGetBlockNumber(), BufferGetPage(), elog(), ERROR, FirstOffsetNumber, heap_prune_record_dead(), heap_prune_record_prunable(), heap_prune_record_redirect(), heap_prune_record_unused(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsHotUpdated, PruneState::htsv, i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), PruneState::marked, MaxHeapTuplesPerPage, OffsetNumberIsValid, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PruneState::snapshotConflictHorizon, HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heap_page_prune().

◆ heap_prune_record_dead()

static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)

Definition at line 885 of file pruneheap.c.

886 {
887  Assert(prstate->ndead < MaxHeapTuplesPerPage);
888  prstate->nowdead[prstate->ndead] = offnum;
889  prstate->ndead++;
890  Assert(!prstate->marked[offnum]);
891  prstate->marked[offnum] = true;
892 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::ndead, and PruneState::nowdead.

Referenced by heap_prune_chain().

◆ heap_prune_record_prunable()

static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid)

Definition at line 856 of file pruneheap.c.

857 {
858  /*
859  * This should exactly match the PageSetPrunable macro. We can't store
860  * directly into the page header yet, so we update working state.
861  */
862  Assert(TransactionIdIsNormal(xid));
863  if (!TransactionIdIsValid(prstate->new_prune_xid) ||
864  TransactionIdPrecedes(xid, prstate->new_prune_xid))
865  prstate->new_prune_xid = xid;
866 }

References Assert(), PruneState::new_prune_xid, TransactionIdIsNormal, TransactionIdIsValid, and TransactionIdPrecedes().

Referenced by heap_prune_chain().

◆ heap_prune_record_redirect()

static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum)

Definition at line 870 of file pruneheap.c.

872 {
873  Assert(prstate->nredirected < MaxHeapTuplesPerPage);
874  prstate->redirected[prstate->nredirected * 2] = offnum;
875  prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
876  prstate->nredirected++;
877  Assert(!prstate->marked[offnum]);
878  prstate->marked[offnum] = true;
879  Assert(!prstate->marked[rdoffnum]);
880  prstate->marked[rdoffnum] = true;
881 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::nredirected, and PruneState::redirected.

Referenced by heap_prune_chain().

◆ heap_prune_record_unused()

static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)

Definition at line 896 of file pruneheap.c.

897 {
898  Assert(prstate->nunused < MaxHeapTuplesPerPage);
899  prstate->nowunused[prstate->nunused] = offnum;
900  prstate->nunused++;
901  Assert(!prstate->marked[offnum]);
902  prstate->marked[offnum] = true;
903 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::nowunused, and PruneState::nunused.

Referenced by heap_prune_chain().

◆ heap_prune_satisfies_vacuum()

static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)

Definition at line 499 of file pruneheap.c.

500 {
501  HTSV_Result res;
502  TransactionId dead_after;
503 
504  res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
505 
506  if (res != HEAPTUPLE_RECENTLY_DEAD)
507  return res;
508 
509  /*
510  * If we are already relying on the limited xmin, there is no need to
511  * delay doing so anymore.
512  */
513  if (prstate->old_snap_used)
514  {
515  Assert(TransactionIdIsValid(prstate->old_snap_xmin));
516 
517  if (TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
518  res = HEAPTUPLE_DEAD;
519  return res;
520  }
521 
522  /*
523  * First check if GlobalVisTestIsRemovableXid() is sufficient to find the
524  * row dead. If not, and old_snapshot_threshold is enabled, try to use the
525  * lowered horizon.
526  */
527  if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
528  res = HEAPTUPLE_DEAD;
529  else if (OldSnapshotThresholdActive())
530  {
531  /* haven't determined limited horizon yet, requests */
532  if (!TransactionIdIsValid(prstate->old_snap_xmin))
533  {
534  TransactionId horizon =
535  GlobalVisTestNonRemovableHorizon(prstate->vistest);
536 
537  TransactionIdLimitedForOldSnapshots(horizon, prstate->rel,
538  &prstate->old_snap_xmin,
539  &prstate->old_snap_ts);
540  }
541 
542  if (TransactionIdIsValid(prstate->old_snap_xmin) &&
543  TransactionIdPrecedes(dead_after, prstate->old_snap_xmin))
544  {
545  /*
546  * About to remove row based on snapshot_too_old. Need to raise
547  * the threshold so problematic accesses would error.
548  */
549  Assert(!prstate->old_snap_used);
550  SetOldSnapshotThresholdTimestamp(prstate->old_snap_ts,
551  prstate->old_snap_xmin);
552  prstate->old_snap_used = true;
553  res = HEAPTUPLE_DEAD;
554  }
555  }
556 
557  return res;
558 }

References Assert(), GlobalVisTestIsRemovableXid(), GlobalVisTestNonRemovableHorizon(), HEAPTUPLE_DEAD, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuumHorizon(), PruneState::old_snap_ts, PruneState::old_snap_used, PruneState::old_snap_xmin, OldSnapshotThresholdActive(), PruneState::rel, res, SetOldSnapshotThresholdTimestamp(), TransactionIdIsValid, TransactionIdLimitedForOldSnapshots(), TransactionIdPrecedes(), and PruneState::vistest.

Referenced by heap_page_prune().

◆ page_verify_redirects()

static void page_verify_redirects(Page page)

Definition at line 1063 of file pruneheap.c.

1064 {
1065 #ifdef USE_ASSERT_CHECKING
1066  OffsetNumber offnum;
1067  OffsetNumber maxoff;
1068 
1069  maxoff = PageGetMaxOffsetNumber(page);
1070  for (offnum = FirstOffsetNumber;
1071  offnum <= maxoff;
1072  offnum = OffsetNumberNext(offnum))
1073  {
1074  ItemId itemid = PageGetItemId(page, offnum);
1075  OffsetNumber targoff;
1076  ItemId targitem;
1077  HeapTupleHeader htup;
1078 
1079  if (!ItemIdIsRedirected(itemid))
1080  continue;
1081 
1082  targoff = ItemIdGetRedirect(itemid);
1083  targitem = PageGetItemId(page, targoff);
1084 
1085  Assert(ItemIdIsUsed(targitem));
1086  Assert(ItemIdIsNormal(targitem));
1087  Assert(ItemIdHasStorage(targitem));
1088  htup = (HeapTupleHeader) PageGetItem(page, targitem);
1089  Assert(!HeapTupleHeaderIsHeapOnly(htup));
1090  }
1091 #endif
1092 }

References Assert(), FirstOffsetNumber, HeapTupleHeaderIsHeapOnly, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, OffsetNumberNext, PageGetItem(), PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by heap_page_prune_execute().