PostgreSQL Source Code  git master
pruneheap.c File Reference
#include "postgres.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/htup_details.h"
#include "access/transam.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "utils/snapmgr.h"
#include "utils/rel.h"
Include dependency graph for pruneheap.c:

Go to the source code of this file.

Data Structures

struct  PruneState
 

Functions

static HTSV_Result heap_prune_satisfies_vacuum (PruneState *prstate, HeapTuple tup, Buffer buffer)
 
static int heap_prune_chain (Buffer buffer, OffsetNumber rootoffnum, int8 *htsv, PruneState *prstate)
 
static void heap_prune_record_prunable (PruneState *prstate, TransactionId xid)
 
static void heap_prune_record_redirect (PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum)
 
static void heap_prune_record_dead (PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_dead_or_unused (PruneState *prstate, OffsetNumber offnum)
 
static void heap_prune_record_unused (PruneState *prstate, OffsetNumber offnum)
 
static void page_verify_redirects (Page page)
 
void heap_page_prune_opt (Relation relation, Buffer buffer)
 
void heap_page_prune (Relation relation, Buffer buffer, GlobalVisState *vistest, bool mark_unused_now, PruneResult *presult, OffsetNumber *off_loc)
 
void heap_page_prune_execute (Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
 
void heap_get_root_tuples (Page page, OffsetNumber *root_offsets)
 

Function Documentation

◆ heap_get_root_tuples()

/*
 * For all items in this page, find their respective root line pointers.
 * If item k is part of a HOT-chain with root at item j, then we set
 * root_offsets[k - 1] = j.
 *
 * The passed-in root_offsets array must have MaxHeapTuplesPerPage entries.
 * Unused entries are filled with InvalidOffsetNumber (zero).
 *
 * The function must be called with at least share lock on the buffer, to
 * prevent concurrent prune operations.
 *
 * Note: The information collected here is valid only as long as the caller
 * holds a pin on the buffer.  Once pin is released, a tuple might be pruned
 * and reused by a completely unrelated tuple.
 */
void
heap_get_root_tuples(Page page, OffsetNumber *root_offsets)
{
    OffsetNumber offnum,
                maxoff;

    MemSet(root_offsets, InvalidOffsetNumber,
           MaxHeapTuplesPerPage * sizeof(OffsetNumber));

    maxoff = PageGetMaxOffsetNumber(page);
    for (offnum = FirstOffsetNumber; offnum <= maxoff; offnum = OffsetNumberNext(offnum))
    {
        ItemId      lp = PageGetItemId(page, offnum);
        HeapTupleHeader htup;
        OffsetNumber nextoffnum;
        TransactionId priorXmax;

        /* skip unused and dead items */
        if (!ItemIdIsUsed(lp) || ItemIdIsDead(lp))
            continue;

        if (ItemIdIsNormal(lp))
        {
            htup = (HeapTupleHeader) PageGetItem(page, lp);

            /*
             * Check if this tuple is part of a HOT-chain rooted at some other
             * tuple. If so, skip it for now; we'll process it when we find
             * its root.
             */
            if (HeapTupleHeaderIsHeapOnly(htup))
                continue;

            /*
             * This is either a plain tuple or the root of a HOT-chain.
             * Remember it in the mapping.
             */
            root_offsets[offnum - 1] = offnum;

            /* If it's not the start of a HOT-chain, we're done with it */
            if (!HeapTupleHeaderIsHotUpdated(htup))
                continue;

            /* Set up to scan the HOT-chain */
            nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
            priorXmax = HeapTupleHeaderGetUpdateXid(htup);
        }
        else
        {
            /* Must be a redirect item. We do not set its root_offsets entry */
            Assert(ItemIdIsRedirected(lp));
            /* Set up to scan the HOT-chain */
            nextoffnum = ItemIdGetRedirect(lp);
            priorXmax = InvalidTransactionId;
        }

        /*
         * Now follow the HOT-chain and collect other tuples in the chain.
         *
         * Note: Even though this is a nested loop, the complexity of the
         * function is O(N) because a tuple in the page should be visited not
         * more than twice, once in the outer loop and once in HOT-chain
         * chases.
         */
        for (;;)
        {
            /*
             * Sanity check (pure paranoia).  Note that we must validate
             * nextoffnum -- the offset we are about to look up -- not offnum,
             * which is the (already validated) root and never changes inside
             * this loop.
             */
            if (nextoffnum < FirstOffsetNumber)
                break;

            /*
             * An offset past the end of page's line pointer array is possible
             * when the array was truncated
             */
            if (nextoffnum > maxoff)
                break;

            lp = PageGetItemId(page, nextoffnum);

            /* Check for broken chains */
            if (!ItemIdIsNormal(lp))
                break;

            htup = (HeapTupleHeader) PageGetItem(page, lp);

            /* Chain is broken if the xmin doesn't match the prior xmax */
            if (TransactionIdIsValid(priorXmax) &&
                !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(htup)))
                break;

            /* Remember the root line pointer for this item */
            root_offsets[nextoffnum - 1] = offnum;

            /* Advance to next chain member, if any */
            if (!HeapTupleHeaderIsHotUpdated(htup))
                break;

            /* HOT implies it can't have moved to different partition */
            Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));

            nextoffnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
            priorXmax = HeapTupleHeaderGetUpdateXid(htup);
        }
    }
}
static Item PageGetItem(Page page, ItemId itemId)
Definition: bufpage.h:351
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:240
static OffsetNumber PageGetMaxOffsetNumber(Page page)
Definition: bufpage.h:369
#define MemSet(start, val, len)
Definition: c.h:1009
uint32 TransactionId
Definition: c.h:641
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:499
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:444
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:309
#define MaxHeapTuplesPerPage
Definition: htup_details.h:572
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:361
#define HeapTupleHeaderIsHotUpdated(tup)
Definition: htup_details.h:482
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
Assert(fmt[strlen(fmt) - 1] !='\n')
#define InvalidOffsetNumber
Definition: off.h:26
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
ItemPointerData t_ctid
Definition: htup_details.h:161
#define InvalidTransactionId
Definition: transam.h:31
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define TransactionIdIsValid(xid)
Definition: transam.h:41

References Assert(), FirstOffsetNumber, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsHotUpdated, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetOffsetNumber(), MaxHeapTuplesPerPage, MemSet, OffsetNumberNext, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), HeapTupleHeaderData::t_ctid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_build_range_scan(), and heapam_index_validate_scan().

◆ heap_page_prune()

/*
 * Prune and repair fragmentation in the specified page.
 *
 * Caller must have pin and buffer cleanup lock on the page.
 *
 * vistest is used to distinguish whether tuples are DEAD or RECENTLY_DEAD
 * (see heap_prune_satisfies_vacuum).
 *
 * mark_unused_now indicates whether or not dead items can be set LP_UNUSED
 * during pruning (only safe when the relation has no indexes).
 *
 * off_loc is the offset location required by the caller to use in error
 * callback.
 *
 * presult contains output parameters needed by callers, such as the number
 * of tuples removed and the number of line pointers newly marked LP_DEAD.
 * heap_page_prune() is responsible for initializing it.
 */
void
heap_page_prune(Relation relation, Buffer buffer,
                GlobalVisState *vistest,
                bool mark_unused_now,
                PruneResult *presult,
                OffsetNumber *off_loc)
{
    Page        page = BufferGetPage(buffer);
    BlockNumber blockno = BufferGetBlockNumber(buffer);
    OffsetNumber offnum,
                maxoff;
    PruneState  prstate;
    HeapTupleData tup;

    /*
     * Our strategy is to scan the page and make lists of items to change,
     * then apply the changes within a critical section.  This keeps as much
     * logic as possible out of the critical section, and also ensures that
     * WAL replay will work the same as the normal case.
     *
     * First, initialize the new pd_prune_xid value to zero (indicating no
     * prunable tuples).  If we find any tuples which may soon become
     * prunable, we will save the lowest relevant XID in new_prune_xid. Also
     * initialize the rest of our working state.
     */
    prstate.new_prune_xid = InvalidTransactionId;
    prstate.rel = relation;
    prstate.vistest = vistest;
    prstate.mark_unused_now = mark_unused_now;
    prstate.snapshotConflictHorizon = InvalidTransactionId;
    prstate.nredirected = prstate.ndead = prstate.nunused = 0;
    memset(prstate.marked, 0, sizeof(prstate.marked));

    /*
     * presult->htsv is not initialized here because all ntuple spots in the
     * array will be set either to a valid HTSV_Result value or -1.
     */
    presult->ndeleted = 0;
    presult->nnewlpdead = 0;

    maxoff = PageGetMaxOffsetNumber(page);
    tup.t_tableOid = RelationGetRelid(prstate.rel);

    /*
     * Determine HTSV for all tuples.
     *
     * This is required for correctness to deal with cases where running HTSV
     * twice could result in different results (e.g. RECENTLY_DEAD can turn to
     * DEAD if another checked item causes GlobalVisTestIsRemovableFullXid()
     * to update the horizon, INSERT_IN_PROGRESS can change to DEAD if the
     * inserting transaction aborts, ...). That in turn could cause
     * heap_prune_chain() to behave incorrectly if a tuple is reached twice,
     * once directly via a heap_prune_chain() and once following a HOT chain.
     *
     * It's also good for performance. Most commonly tuples within a page are
     * stored at decreasing offsets (while the items are stored at increasing
     * offsets). When processing all tuples on a page this leads to reading
     * memory at decreasing offsets within a page, with a variable stride.
     * That's hard for CPU prefetchers to deal with. Processing the items in
     * reverse order (and thus the tuples in increasing order) increases
     * prefetching efficiency significantly / decreases the number of cache
     * misses.
     */
    for (offnum = maxoff;
         offnum >= FirstOffsetNumber;
         offnum = OffsetNumberPrev(offnum))
    {
        ItemId      itemid = PageGetItemId(page, offnum);
        HeapTupleHeader htup;

        /* Nothing to do if slot doesn't contain a tuple */
        if (!ItemIdIsNormal(itemid))
        {
            presult->htsv[offnum] = -1;
            continue;
        }

        htup = (HeapTupleHeader) PageGetItem(page, itemid);
        tup.t_data = htup;
        tup.t_len = ItemIdGetLength(itemid);
        ItemPointerSet(&(tup.t_self), blockno, offnum);

        /*
         * Set the offset number so that we can display it along with any
         * error that occurred while processing this tuple.
         */
        if (off_loc)
            *off_loc = offnum;

        presult->htsv[offnum] = heap_prune_satisfies_vacuum(&prstate, &tup,
                                                            buffer);
    }

    /* Scan the page */
    for (offnum = FirstOffsetNumber;
         offnum <= maxoff;
         offnum = OffsetNumberNext(offnum))
    {
        ItemId      itemid;

        /* Ignore items already processed as part of an earlier chain */
        if (prstate.marked[offnum])
            continue;

        /* see preceding loop */
        if (off_loc)
            *off_loc = offnum;

        /* Nothing to do if slot is empty */
        itemid = PageGetItemId(page, offnum);
        if (!ItemIdIsUsed(itemid))
            continue;

        /* Process this item or chain of items */
        presult->ndeleted += heap_prune_chain(buffer, offnum,
                                              presult->htsv, &prstate);
    }

    /* Clear the offset information once we have processed the given page. */
    if (off_loc)
        *off_loc = InvalidOffsetNumber;

    /* Any error while applying the changes is critical */
    START_CRIT_SECTION();

    /* Have we found any prunable items? */
    if (prstate.nredirected > 0 || prstate.ndead > 0 || prstate.nunused > 0)
    {
        /*
         * Apply the planned item changes, then repair page fragmentation, and
         * update the page's hint bit about whether it has free line pointers.
         */
        heap_page_prune_execute(buffer,
                                prstate.redirected, prstate.nredirected,
                                prstate.nowdead, prstate.ndead,
                                prstate.nowunused, prstate.nunused);

        /*
         * Update the page's pd_prune_xid field to either zero, or the lowest
         * XID of any soon-prunable tuple.
         */
        ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;

        /*
         * Also clear the "page is full" flag, since there's no point in
         * repeating the prune/defrag process until something else happens to
         * the page.
         */
        PageClearFull(page);

        MarkBufferDirty(buffer);

        /*
         * Emit a WAL XLOG_HEAP2_PRUNE record showing what we did
         */
        if (RelationNeedsWAL(relation))
        {
            xl_heap_prune xlrec;
            XLogRecPtr  recptr;

            xlrec.isCatalogRel = RelationIsAccessibleInLogicalDecoding(relation);
            xlrec.snapshotConflictHorizon = prstate.snapshotConflictHorizon;
            xlrec.nredirected = prstate.nredirected;
            xlrec.ndead = prstate.ndead;

            XLogBeginInsert();
            XLogRegisterData((char *) &xlrec, SizeOfHeapPrune);

            XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

            /*
             * The OffsetNumber arrays are not actually in the buffer, but we
             * pretend that they are.  When XLogInsert stores the whole
             * buffer, the offset arrays need not be stored too.
             */
            if (prstate.nredirected > 0)
                XLogRegisterBufData(0, (char *) prstate.redirected,
                                    prstate.nredirected *
                                    sizeof(OffsetNumber) * 2);

            if (prstate.ndead > 0)
                XLogRegisterBufData(0, (char *) prstate.nowdead,
                                    prstate.ndead * sizeof(OffsetNumber));

            if (prstate.nunused > 0)
                XLogRegisterBufData(0, (char *) prstate.nowunused,
                                    prstate.nunused * sizeof(OffsetNumber));

            recptr = XLogInsert(RM_HEAP2_ID, XLOG_HEAP2_PRUNE);

            PageSetLSN(BufferGetPage(buffer), recptr);
        }
    }
    else
    {
        /*
         * If we didn't prune anything, but have found a new value for the
         * pd_prune_xid field, update it and mark the buffer dirty. This is
         * treated as a non-WAL-logged hint.
         *
         * Also clear the "page is full" flag if it is set, since there's no
         * point in repeating the prune/defrag process until something else
         * happens to the page.
         */
        if (((PageHeader) page)->pd_prune_xid != prstate.new_prune_xid ||
            PageIsFull(page))
        {
            ((PageHeader) page)->pd_prune_xid = prstate.new_prune_xid;
            PageClearFull(page);
            MarkBufferDirtyHint(buffer, true);
        }
    }

    END_CRIT_SECTION();

    /* Record number of newly-set-LP_DEAD items for caller */
    presult->nnewlpdead = prstate.ndead;
}
uint32 BlockNumber
Definition: block.h:31
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:3378
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:2190
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:4625
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:350
PageHeaderData * PageHeader
Definition: bufpage.h:170
Pointer Page
Definition: bufpage.h:78
static void PageClearFull(Page page)
Definition: bufpage.h:420
static void PageSetLSN(Page page, XLogRecPtr lsn)
Definition: bufpage.h:388
static bool PageIsFull(Page page)
Definition: bufpage.h:410
#define XLOG_HEAP2_PRUNE
Definition: heapam_xlog.h:54
#define SizeOfHeapPrune
Definition: heapam_xlog.h:253
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
static void ItemPointerSet(ItemPointerData *pointer, BlockNumber blockNumber, OffsetNumber offNum)
Definition: itemptr.h:135
#define START_CRIT_SECTION()
Definition: miscadmin.h:149
#define END_CRIT_SECTION()
Definition: miscadmin.h:151
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
static int heap_prune_chain(Buffer buffer, OffsetNumber rootoffnum, int8 *htsv, PruneState *prstate)
Definition: pruneheap.c:488
static HTSV_Result heap_prune_satisfies_vacuum(PruneState *prstate, HeapTuple tup, Buffer buffer)
Definition: pruneheap.c:439
void heap_page_prune_execute(Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused)
Definition: pruneheap.c:840
#define RelationGetRelid(relation)
Definition: rel.h:504
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:683
#define RelationNeedsWAL(relation)
Definition: rel.h:627
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
HeapTupleHeader t_data
Definition: htup.h:68
Oid t_tableOid
Definition: htup.h:66
int nnewlpdead
Definition: heapam.h:200
int ndeleted
Definition: heapam.h:199
int8 htsv[MaxHeapTuplesPerPage+1]
Definition: heapam.h:211
int ndead
Definition: pruneheap.c:44
TransactionId new_prune_xid
Definition: pruneheap.c:41
OffsetNumber nowdead[MaxHeapTuplesPerPage]
Definition: pruneheap.c:48
bool marked[MaxHeapTuplesPerPage+1]
Definition: pruneheap.c:57
OffsetNumber nowunused[MaxHeapTuplesPerPage]
Definition: pruneheap.c:49
bool mark_unused_now
Definition: pruneheap.c:39
GlobalVisState * vistest
Definition: pruneheap.c:37
Relation rel
Definition: pruneheap.c:34
OffsetNumber redirected[MaxHeapTuplesPerPage *2]
Definition: pruneheap.c:47
int nredirected
Definition: pruneheap.c:43
int nunused
Definition: pruneheap.c:45
TransactionId snapshotConflictHorizon
Definition: pruneheap.c:42
TransactionId snapshotConflictHorizon
Definition: heapam_xlog.h:245
uint16 nredirected
Definition: heapam_xlog.h:246
uint64 XLogRecPtr
Definition: xlogdefs.h:21
void XLogRegisterData(char *data, uint32 len)
Definition: xloginsert.c:365
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:475
void XLogRegisterBufData(uint8 block_id, char *data, uint32 len)
Definition: xloginsert.c:406
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:243
void XLogBeginInsert(void)
Definition: xloginsert.c:150
#define REGBUF_STANDARD
Definition: xloginsert.h:34

References BufferGetBlockNumber(), BufferGetPage(), END_CRIT_SECTION, FirstOffsetNumber, heap_page_prune_execute(), heap_prune_chain(), heap_prune_satisfies_vacuum(), PruneResult::htsv, InvalidOffsetNumber, InvalidTransactionId, xl_heap_prune::isCatalogRel, ItemIdGetLength, ItemIdIsNormal, ItemIdIsUsed, ItemPointerSet(), PruneState::mark_unused_now, MarkBufferDirty(), MarkBufferDirtyHint(), PruneState::marked, PruneState::ndead, xl_heap_prune::ndead, PruneResult::ndeleted, PruneState::new_prune_xid, PruneResult::nnewlpdead, PruneState::nowdead, PruneState::nowunused, PruneState::nredirected, xl_heap_prune::nredirected, PruneState::nunused, OffsetNumberNext, OffsetNumberPrev, PageClearFull(), PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PageIsFull(), PageSetLSN(), PruneState::redirected, REGBUF_STANDARD, PruneState::rel, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, SizeOfHeapPrune, PruneState::snapshotConflictHorizon, xl_heap_prune::snapshotConflictHorizon, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, PruneState::vistest, XLOG_HEAP2_PRUNE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by heap_page_prune_opt(), and lazy_scan_prune().

◆ heap_page_prune_execute()

/*
 * Perform the actual page changes needed by heap_page_prune.
 *
 * It is expected that the caller has a full cleanup lock on the buffer and
 * has already established a critical section; any failure partway through
 * would leave the page in an inconsistent state.
 */
void
heap_page_prune_execute(Buffer buffer,
                        OffsetNumber *redirected, int nredirected,
                        OffsetNumber *nowdead, int ndead,
                        OffsetNumber *nowunused, int nunused)
{
    Page        page = (Page) BufferGetPage(buffer);
    OffsetNumber *offnum;
    HeapTupleHeader htup PG_USED_FOR_ASSERTS_ONLY;

    /* Shouldn't be called unless there's something to do */
    Assert(nredirected > 0 || ndead > 0 || nunused > 0);

    /* Update all redirected line pointers */
    offnum = redirected;
    for (int i = 0; i < nredirected; i++)
    {
        OffsetNumber fromoff = *offnum++;
        OffsetNumber tooff = *offnum++;
        ItemId      fromlp = PageGetItemId(page, fromoff);
        ItemId      tolp PG_USED_FOR_ASSERTS_ONLY;

#ifdef USE_ASSERT_CHECKING

        /*
         * Any existing item that we set as an LP_REDIRECT (any 'from' item)
         * must be the first item from a HOT chain.  If the item has tuple
         * storage then it can't be a heap-only tuple.  Otherwise we are just
         * maintaining an existing LP_REDIRECT from an existing HOT chain that
         * has been pruned at least once before now.
         */
        if (!ItemIdIsRedirected(fromlp))
        {
            Assert(ItemIdHasStorage(fromlp) && ItemIdIsNormal(fromlp));

            htup = (HeapTupleHeader) PageGetItem(page, fromlp);
            Assert(!HeapTupleHeaderIsHeapOnly(htup));
        }
        else
        {
            /* We shouldn't need to redundantly set the redirect */
            Assert(ItemIdGetRedirect(fromlp) != tooff);
        }

        /*
         * The item that we're about to set as an LP_REDIRECT (the 'from'
         * item) will point to an existing item (the 'to' item) that is
         * already a heap-only tuple.  There can be at most one LP_REDIRECT
         * item per HOT chain.
         *
         * We need to keep around an LP_REDIRECT item (after original
         * non-heap-only root tuple gets pruned away) so that it's always
         * possible for VACUUM to easily figure out what TID to delete from
         * indexes when an entire HOT chain becomes dead.  A heap-only tuple
         * can never become LP_DEAD; an LP_REDIRECT item or a regular heap
         * tuple can.
         *
         * This check may miss problems, e.g. the target of a redirect could
         * be marked as unused subsequently. The page_verify_redirects() check
         * below will catch such problems.
         */
        tolp = PageGetItemId(page, tooff);
        Assert(ItemIdHasStorage(tolp) && ItemIdIsNormal(tolp));
        htup = (HeapTupleHeader) PageGetItem(page, tolp);
        Assert(HeapTupleHeaderIsHeapOnly(htup));
#endif

        ItemIdSetRedirect(fromlp, tooff);
    }

    /* Update all now-dead line pointers */
    offnum = nowdead;
    for (int i = 0; i < ndead; i++)
    {
        OffsetNumber off = *offnum++;
        ItemId      lp = PageGetItemId(page, off);

#ifdef USE_ASSERT_CHECKING

        /*
         * An LP_DEAD line pointer must be left behind when the original item
         * (which is dead to everybody) could still be referenced by a TID in
         * an index.  This should never be necessary with any individual
         * heap-only tuple item, though. (It's not clear how much of a problem
         * that would be, but there is no reason to allow it.)
         */
        if (ItemIdHasStorage(lp))
        {
            Assert(ItemIdIsNormal(lp));
            htup = (HeapTupleHeader) PageGetItem(page, lp);
            Assert(!HeapTupleHeaderIsHeapOnly(htup));
        }
        else
        {
            /* Whole HOT chain becomes dead */
            Assert(ItemIdIsRedirected(lp));
        }
#endif

        ItemIdSetDead(lp);
    }

    /* Update all now-unused line pointers */
    offnum = nowunused;
    for (int i = 0; i < nunused; i++)
    {
        OffsetNumber off = *offnum++;
        ItemId      lp = PageGetItemId(page, off);

#ifdef USE_ASSERT_CHECKING

        /*
         * When heap_page_prune() was called, mark_unused_now may have been
         * passed as true, which allows would-be LP_DEAD items to be made
         * LP_UNUSED instead.  This is only possible if the relation has no
         * indexes.  If there are any dead items, then mark_unused_now was not
         * true and every item being marked LP_UNUSED must refer to a
         * heap-only tuple.
         */
        if (ndead > 0)
        {
            Assert(ItemIdHasStorage(lp) && ItemIdIsNormal(lp));
            htup = (HeapTupleHeader) PageGetItem(page, lp);
            Assert(HeapTupleHeaderIsHeapOnly(htup));
        }
        else
        {
            Assert(ItemIdIsUsed(lp));
        }

#endif

        ItemIdSetUnused(lp);
    }

    /*
     * Finally, repair any fragmentation, and update the page's hint bit about
     * whether it has free pointers.
     */
    PageRepairFragmentation(page);

    /*
     * Now that the page has been modified, assert that redirect items still
     * point to valid targets.
     */
    page_verify_redirects(page);
}
void PageRepairFragmentation(Page page)
Definition: bufpage.c:699
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:171
int i
Definition: isn.c:73
#define ItemIdSetRedirect(itemId, link)
Definition: itemid.h:152
#define ItemIdSetDead(itemId)
Definition: itemid.h:164
#define ItemIdSetUnused(itemId)
Definition: itemid.h:128
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
static void page_verify_redirects(Page page)
Definition: pruneheap.c:1002

References Assert(), BufferGetPage(), HeapTupleHeaderIsHeapOnly, i, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemIdSetDead, ItemIdSetRedirect, ItemIdSetUnused, page_verify_redirects(), PageGetItem(), PageGetItemId(), PageRepairFragmentation(), and PG_USED_FOR_ASSERTS_ONLY.

Referenced by heap_page_prune(), and heap_xlog_prune().

◆ heap_page_prune_opt()

/*
 * Optionally prune and repair fragmentation in the specified page.
 *
 * This is an opportunistic function.  It will perform housekeeping only if
 * the page heuristically looks like a candidate for pruning and we can
 * acquire buffer cleanup lock without blocking.
 *
 * Note: this is called quite often.  It's important that it fall out quickly
 * if there's not any use in pruning.
 *
 * Caller must have pin on the buffer, and must *not* have a lock on it.
 */
void
heap_page_prune_opt(Relation relation, Buffer buffer)
{
    Page        page = BufferGetPage(buffer);
    TransactionId prune_xid;
    GlobalVisState *vistest;
    Size        minfree;

    /*
     * We can't write WAL in recovery mode, so there's no point trying to
     * clean the page. The primary will likely issue a cleaning WAL record
     * soon anyway, so this is no particular loss.
     */
    if (RecoveryInProgress())
        return;

    /*
     * First check whether there's any chance there's something to prune,
     * determining the appropriate horizon is a waste if there's no prune_xid
     * (i.e. no updates/deletes left potentially dead tuples around).
     */
    prune_xid = ((PageHeader) page)->pd_prune_xid;
    if (!TransactionIdIsValid(prune_xid))
        return;

    /*
     * Check whether prune_xid indicates that there may be dead rows that can
     * be cleaned up.
     */
    vistest = GlobalVisTestFor(relation);

    if (!GlobalVisTestIsRemovableXid(vistest, prune_xid))
        return;

    /*
     * We prune when a previous UPDATE failed to find enough space on the page
     * for a new tuple version, or when free space falls below the relation's
     * fill-factor target (but not less than 10%).
     *
     * Checking free space here is questionable since we aren't holding any
     * lock on the buffer; in the worst case we could get a bogus answer. It's
     * unlikely to be *seriously* wrong, though, since reading either pd_lower
     * or pd_upper is probably atomic.  Avoiding taking a lock seems more
     * important than sometimes getting a wrong answer in what is after all
     * just a heuristic estimate.
     */
    minfree = RelationGetTargetPageFreeSpace(relation,
                                             HEAP_DEFAULT_FILLFACTOR);
    minfree = Max(minfree, BLCKSZ / 10);

    if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
    {
        /* OK, try to get exclusive buffer lock */
        if (!ConditionalLockBufferForCleanup(buffer))
            return;

        /*
         * Now that we have buffer lock, get accurate information about the
         * page's free space, and recheck the heuristic about whether to
         * prune.
         */
        if (PageIsFull(page) || PageGetHeapFreeSpace(page) < minfree)
        {
            PruneResult presult;

            /*
             * For now, pass mark_unused_now as false regardless of whether or
             * not the relation has indexes, since we cannot safely determine
             * that during on-access pruning with the current implementation.
             */
            heap_page_prune(relation, buffer, vistest, false,
                            &presult, NULL);

            /*
             * Report the number of tuples reclaimed to pgstats. This is
             * presult.ndeleted minus the number of newly-LP_DEAD-set items.
             *
             * We derive the number of dead tuples like this to avoid totally
             * forgetting about items that were set to LP_DEAD, since they
             * still need to be cleaned up by VACUUM.  We only want to count
             * heap-only tuples that just became LP_UNUSED in our report,
             * which don't.
             *
             * VACUUM doesn't have to compensate in the same way when it
             * tracks ndeleted, since it will set the same LP_DEAD items to
             * LP_UNUSED separately.
             */
            if (presult.ndeleted > presult.nnewlpdead)
                pgstat_update_heap_dead_tuples(relation,
                                               presult.ndeleted - presult.nnewlpdead);
        }

        /* And release buffer lock */
        LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

        /*
         * We avoid reuse of any free space created on the page by unrelated
         * UPDATEs/INSERTs by opting to not update the FSM at this point. The
         * free space should be reused by UPDATEs to *this* page.
         */
    }
}
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4796
bool ConditionalLockBufferForCleanup(Buffer buffer)
Definition: bufmgr.c:5037
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:157
Size PageGetHeapFreeSpace(Page page)
Definition: bufpage.c:991
#define Max(x, y)
Definition: c.h:987
size_t Size
Definition: c.h:594
void pgstat_update_heap_dead_tuples(Relation rel, int delta)
bool GlobalVisTestIsRemovableXid(GlobalVisState *state, TransactionId xid)
Definition: procarray.c:4175
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4018
void heap_page_prune(Relation relation, Buffer buffer, GlobalVisState *vistest, bool mark_unused_now, PruneResult *presult, OffsetNumber *off_loc)
Definition: pruneheap.c:216
#define RelationGetTargetPageFreeSpace(relation, defaultff)
Definition: rel.h:377
#define HEAP_DEFAULT_FILLFACTOR
Definition: rel.h:348
bool RecoveryInProgress(void)
Definition: xlog.c:6211

References BUFFER_LOCK_UNLOCK, BufferGetPage(), ConditionalLockBufferForCleanup(), GlobalVisTestFor(), GlobalVisTestIsRemovableXid(), HEAP_DEFAULT_FILLFACTOR, heap_page_prune(), LockBuffer(), Max, PruneResult::ndeleted, PruneResult::nnewlpdead, PageGetHeapFreeSpace(), PageIsFull(), pgstat_update_heap_dead_tuples(), RecoveryInProgress(), RelationGetTargetPageFreeSpace, and TransactionIdIsValid.

Referenced by heapam_index_fetch_tuple(), heapam_scan_bitmap_next_block(), and heapgetpage().

◆ heap_prune_chain()

static int heap_prune_chain ( Buffer  buffer,
OffsetNumber  rootoffnum,
int8 htsv,
PruneState prstate 
)
static

Definition at line 488 of file pruneheap.c.

490 {
491  int ndeleted = 0;
492  Page dp = (Page) BufferGetPage(buffer);
493  TransactionId priorXmax = InvalidTransactionId;
494  ItemId rootlp;
495  HeapTupleHeader htup;
496  OffsetNumber latestdead = InvalidOffsetNumber,
497  maxoff = PageGetMaxOffsetNumber(dp),
498  offnum;
499  OffsetNumber chainitems[MaxHeapTuplesPerPage];
500  int nchain = 0,
501  i;
502 
503  rootlp = PageGetItemId(dp, rootoffnum);
504 
505  /*
506  * If it's a heap-only tuple, then it is not the start of a HOT chain.
507  */
508  if (ItemIdIsNormal(rootlp))
509  {
510  Assert(htsv[rootoffnum] != -1);
511  htup = (HeapTupleHeader) PageGetItem(dp, rootlp);
512 
513  if (HeapTupleHeaderIsHeapOnly(htup))
514  {
515  /*
516  * If the tuple is DEAD and doesn't chain to anything else, mark
517  * it unused immediately. (If it does chain, we can only remove
518  * it as part of pruning its chain.)
519  *
520  * We need this primarily to handle aborted HOT updates, that is,
521  * XMIN_INVALID heap-only tuples. Those might not be linked to by
522  * any chain, since the parent tuple might be re-updated before
523  * any pruning occurs. So we have to be able to reap them
524  * separately from chain-pruning. (Note that
525  * HeapTupleHeaderIsHotUpdated will never return true for an
526  * XMIN_INVALID tuple, so this code will work even when there were
527  * sequential updates within the aborted transaction.)
528  *
529  * Note that we might first arrive at a dead heap-only tuple
530  * either here or while following a chain below. Whichever path
531  * gets there first will mark the tuple unused.
532  */
533  if (htsv[rootoffnum] == HEAPTUPLE_DEAD &&
534  !HeapTupleHeaderIsHotUpdated(htup))
535  {
536  heap_prune_record_unused(prstate, rootoffnum);
537  HeapTupleHeaderAdvanceConflictHorizon(htup,
538  &prstate->snapshotConflictHorizon);
539  ndeleted++;
540  }
541 
542  /* Nothing more to do */
543  return ndeleted;
544  }
545  }
546 
547  /* Start from the root tuple */
548  offnum = rootoffnum;
549 
550  /* while not end of the chain */
551  for (;;)
552  {
553  ItemId lp;
554  bool tupdead,
555  recent_dead;
556 
557  /* Sanity check (pure paranoia) */
558  if (offnum < FirstOffsetNumber)
559  break;
560 
561  /*
562  * An offset past the end of page's line pointer array is possible
563  * when the array was truncated (original item must have been unused)
564  */
565  if (offnum > maxoff)
566  break;
567 
568  /* If item is already processed, stop --- it must not be same chain */
569  if (prstate->marked[offnum])
570  break;
571 
572  lp = PageGetItemId(dp, offnum);
573 
574  /* Unused item obviously isn't part of the chain */
575  if (!ItemIdIsUsed(lp))
576  break;
577 
578  /*
579  * If we are looking at the redirected root line pointer, jump to the
580  * first normal tuple in the chain. If we find a redirect somewhere
581  * else, stop --- it must not be same chain.
582  */
583  if (ItemIdIsRedirected(lp))
584  {
585  if (nchain > 0)
586  break; /* not at start of chain */
587  chainitems[nchain++] = offnum;
588  offnum = ItemIdGetRedirect(rootlp);
589  continue;
590  }
591 
592  /*
593  * Likewise, a dead line pointer can't be part of the chain. (We
594  * already eliminated the case of dead root tuple outside this
595  * function.)
596  */
597  if (ItemIdIsDead(lp))
598  {
599  /*
600  * If the caller set mark_unused_now true, we can set dead line
601  * pointers LP_UNUSED now. We don't increment ndeleted here since
602  * the LP was already marked dead.
603  */
604  if (unlikely(prstate->mark_unused_now))
605  heap_prune_record_unused(prstate, offnum);
606 
607  break;
608  }
609 
610  Assert(ItemIdIsNormal(lp));
611  htup = (HeapTupleHeader) PageGetItem(dp, lp);
612 
613  /*
614  * Check the tuple XMIN against prior XMAX, if any
615  */
616  if (TransactionIdIsValid(priorXmax) &&
617  !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
618  break;
619 
620  /*
621  * OK, this tuple is indeed a member of the chain.
622  */
623  chainitems[nchain++] = offnum;
624 
625  /*
626  * Check tuple's visibility status.
627  */
628  tupdead = recent_dead = false;
629 
630  switch (htsv_get_valid_status(htsv[offnum]))
631  {
632  case HEAPTUPLE_DEAD:
633  tupdead = true;
634  break;
635 
636  case HEAPTUPLE_RECENTLY_DEAD:
637  recent_dead = true;
638 
639  /*
640  * This tuple may soon become DEAD. Update the hint field so
641  * that the page is reconsidered for pruning in future.
642  */
643  heap_prune_record_prunable(prstate,
644  HeapTupleHeaderGetUpdateXid(htup));
645  break;
646 
647  case HEAPTUPLE_DELETE_IN_PROGRESS:
648 
649  /*
650  * This tuple may soon become DEAD. Update the hint field so
651  * that the page is reconsidered for pruning in future.
652  */
653  heap_prune_record_prunable(prstate,
654  HeapTupleHeaderGetUpdateXid(htup));
655  break;
656 
657  case HEAPTUPLE_LIVE:
658  case HEAPTUPLE_INSERT_IN_PROGRESS:
659 
660  /*
661  * If we wanted to optimize for aborts, we might consider
662  * marking the page prunable when we see INSERT_IN_PROGRESS.
663  * But we don't. See related decisions about when to mark the
664  * page prunable in heapam.c.
665  */
666  break;
667 
668  default:
669  elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
670  break;
671  }
672 
673  /*
674  * Remember the last DEAD tuple seen. We will advance past
675  * RECENTLY_DEAD tuples just in case there's a DEAD one after them;
676  * but we can't advance past anything else. We have to make sure that
677  * we don't miss any DEAD tuples, since DEAD tuples that still have
678  * tuple storage after pruning will confuse VACUUM.
679  */
680  if (tupdead)
681  {
682  latestdead = offnum;
683  HeapTupleHeaderAdvanceConflictHorizon(htup,
684  &prstate->snapshotConflictHorizon);
685  }
686  else if (!recent_dead)
687  break;
688 
689  /*
690  * If the tuple is not HOT-updated, then we are at the end of this
691  * HOT-update chain.
692  */
693  if (!HeapTupleHeaderIsHotUpdated(htup))
694  break;
695 
696  /* HOT implies it can't have moved to different partition */
697  Assert(!HeapTupleHeaderIndicatesMovedPartitions(htup));
698 
699  /*
700  * Advance to next chain member.
701  */
702  Assert(ItemPointerGetBlockNumber(&htup->t_ctid) ==
703  BufferGetBlockNumber(buffer));
704  offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
705  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
706  }
707 
708  /*
709  * If we found a DEAD tuple in the chain, adjust the HOT chain so that all
710  * the DEAD tuples at the start of the chain are removed and the root line
711  * pointer is appropriately redirected.
712  */
713  if (OffsetNumberIsValid(latestdead))
714  {
715  /*
716  * Mark as unused each intermediate item that we are able to remove
717  * from the chain.
718  *
719  * When the previous item is the last dead tuple seen, we are at the
720  * right candidate for redirection.
721  */
722  for (i = 1; (i < nchain) && (chainitems[i - 1] != latestdead); i++)
723  {
724  heap_prune_record_unused(prstate, chainitems[i]);
725  ndeleted++;
726  }
727 
728  /*
729  * If the root entry had been a normal tuple, we are deleting it, so
730  * count it in the result. But changing a redirect (even to DEAD
731  * state) doesn't count.
732  */
733  if (ItemIdIsNormal(rootlp))
734  ndeleted++;
735 
736  /*
737  * If the DEAD tuple is at the end of the chain, the entire chain is
738  * dead and the root line pointer can be marked dead. Otherwise just
739  * redirect the root to the correct chain member.
740  */
741  if (i >= nchain)
742  heap_prune_record_dead_or_unused(prstate, rootoffnum);
743  else
744  heap_prune_record_redirect(prstate, rootoffnum, chainitems[i]);
745  }
746  else if (nchain < 2 && ItemIdIsRedirected(rootlp))
747  {
748  /*
749  * We found a redirect item that doesn't point to a valid follow-on
750  * item. This can happen if the loop in heap_page_prune caused us to
751  * visit the dead successor of a redirect item before visiting the
752  * redirect item. We can clean up by setting the redirect item to
753  * DEAD state or LP_UNUSED if the caller indicated.
754  */
755  heap_prune_record_dead_or_unused(prstate, rootoffnum);
756  }
757 
758  return ndeleted;
759 }
#define unlikely(x)
Definition: c.h:300
#define ERROR
Definition: elog.h:39
void HeapTupleHeaderAdvanceConflictHorizon(HeapTupleHeader tuple, TransactionId *snapshotConflictHorizon)
Definition: heapam.c:7502
@ HEAPTUPLE_RECENTLY_DEAD
Definition: heapam.h:98
@ HEAPTUPLE_INSERT_IN_PROGRESS
Definition: heapam.h:99
@ HEAPTUPLE_LIVE
Definition: heapam.h:97
@ HEAPTUPLE_DELETE_IN_PROGRESS
Definition: heapam.h:100
@ HEAPTUPLE_DEAD
Definition: heapam.h:96
static HTSV_Result htsv_get_valid_status(int status)
Definition: heapam.h:221
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
#define OffsetNumberIsValid(offsetNumber)
Definition: off.h:39
static void heap_prune_record_redirect(PruneState *prstate, OffsetNumber offnum, OffsetNumber rdoffnum)
Definition: pruneheap.c:777
static void heap_prune_record_unused(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:824
static void heap_prune_record_prunable(PruneState *prstate, TransactionId xid)
Definition: pruneheap.c:763
static void heap_prune_record_dead_or_unused(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:808

References Assert(), BufferGetBlockNumber(), BufferGetPage(), elog(), ERROR, FirstOffsetNumber, heap_prune_record_dead_or_unused(), heap_prune_record_prunable(), heap_prune_record_redirect(), heap_prune_record_unused(), HEAPTUPLE_DEAD, HEAPTUPLE_DELETE_IN_PROGRESS, HEAPTUPLE_INSERT_IN_PROGRESS, HEAPTUPLE_LIVE, HEAPTUPLE_RECENTLY_DEAD, HeapTupleHeaderAdvanceConflictHorizon(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsHotUpdated, htsv_get_valid_status(), i, InvalidOffsetNumber, InvalidTransactionId, ItemIdGetRedirect, ItemIdIsDead, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerGetBlockNumber(), ItemPointerGetOffsetNumber(), PruneState::mark_unused_now, PruneState::marked, MaxHeapTuplesPerPage, OffsetNumberIsValid, PageGetItem(), PageGetItemId(), PageGetMaxOffsetNumber(), PruneState::snapshotConflictHorizon, HeapTupleHeaderData::t_ctid, TransactionIdEquals, TransactionIdIsValid, and unlikely.

Referenced by heap_page_prune().

◆ heap_prune_record_dead()

static void heap_prune_record_dead ( PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 792 of file pruneheap.c.

793 {
794  Assert(prstate->ndead < MaxHeapTuplesPerPage);
795  prstate->nowdead[prstate->ndead] = offnum;
796  prstate->ndead++;
797  Assert(!prstate->marked[offnum]);
798  prstate->marked[offnum] = true;
799 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::ndead, and PruneState::nowdead.

Referenced by heap_prune_record_dead_or_unused().

◆ heap_prune_record_dead_or_unused()

static void heap_prune_record_dead_or_unused ( PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 808 of file pruneheap.c.

809 {
810  /*
811  * If the caller set mark_unused_now to true, we can remove dead tuples
812  * during pruning instead of marking their line pointers dead. Set this
813  * tuple's line pointer LP_UNUSED. We hint that this option is less
814  * likely.
815  */
816  if (unlikely(prstate->mark_unused_now))
817  heap_prune_record_unused(prstate, offnum);
818  else
819  heap_prune_record_dead(prstate, offnum);
820 }
static void heap_prune_record_dead(PruneState *prstate, OffsetNumber offnum)
Definition: pruneheap.c:792

References heap_prune_record_dead(), heap_prune_record_unused(), PruneState::mark_unused_now, and unlikely.

Referenced by heap_prune_chain().

◆ heap_prune_record_prunable()

static void heap_prune_record_prunable ( PruneState prstate,
TransactionId  xid 
)
static

Definition at line 763 of file pruneheap.c.

764 {
765  /*
766  * This should exactly match the PageSetPrunable macro. We can't store
767  * directly into the page header yet, so we update working state.
768  */
769  Assert(TransactionIdIsNormal(xid));
770  if (!TransactionIdIsValid(prstate->new_prune_xid) ||
771  TransactionIdPrecedes(xid, prstate->new_prune_xid))
772  prstate->new_prune_xid = xid;
773 }
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:280
#define TransactionIdIsNormal(xid)
Definition: transam.h:42

References Assert(), PruneState::new_prune_xid, TransactionIdIsNormal, TransactionIdIsValid, and TransactionIdPrecedes().

Referenced by heap_prune_chain().

◆ heap_prune_record_redirect()

static void heap_prune_record_redirect ( PruneState prstate,
OffsetNumber  offnum,
OffsetNumber  rdoffnum 
)
static

Definition at line 777 of file pruneheap.c.

779 {
780  Assert(prstate->nredirected < MaxHeapTuplesPerPage);
781  prstate->redirected[prstate->nredirected * 2] = offnum;
782  prstate->redirected[prstate->nredirected * 2 + 1] = rdoffnum;
783  prstate->nredirected++;
784  Assert(!prstate->marked[offnum]);
785  prstate->marked[offnum] = true;
786  Assert(!prstate->marked[rdoffnum]);
787  prstate->marked[rdoffnum] = true;
788 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::nredirected, and PruneState::redirected.

Referenced by heap_prune_chain().

◆ heap_prune_record_unused()

static void heap_prune_record_unused ( PruneState prstate,
OffsetNumber  offnum 
)
static

Definition at line 824 of file pruneheap.c.

825 {
826  Assert(prstate->nunused < MaxHeapTuplesPerPage);
827  prstate->nowunused[prstate->nunused] = offnum;
828  prstate->nunused++;
829  Assert(!prstate->marked[offnum]);
830  prstate->marked[offnum] = true;
831 }

References Assert(), PruneState::marked, MaxHeapTuplesPerPage, PruneState::nowunused, and PruneState::nunused.

Referenced by heap_prune_chain(), and heap_prune_record_dead_or_unused().

◆ heap_prune_satisfies_vacuum()

static HTSV_Result heap_prune_satisfies_vacuum ( PruneState prstate,
HeapTuple  tup,
Buffer  buffer 
)
static

Definition at line 439 of file pruneheap.c.

440 {
441  HTSV_Result res;
442  TransactionId dead_after;
443 
444  res = HeapTupleSatisfiesVacuumHorizon(tup, buffer, &dead_after);
445 
446  if (res != HEAPTUPLE_RECENTLY_DEAD)
447  return res;
448 
449  if (GlobalVisTestIsRemovableXid(prstate->vistest, dead_after))
450  res = HEAPTUPLE_DEAD;
451 
452  return res;
453 }
HTSV_Result
Definition: heapam.h:95
HTSV_Result HeapTupleSatisfiesVacuumHorizon(HeapTuple htup, Buffer buffer, TransactionId *dead_after)

References GlobalVisTestIsRemovableXid(), HEAPTUPLE_DEAD, HEAPTUPLE_RECENTLY_DEAD, HeapTupleSatisfiesVacuumHorizon(), res, and PruneState::vistest.

Referenced by heap_page_prune().

◆ page_verify_redirects()

static void page_verify_redirects ( Page  page)
static

Definition at line 1002 of file pruneheap.c.

1003 {
1004 #ifdef USE_ASSERT_CHECKING
1005  OffsetNumber offnum;
1006  OffsetNumber maxoff;
1007 
1008  maxoff = PageGetMaxOffsetNumber(page);
1009  for (offnum = FirstOffsetNumber;
1010  offnum <= maxoff;
1011  offnum = OffsetNumberNext(offnum))
1012  {
1013  ItemId itemid = PageGetItemId(page, offnum);
1014  OffsetNumber targoff;
1015  ItemId targitem;
1016  HeapTupleHeader htup;
1017 
1018  if (!ItemIdIsRedirected(itemid))
1019  continue;
1020 
1021  targoff = ItemIdGetRedirect(itemid);
1022  targitem = PageGetItemId(page, targoff);
1023 
1024  Assert(ItemIdIsUsed(targitem));
1025  Assert(ItemIdIsNormal(targitem));
1026  Assert(ItemIdHasStorage(targitem));
1027  htup = (HeapTupleHeader) PageGetItem(page, targitem);
1028  Assert(!HeapTupleHeaderIsHeapOnly(htup));
1029  }
1030 #endif
1031 }

References Assert(), FirstOffsetNumber, HeapTupleHeaderIsHeapOnly, ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsNormal, ItemIdIsRedirected, ItemIdIsUsed, OffsetNumberNext, PageGetItem(), PageGetItemId(), and PageGetMaxOffsetNumber().

Referenced by heap_page_prune_execute().