PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/subtrans.h"
#include "access/syncscan.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Data Structures

struct  IndexDeleteCounts
 

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define BOTTOMUP_MAX_NBLOCKS   6
 
#define BOTTOMUP_TOLERANCE_NBLOCKS   3
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Typedefs

typedef struct IndexDeleteCounts IndexDeleteCounts
 

Functions

static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static TM_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static void index_delete_sort (TM_IndexDeleteOp *delstate)
 
static int bottomup_sort_and_shrink (TM_IndexDeleteOp *delstate)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_changed, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (TableScanDesc sscan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
TableScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
 
void heap_rescan (TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (TableScanDesc sscan)
 
HeapTuple heap_getnext (TableScanDesc sscan, ScanDirection direction)
 
bool heap_getnextslot (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
void heap_set_tidrange (TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid)
 
bool heap_getnextslot_tidrange (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
void heap_get_latest_tid (TableScanDesc sscan, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
void heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
void simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
TM_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
TM_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
TM_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
 
static TM_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
 
static TM_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, ItemPointer tid)
 
void heap_abort_speculative (Relation relation, ItemPointer tid)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
TransactionId heap_index_delete_tuples (Relation rel, TM_IndexDeleteOp *delstate)
 
static int index_delete_sort_cmp (TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
 
static int bottomup_nblocksfavorable (IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
 
static int bottomup_sort_and_shrink_cmp (const void *arg1, const void *arg2)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_prune (XLogReaderState *record)
 
static void heap_xlog_vacuum (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 
void HeapCheckForSerializableConflictOut (bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
 

Variables

struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ BOTTOMUP_MAX_NBLOCKS

#define BOTTOMUP_MAX_NBLOCKS   6

◆ BOTTOMUP_TOLERANCE_NBLOCKS

#define BOTTOMUP_TOLERANCE_NBLOCKS   3

Definition at line 186 of file heapam.c.

Referenced by bottomup_nblocksfavorable().

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 167 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6143 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6146 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 6142 of file heapam.c.

Referenced by FreezeMultiXactId().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6145 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6144 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 163 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 214 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 165 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Typedef Documentation

◆ IndexDeleteCounts

Function Documentation

◆ bottomup_nblocksfavorable()

static int bottomup_nblocksfavorable ( IndexDeleteCounts *  blockgroups,
int  nblockgroups,
TM_IndexDelete *  deltids 
)
static

Definition at line 7766 of file heapam.c.

References Assert, BOTTOMUP_MAX_NBLOCKS, BOTTOMUP_TOLERANCE_NBLOCKS, IndexDeleteCounts::ifirsttid, ItemPointerGetBlockNumber, and TM_IndexDelete::tid.

Referenced by bottomup_sort_and_shrink().

7768 {
7769  int64 lastblock = -1;
7770  int nblocksfavorable = 0;
7771 
7772  Assert(nblockgroups >= 1);
7773  Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);
7774 
7775  /*
7776  * We tolerate heap blocks that will be accessed only slightly out of
7777  * physical order. Small blips occur when a pair of almost-contiguous
7778  * blocks happen to fall into different buckets (perhaps due only to a
7779  * small difference in npromisingtids that the bucketing scheme didn't
7780  * quite manage to ignore). We effectively ignore these blips by applying
7781  * a small tolerance. The precise tolerance we use is a little arbitrary,
7782  * but it works well enough in practice.
7783  */
7784  for (int b = 0; b < nblockgroups; b++)
7785  {
7786  IndexDeleteCounts *group = blockgroups + b;
7787  TM_IndexDelete *firstdtid = deltids + group->ifirsttid;
7788  BlockNumber block = ItemPointerGetBlockNumber(&firstdtid->tid);
7789 
7790  if (lastblock != -1 &&
7791  ((int64) block < lastblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
7792  (int64) block > lastblock + BOTTOMUP_TOLERANCE_NBLOCKS))
7793  break;
7794 
7795  nblocksfavorable++;
7796  lastblock = block;
7797  }
7798 
7799  /* Always indicate that there is at least 1 favorable block */
7800  Assert(nblocksfavorable >= 1);
7801 
7802  return nblocksfavorable;
7803 }
uint32 BlockNumber
Definition: block.h:31
#define BOTTOMUP_TOLERANCE_NBLOCKS
Definition: heapam.c:186
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
ItemPointerData tid
Definition: tableam.h:189
int16 ifirsttid
Definition: heapam.c:196
#define Assert(condition)
Definition: c.h:804
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98

◆ bottomup_sort_and_shrink()

static int bottomup_sort_and_shrink ( TM_IndexDeleteOp *  delstate)
static

Definition at line 7882 of file heapam.c.

References Assert, BlockNumberIsValid, TM_IndexDeleteOp::bottomup, BOTTOMUP_MAX_NBLOCKS, bottomup_nblocksfavorable(), bottomup_sort_and_shrink_cmp(), TM_IndexDeleteOp::deltids, i, TM_IndexDelete::id, IndexDeleteCounts::ifirsttid, InvalidBlockNumber, ItemPointerGetBlockNumber, Min, TM_IndexDeleteOp::ndeltids, IndexDeleteCounts::npromisingtids, IndexDeleteCounts::ntids, palloc(), pfree(), pg_nextpower2_32(), TM_IndexStatus::promising, qsort, TM_IndexDeleteOp::status, and TM_IndexDelete::tid.

Referenced by heap_index_delete_tuples().

7883 {
7884  IndexDeleteCounts *blockgroups;
7885  TM_IndexDelete *reordereddeltids;
7886  BlockNumber curblock = InvalidBlockNumber;
7887  int nblockgroups = 0;
7888  int ncopied = 0;
7889  int nblocksfavorable = 0;
7890 
7891  Assert(delstate->bottomup);
7892  Assert(delstate->ndeltids > 0);
7893 
7894  /* Calculate per-heap-block count of TIDs */
7895  blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
7896  for (int i = 0; i < delstate->ndeltids; i++)
7897  {
7898  TM_IndexDelete *ideltid = &delstate->deltids[i];
7899  TM_IndexStatus *istatus = delstate->status + ideltid->id;
7900  ItemPointer htid = &ideltid->tid;
7901  bool promising = istatus->promising;
7902 
7903  if (curblock != ItemPointerGetBlockNumber(htid))
7904  {
7905  /* New block group */
7906  nblockgroups++;
7907 
7908  Assert(curblock < ItemPointerGetBlockNumber(htid) ||
7909  !BlockNumberIsValid(curblock));
7910 
7911  curblock = ItemPointerGetBlockNumber(htid);
7912  blockgroups[nblockgroups - 1].ifirsttid = i;
7913  blockgroups[nblockgroups - 1].ntids = 1;
7914  blockgroups[nblockgroups - 1].npromisingtids = 0;
7915  }
7916  else
7917  {
7918  blockgroups[nblockgroups - 1].ntids++;
7919  }
7920 
7921  if (promising)
7922  blockgroups[nblockgroups - 1].npromisingtids++;
7923  }
7924 
7925  /*
7926  * We're about ready to sort block groups to determine the optimal order
7927  * for visiting heap blocks. But before we do, round the number of
7928  * promising tuples for each block group up to the next power-of-two,
7929  * unless it is very low (less than 4), in which case we round up to 4.
7930  * npromisingtids is far too noisy to trust when choosing between a pair
7931  * of block groups that both have very low values.
7932  *
7933  * This scheme divides heap blocks/block groups into buckets. Each bucket
7934  * contains blocks that have _approximately_ the same number of promising
7935  * TIDs as each other. The goal is to ignore relatively small differences
7936  * in the total number of promising entries, so that the whole process can
7937  * give a little weight to heapam factors (like heap block locality)
7938  * instead. This isn't a trade-off, really -- we have nothing to lose. It
7939  * would be foolish to interpret small differences in npromisingtids
7940  * values as anything more than noise.
7941  *
7942  * We tiebreak on nhtids when sorting block group subsets that have the
7943  * same npromisingtids, but this has the same issues as npromisingtids,
7944  * and so nhtids is subject to the same power-of-two bucketing scheme. The
7945  * only reason that we don't fix nhtids in the same way here too is that
7946  * we'll need accurate nhtids values after the sort. We handle nhtids
7947  * bucketization dynamically instead (in the sort comparator).
7948  *
7949  * See bottomup_nblocksfavorable() for a full explanation of when and how
7950  * heap locality/favorable blocks can significantly influence when and how
7951  * heap blocks are accessed.
7952  */
7953  for (int b = 0; b < nblockgroups; b++)
7954  {
7955  IndexDeleteCounts *group = blockgroups + b;
7956 
7957  /* Better off falling back on nhtids with low npromisingtids */
7958  if (group->npromisingtids <= 4)
7959  group->npromisingtids = 4;
7960  else
7961  group->npromisingtids =
7963  }
7964 
7965  /* Sort groups and rearrange caller's deltids array */
7966  qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
7968  reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));
7969 
7970  nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
7971  /* Determine number of favorable blocks at the start of final deltids */
7972  nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
7973  delstate->deltids);
7974 
7975  for (int b = 0; b < nblockgroups; b++)
7976  {
7977  IndexDeleteCounts *group = blockgroups + b;
7978  TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;
7979 
7980  memcpy(reordereddeltids + ncopied, firstdtid,
7981  sizeof(TM_IndexDelete) * group->ntids);
7982  ncopied += group->ntids;
7983  }
7984 
7985  /* Copy final grouped and sorted TIDs back into start of caller's array */
7986  memcpy(delstate->deltids, reordereddeltids,
7987  sizeof(TM_IndexDelete) * ncopied);
7988  delstate->ndeltids = ncopied;
7989 
7990  pfree(reordereddeltids);
7991  pfree(blockgroups);
7992 
7993  return nblocksfavorable;
7994 }
TM_IndexDelete * deltids
Definition: tableam.h:228
static int bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
Definition: heapam.c:7809
#define Min(x, y)
Definition: c.h:986
int16 npromisingtids
Definition: heapam.c:194
uint32 BlockNumber
Definition: block.h:31
void pfree(void *pointer)
Definition: mcxt.c:1169
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
unsigned int uint32
Definition: c.h:441
bool promising
Definition: tableam.h:199
TM_IndexStatus * status
Definition: tableam.h:229
ItemPointerData tid
Definition: tableam.h:189
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
int16 ifirsttid
Definition: heapam.c:196
#define Assert(condition)
Definition: c.h:804
static int bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
Definition: heapam.c:7766
#define InvalidBlockNumber
Definition: block.h:33
void * palloc(Size size)
Definition: mcxt.c:1062
int i
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define qsort(a, b, c, d)
Definition: port.h:504

◆ bottomup_sort_and_shrink_cmp()

static int bottomup_sort_and_shrink_cmp ( const void *  arg1,
const void *  arg2 
)
static

Definition at line 7809 of file heapam.c.

References IndexDeleteCounts::ifirsttid, IndexDeleteCounts::npromisingtids, IndexDeleteCounts::ntids, pg_nextpower2_32(), and pg_unreachable.

Referenced by bottomup_sort_and_shrink().

7810 {
7811  const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
7812  const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
7813 
7814  /*
7815  * Most significant field is npromisingtids (which we invert the order of
7816  * so as to sort in desc order).
7817  *
7818  * Caller should have already normalized npromisingtids fields into
7819  * power-of-two values (buckets).
7820  */
7821  if (group1->npromisingtids > group2->npromisingtids)
7822  return -1;
7823  if (group1->npromisingtids < group2->npromisingtids)
7824  return 1;
7825 
7826  /*
7827  * Tiebreak: desc ntids sort order.
7828  *
7829  * We cannot expect power-of-two values for ntids fields. We should
7830  * behave as if they were already rounded up for us instead.
7831  */
7832  if (group1->ntids != group2->ntids)
7833  {
7834  uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
7835  uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
7836 
7837  if (ntids1 > ntids2)
7838  return -1;
7839  if (ntids1 < ntids2)
7840  return 1;
7841  }
7842 
7843  /*
7844  * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
7845  * block in deltids array) order.
7846  *
7847  * This is equivalent to sorting in ascending heap block number order
7848  * (among otherwise equal subsets of the array). This approach allows us
7849  * to avoid accessing the out-of-line TID. (We rely on the assumption
7850  * that the deltids array was sorted in ascending heap TID order when
7851  * these offsets to the first TID from each heap block group were formed.)
7852  */
7853  if (group1->ifirsttid > group2->ifirsttid)
7854  return 1;
7855  if (group1->ifirsttid < group2->ifirsttid)
7856  return -1;
7857 
7858  pg_unreachable();
7859 
7860  return 0;
7861 }
#define pg_unreachable()
Definition: c.h:258
int16 npromisingtids
Definition: heapam.c:194
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
unsigned int uint32
Definition: c.h:441
int16 ifirsttid
Definition: heapam.c:196

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2723 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2724 {
2725  return
2726  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2727  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2728  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2729  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2730  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2731  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2732  XLHL_KEYS_UPDATED : 0);
2733 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:269
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:268
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:270
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:272
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:271

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 5053 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5058 {
5059  TransactionId new_xmax;
5060  uint16 new_infomask,
5061  new_infomask2;
5062 
5064 
5065 l5:
5066  new_infomask = 0;
5067  new_infomask2 = 0;
5068  if (old_infomask & HEAP_XMAX_INVALID)
5069  {
5070  /*
5071  * No previous locker; we just insert our own TransactionId.
5072  *
5073  * Note that it's critical that this case be the first one checked,
5074  * because there are several blocks below that come back to this one
5075  * to implement certain optimizations; old_infomask might contain
5076  * other dirty bits in those cases, but we don't really care.
5077  */
5078  if (is_update)
5079  {
5080  new_xmax = add_to_xmax;
5081  if (mode == LockTupleExclusive)
5082  new_infomask2 |= HEAP_KEYS_UPDATED;
5083  }
5084  else
5085  {
5086  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5087  switch (mode)
5088  {
5089  case LockTupleKeyShare:
5090  new_xmax = add_to_xmax;
5091  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5092  break;
5093  case LockTupleShare:
5094  new_xmax = add_to_xmax;
5095  new_infomask |= HEAP_XMAX_SHR_LOCK;
5096  break;
5098  new_xmax = add_to_xmax;
5099  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5100  break;
5101  case LockTupleExclusive:
5102  new_xmax = add_to_xmax;
5103  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5104  new_infomask2 |= HEAP_KEYS_UPDATED;
5105  break;
5106  default:
5107  new_xmax = InvalidTransactionId; /* silence compiler */
5108  elog(ERROR, "invalid lock mode");
5109  }
5110  }
5111  }
5112  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5113  {
5114  MultiXactStatus new_status;
5115 
5116  /*
5117  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5118  * cross-check.
5119  */
5120  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5121 
5122  /*
5123  * A multixact together with LOCK_ONLY set but neither lock bit set
5124  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5125  * anymore. This check is critical for databases upgraded by
5126  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5127  * that such multis are never passed.
5128  */
5129  if (HEAP_LOCKED_UPGRADED(old_infomask))
5130  {
5131  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5132  old_infomask |= HEAP_XMAX_INVALID;
5133  goto l5;
5134  }
5135 
5136  /*
5137  * If the XMAX is already a MultiXactId, then we need to expand it to
5138  * include add_to_xmax; but if all the members were lockers and are
5139  * all gone, we can do away with the IS_MULTI bit and just set
5140  * add_to_xmax as the only locker/updater. If all lockers are gone
5141  * and we have an updater that aborted, we can also do without a
5142  * multi.
5143  *
5144  * The cost of doing GetMultiXactIdMembers would be paid by
5145  * MultiXactIdExpand if we weren't to do this, so this check is not
5146  * incurring extra work anyhow.
5147  */
5148  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5149  {
5150  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5152  old_infomask)))
5153  {
5154  /*
5155  * Reset these bits and restart; otherwise fall through to
5156  * create a new multi below.
5157  */
5158  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5159  old_infomask |= HEAP_XMAX_INVALID;
5160  goto l5;
5161  }
5162  }
5163 
5164  new_status = get_mxact_status_for_lock(mode, is_update);
5165 
5166  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5167  new_status);
5168  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5169  }
5170  else if (old_infomask & HEAP_XMAX_COMMITTED)
5171  {
5172  /*
5173  * It's a committed update, so we need to preserve him as updater of
5174  * the tuple.
5175  */
5177  MultiXactStatus new_status;
5178 
5179  if (old_infomask2 & HEAP_KEYS_UPDATED)
5180  status = MultiXactStatusUpdate;
5181  else
5182  status = MultiXactStatusNoKeyUpdate;
5183 
5184  new_status = get_mxact_status_for_lock(mode, is_update);
5185 
5186  /*
5187  * since it's not running, it's obviously impossible for the old
5188  * updater to be identical to the current one, so we need not check
5189  * for that case as we do in the block above.
5190  */
5191  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5192  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5193  }
5194  else if (TransactionIdIsInProgress(xmax))
5195  {
5196  /*
5197  * If the XMAX is a valid, in-progress TransactionId, then we need to
5198  * create a new MultiXactId that includes both the old locker or
5199  * updater and our own TransactionId.
5200  */
5201  MultiXactStatus new_status;
5202  MultiXactStatus old_status;
5203  LockTupleMode old_mode;
5204 
5205  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5206  {
5207  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5208  old_status = MultiXactStatusForKeyShare;
5209  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5210  old_status = MultiXactStatusForShare;
5211  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5212  {
5213  if (old_infomask2 & HEAP_KEYS_UPDATED)
5214  old_status = MultiXactStatusForUpdate;
5215  else
5216  old_status = MultiXactStatusForNoKeyUpdate;
5217  }
5218  else
5219  {
5220  /*
5221  * LOCK_ONLY can be present alone only when a page has been
5222  * upgraded by pg_upgrade. But in that case,
5223  * TransactionIdIsInProgress() should have returned false. We
5224  * assume it's no longer locked in this case.
5225  */
5226  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5227  old_infomask |= HEAP_XMAX_INVALID;
5228  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5229  goto l5;
5230  }
5231  }
5232  else
5233  {
5234  /* it's an update, but which kind? */
5235  if (old_infomask2 & HEAP_KEYS_UPDATED)
5236  old_status = MultiXactStatusUpdate;
5237  else
5238  old_status = MultiXactStatusNoKeyUpdate;
5239  }
5240 
5241  old_mode = TUPLOCK_from_mxstatus(old_status);
5242 
5243  /*
5244  * If the lock to be acquired is for the same TransactionId as the
5245  * existing lock, there's an optimization possible: consider only the
5246  * strongest of both locks as the only one present, and restart.
5247  */
5248  if (xmax == add_to_xmax)
5249  {
5250  /*
5251  * Note that it's not possible for the original tuple to be
5252  * updated: we wouldn't be here because the tuple would have been
5253  * invisible and we wouldn't try to update it. As a subtlety,
5254  * this code can also run when traversing an update chain to lock
5255  * future versions of a tuple. But we wouldn't be here either,
5256  * because the add_to_xmax would be different from the original
5257  * updater.
5258  */
5259  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5260 
5261  /* acquire the strongest of both */
5262  if (mode < old_mode)
5263  mode = old_mode;
5264  /* mustn't touch is_update */
5265 
5266  old_infomask |= HEAP_XMAX_INVALID;
5267  goto l5;
5268  }
5269 
5270  /* otherwise, just fall back to creating a new multixact */
5271  new_status = get_mxact_status_for_lock(mode, is_update);
5272  new_xmax = MultiXactIdCreate(xmax, old_status,
5273  add_to_xmax, new_status);
5274  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5275  }
5276  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5277  TransactionIdDidCommit(xmax))
5278  {
5279  /*
5280  * It's a committed update, so we gotta preserve him as updater of the
5281  * tuple.
5282  */
5284  MultiXactStatus new_status;
5285 
5286  if (old_infomask2 & HEAP_KEYS_UPDATED)
5287  status = MultiXactStatusUpdate;
5288  else
5289  status = MultiXactStatusNoKeyUpdate;
5290 
5291  new_status = get_mxact_status_for_lock(mode, is_update);
5292 
5293  /*
5294  * since it's not running, it's obviously impossible for the old
5295  * updater to be identical to the current one, so we need not check
5296  * for that case as we do in the block above.
5297  */
5298  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5299  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5300  }
5301  else
5302  {
5303  /*
5304  * Can get here iff the locking/updating transaction was running when
5305  * the infomask was extracted from the tuple, but finished before
5306  * TransactionIdIsInProgress got to run. Deal with it as if there was
5307  * no locker at all in the first place.
5308  */
5309  old_infomask |= HEAP_XMAX_INVALID;
5310  goto l5;
5311  }
5312 
5313  *result_infomask = new_infomask;
5314  *result_infomask2 = new_infomask2;
5315  *result_xmax = new_xmax;
5316 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6751
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:41
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1325
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:206
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:262
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6832
unsigned short uint16
Definition: c.h:440
#define ERROR
Definition: elog.h:46
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:386
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId MultiXactId
Definition: c.h:597
#define Assert(condition)
Definition: c.h:804
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:214
#define elog(elevel,...)
Definition: elog.h:232
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4267
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:264
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:266
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:439

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7099 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7101 {
 /* Non-blocking variant: pass nowait = true so Do_MultiXactIdWait returns
  * false instead of sleeping when a conflicting member is still running.
  * No ctid/oper wait context is supplied (XLTW_None, NULL ctid). */
7102  return Do_MultiXactIdWait(multi, status, infomask, true,
7103  rel, NULL, XLTW_None, remaining);
7104 }
int remaining
Definition: informix.c:667
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6999
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 6999 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7003 {
7004  bool result = true;
7005  MultiXactMember *members;
7006  int nmembers;
7007  int remain = 0;
7008 
7009  /* for pre-pg_upgrade tuples, no need to sleep at all */
7010  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7011  GetMultiXactIdMembers(multi, &members, false,
7012  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7013 
7014  if (nmembers >= 0)
7015  {
7016  int i;
7017 
7018  for (i = 0; i < nmembers; i++)
7019  {
7020  TransactionId memxid = members[i].xid;
7021  MultiXactStatus memstatus = members[i].status;
7022 
7024  {
7025  remain++;
7026  continue;
7027  }
7028 
7031  {
7032  if (remaining && TransactionIdIsInProgress(memxid))
7033  remain++;
7034  continue;
7035  }
7036 
7037  /*
7038  * This member conflicts with our multi, so we have to sleep (or
7039  * return failure, if asked to avoid waiting).
7040  *
7041  * Note that we don't set up an error context callback ourselves,
7042  * but instead we pass the info down to XactLockTableWait. This
7043  * might seem a bit wasteful because the context is set up and
7044  * torn down for each member of the multixact, but in reality it
7045  * should be barely noticeable, and it avoids duplicate code.
7046  */
7047  if (nowait)
7048  {
7049  result = ConditionalXactLockTableWait(memxid);
7050  if (!result)
7051  break;
7052  }
7053  else
7054  XactLockTableWait(memxid, rel, ctid, oper);
7055  }
7056 
7057  pfree(members);
7058  }
7059 
7060  if (remaining)
7061  *remaining = remain;
7062 
7063  return result;
7064 }
int remaining
Definition: informix.c:667
MultiXactStatus
Definition: multixact.h:41
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1325
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:155
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:713
void pfree(void *pointer)
Definition: mcxt.c:1169
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:582
MultiXactStatus status
Definition: multixact.h:63
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:640
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:382
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode,
bool current_is_member 
)
static

Definition at line 6900 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6902 {
6903  int nmembers;
6904  MultiXactMember *members;
6905  bool result = false;
6906  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6907 
6908  if (HEAP_LOCKED_UPGRADED(infomask))
6909  return false;
6910 
6911  nmembers = GetMultiXactIdMembers(multi, &members, false,
6912  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6913  if (nmembers >= 0)
6914  {
6915  int i;
6916 
6917  for (i = 0; i < nmembers; i++)
6918  {
6919  TransactionId memxid;
6920  LOCKMODE memlockmode;
6921 
6922  if (result && (current_is_member == NULL || *current_is_member))
6923  break;
6924 
6925  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6926 
6927  /* ignore members from current xact (but track their presence) */
6928  memxid = members[i].xid;
6930  {
6931  if (current_is_member != NULL)
6932  *current_is_member = true;
6933  continue;
6934  }
6935  else if (result)
6936  continue;
6937 
6938  /* ignore members that don't conflict with the lock we want */
6939  if (!DoLockModesConflict(memlockmode, wanted))
6940  continue;
6941 
6942  if (ISUPDATE_from_mxstatus(members[i].status))
6943  {
6944  /* ignore aborted updaters */
6945  if (TransactionIdDidAbort(memxid))
6946  continue;
6947  }
6948  else
6949  {
6950  /* ignore lockers-only that are no longer in progress */
6951  if (!TransactionIdIsInProgress(memxid))
6952  continue;
6953  }
6954 
6955  /*
6956  * Whatever remains are either live lockers that conflict with our
6957  * wanted lock, or updaters that are not aborted. Those conflict
6958  * with what we want. Set up to return true, but keep going to
6959  * look for the current transaction among the multixact members,
6960  * if needed.
6961  */
6962  result = true;
6963  }
6964  pfree(members);
6965  }
6966 
6967  return result;
6968 }
uint32 TransactionId
Definition: c.h:587
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1325
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:155
void pfree(void *pointer)
Definition: mcxt.c:1169
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:582
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
static const struct @13 tupleLockExtraInfo[MaxLockTupleMode+1]
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_changed,
bool copy 
)
static

Definition at line 8378 of file heapam.c.

References Assert, bms_free(), bms_is_empty(), bms_is_member(), FirstLowInvalidHeapAttributeNumber, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleHasExternal, i, INDEX_ATTR_BITMAP_IDENTITY_KEY, MaxHeapAttributeNumber, TupleDescData::natts, RelationData::rd_rel, RelationGetDescr, RelationGetIndexAttrBitmap(), RelationIsLogicallyLogged, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

8380 {
8381  TupleDesc desc = RelationGetDescr(relation);
8382  char replident = relation->rd_rel->relreplident;
8383  Bitmapset *idattrs;
8384  HeapTuple key_tuple;
8385  bool nulls[MaxHeapAttributeNumber];
8387 
8388  *copy = false;
8389 
8390  if (!RelationIsLogicallyLogged(relation))
8391  return NULL;
8392 
8393  if (replident == REPLICA_IDENTITY_NOTHING)
8394  return NULL;
8395 
8396  if (replident == REPLICA_IDENTITY_FULL)
8397  {
8398  /*
8399  * When logging the entire old tuple, it very well could contain
8400  * toasted columns. If so, force them to be inlined.
8401  */
8402  if (HeapTupleHasExternal(tp))
8403  {
8404  *copy = true;
8405  tp = toast_flatten_tuple(tp, desc);
8406  }
8407  return tp;
8408  }
8409 
8410  /* if the key hasn't changed and we're only logging the key, we're done */
8411  if (!key_changed)
8412  return NULL;
8413 
8414  /* find out the replica identity columns */
8415  idattrs = RelationGetIndexAttrBitmap(relation,
8417 
8418  /*
8419  * If there's no defined replica identity columns, treat as !key_changed.
8420  * (This case should not be reachable from heap_update, since that should
8421  * calculate key_changed accurately. But heap_delete just passes constant
8422  * true for key_changed, so we can hit this case in deletes.)
8423  */
8424  if (bms_is_empty(idattrs))
8425  return NULL;
8426 
8427  /*
8428  * Construct a new tuple containing only the replica identity columns,
8429  * with nulls elsewhere. While we're at it, assert that the replica
8430  * identity columns aren't null.
8431  */
8432  heap_deform_tuple(tp, desc, values, nulls);
8433 
8434  for (int i = 0; i < desc->natts; i++)
8435  {
8437  idattrs))
8438  Assert(!nulls[i]);
8439  else
8440  nulls[i] = true;
8441  }
8442 
8443  key_tuple = heap_form_tuple(desc, values, nulls);
8444  *copy = true;
8445 
8446  bms_free(idattrs);
8447 
8448  /*
8449  * If the tuple, which by here only contains indexed columns, still has
8450  * toasted columns, force them to be inlined. This is somewhat unlikely
8451  * since there's limits on the size of indexed columns, so we don't
8452  * duplicate toast_flatten_tuple()'s functionality in the above loop over
8453  * the indexed columns, even if it would be more efficient.
8454  */
8455  if (HeapTupleHasExternal(key_tuple))
8456  {
8457  HeapTuple oldtup = key_tuple;
8458 
8459  key_tuple = toast_flatten_tuple(oldtup, desc);
8460  heap_freetuple(oldtup);
8461  }
8462 
8463  return key_tuple;
8464 }
#define RelationGetDescr(relation)
Definition: rel.h:495
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:655
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
uintptr_t Datum
Definition: postgres.h:411
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define Assert(condition)
Definition: c.h:804
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:166
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:5000
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8831 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8832 {
8833  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8835  *infomask2 &= ~HEAP_KEYS_UPDATED;
8836 
8837  if (infobits & XLHL_XMAX_IS_MULTI)
8838  *infomask |= HEAP_XMAX_IS_MULTI;
8839  if (infobits & XLHL_XMAX_LOCK_ONLY)
8840  *infomask |= HEAP_XMAX_LOCK_ONLY;
8841  if (infobits & XLHL_XMAX_EXCL_LOCK)
8842  *infomask |= HEAP_XMAX_EXCL_LOCK;
8843  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8844  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8845  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8846 
8847  if (infobits & XLHL_KEYS_UPDATED)
8848  *infomask2 |= HEAP_KEYS_UPDATED;
8849 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:269
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:268
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:270
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:272
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:271

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2021 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferCleanup(), intorel_shutdown(), and transientrel_shutdown().

2022 {
 /* Release the buffer pin held by the bulk-insert state, if any. */
2023  if (bistate->current_buf != InvalidBuffer)
2024  ReleaseBuffer(bistate->current_buf);
 /* Free the buffer access strategy, then the state struct itself. */
2025  FreeAccessStrategy(bistate->strategy);
2026  pfree(bistate);
2027 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
void pfree(void *pointer)
Definition: mcxt.c:1169
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:31
Buffer current_buf
Definition: hio.h:32

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 6170 of file heapam.c.

References Assert, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6174 {
6176  int i;
6177  MultiXactMember *members;
6178  int nmembers;
6179  bool need_replace;
6180  int nnewmembers;
6181  MultiXactMember *newmembers;
6182  bool has_lockers;
6183  TransactionId update_xid;
6184  bool update_committed;
6185 
6186  *flags = 0;
6187 
6188  /* We should only be called in Multis */
6189  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6190 
6191  if (!MultiXactIdIsValid(multi) ||
6192  HEAP_LOCKED_UPGRADED(t_infomask))
6193  {
6194  /* Ensure infomask bits are appropriately set/reset */
6195  *flags |= FRM_INVALIDATE_XMAX;
6196  return InvalidTransactionId;
6197  }
6198  else if (MultiXactIdPrecedes(multi, relminmxid))
6199  ereport(ERROR,
6201  errmsg_internal("found multixact %u from before relminmxid %u",
6202  multi, relminmxid)));
6203  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6204  {
6205  /*
6206  * This old multi cannot possibly have members still running, but
6207  * verify just in case. If it was a locker only, it can be removed
6208  * without any further consideration; but if it contained an update,
6209  * we might need to preserve it.
6210  */
6211  if (MultiXactIdIsRunning(multi,
6212  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6213  ereport(ERROR,
6215  errmsg_internal("multixact %u from before cutoff %u found to be still running",
6216  multi, cutoff_multi)));
6217 
6218  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6219  {
6220  *flags |= FRM_INVALIDATE_XMAX;
6221  xid = InvalidTransactionId; /* not strictly necessary */
6222  }
6223  else
6224  {
6225  /* replace multi by update xid */
6226  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6227 
6228  /* wasn't only a lock, xid needs to be valid */
6230 
6231  if (TransactionIdPrecedes(xid, relfrozenxid))
6232  ereport(ERROR,
6234  errmsg_internal("found update xid %u from before relfrozenxid %u",
6235  xid, relfrozenxid)));
6236 
6237  /*
6238  * If the xid is older than the cutoff, it has to have aborted,
6239  * otherwise the tuple would have gotten pruned away.
6240  */
6241  if (TransactionIdPrecedes(xid, cutoff_xid))
6242  {
6243  if (TransactionIdDidCommit(xid))
6244  ereport(ERROR,
6246  errmsg_internal("cannot freeze committed update xid %u", xid)));
6247  *flags |= FRM_INVALIDATE_XMAX;
6248  xid = InvalidTransactionId; /* not strictly necessary */
6249  }
6250  else
6251  {
6252  *flags |= FRM_RETURN_IS_XID;
6253  }
6254  }
6255 
6256  return xid;
6257  }
6258 
6259  /*
6260  * This multixact might have or might not have members still running, but
6261  * we know it's valid and is newer than the cutoff point for multis.
6262  * However, some member(s) of it may be below the cutoff for Xids, so we
6263  * need to walk the whole members array to figure out what to do, if
6264  * anything.
6265  */
6266 
6267  nmembers =
6268  GetMultiXactIdMembers(multi, &members, false,
6269  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6270  if (nmembers <= 0)
6271  {
6272  /* Nothing worth keeping */
6273  *flags |= FRM_INVALIDATE_XMAX;
6274  return InvalidTransactionId;
6275  }
6276 
6277  /* is there anything older than the cutoff? */
6278  need_replace = false;
6279  for (i = 0; i < nmembers; i++)
6280  {
6281  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6282  {
6283  need_replace = true;
6284  break;
6285  }
6286  }
6287 
6288  /*
6289  * In the simplest case, there is no member older than the cutoff; we can
6290  * keep the existing MultiXactId as is.
6291  */
6292  if (!need_replace)
6293  {
6294  *flags |= FRM_NOOP;
6295  pfree(members);
6296  return InvalidTransactionId;
6297  }
6298 
6299  /*
6300  * If the multi needs to be updated, figure out which members we need
6301  * to keep.
6302  */
6303  nnewmembers = 0;
6304  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6305  has_lockers = false;
6306  update_xid = InvalidTransactionId;
6307  update_committed = false;
6308 
6309  for (i = 0; i < nmembers; i++)
6310  {
6311  /*
6312  * Determine whether to keep this member or ignore it.
6313  */
6314  if (ISUPDATE_from_mxstatus(members[i].status))
6315  {
6316  TransactionId xid = members[i].xid;
6317 
6319  if (TransactionIdPrecedes(xid, relfrozenxid))
6320  ereport(ERROR,
6322  errmsg_internal("found update xid %u from before relfrozenxid %u",
6323  xid, relfrozenxid)));
6324 
6325  /*
6326  * It's an update; should we keep it? If the transaction is known
6327  * aborted or crashed then it's okay to ignore it, otherwise not.
6328  * Note that an updater older than cutoff_xid cannot possibly be
6329  * committed, because HeapTupleSatisfiesVacuum would have returned
6330  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6331  *
6332  * As with all tuple visibility routines, it's critical to test
6333  * TransactionIdIsInProgress before TransactionIdDidCommit,
6334  * because of race conditions explained in detail in
6335  * heapam_visibility.c.
6336  */
6339  {
6340  Assert(!TransactionIdIsValid(update_xid));
6341  update_xid = xid;
6342  }
6343  else if (TransactionIdDidCommit(xid))
6344  {
6345  /*
6346  * The transaction committed, so we can tell caller to set
6347  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6348  * the transaction is not running.)
6349  */
6350  Assert(!TransactionIdIsValid(update_xid));
6351  update_committed = true;
6352  update_xid = xid;
6353  }
6354  else
6355  {
6356  /*
6357  * Not in progress, not committed -- must be aborted or
6358  * crashed; we can ignore it.
6359  */
6360  }
6361 
6362  /*
6363  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6364  * update Xid cannot possibly be older than the xid cutoff. The
6365  * presence of such a tuple would cause corruption, so be paranoid
6366  * and check.
6367  */
6368  if (TransactionIdIsValid(update_xid) &&
6369  TransactionIdPrecedes(update_xid, cutoff_xid))
6370  ereport(ERROR,
6372  errmsg_internal("found update xid %u from before xid cutoff %u",
6373  update_xid, cutoff_xid)));
6374 
6375  /*
6376  * If we determined that it's an Xid corresponding to an update
6377  * that must be retained, additionally add it to the list of
6378  * members of the new Multi, in case we end up using that. (We
6379  * might still decide to use only an update Xid and not a multi,
6380  * but it's easier to maintain the list as we walk the old members
6381  * list.)
6382  */
6383  if (TransactionIdIsValid(update_xid))
6384  newmembers[nnewmembers++] = members[i];
6385  }
6386  else
6387  {
6388  /* We only keep lockers if they are still running */
6389  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6390  TransactionIdIsInProgress(members[i].xid))
6391  {
6392  /* running locker cannot possibly be older than the cutoff */
6393  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6394  newmembers[nnewmembers++] = members[i];
6395  has_lockers = true;
6396  }
6397  }
6398  }
6399 
6400  pfree(members);
6401 
6402  if (nnewmembers == 0)
6403  {
6404  /* nothing worth keeping!? Tell caller to remove the whole thing */
6405  *flags |= FRM_INVALIDATE_XMAX;
6406  xid = InvalidTransactionId;
6407  }
6408  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6409  {
6410  /*
6411  * If there's a single member and it's an update, pass it back alone
6412  * without creating a new Multi. (XXX we could do this when there's a
6413  * single remaining locker, too, but that would complicate the API too
6414  * much; moreover, the case with the single updater is more
6415  * interesting, because those are longer-lived.)
6416  */
6417  Assert(nnewmembers == 1);
6418  *flags |= FRM_RETURN_IS_XID;
6419  if (update_committed)
6420  *flags |= FRM_MARK_COMMITTED;
6421  xid = update_xid;
6422  }
6423  else
6424  {
6425  /*
6426  * Create a new multixact with the surviving members of the previous
6427  * one, to set as new Xmax in the tuple.
6428  */
6429  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6430  *flags |= FRM_RETURN_IS_MULTI;
6431  }
6432 
6433  pfree(newmembers);
6434 
6435  return xid;
6436 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6144
#define FRM_MARK_COMMITTED
Definition: heapam.c:6146
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1325
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:767
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
int errcode(int sqlerrcode)
Definition: elog.c:698
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6832
void pfree(void *pointer)
Definition: mcxt.c:1169
#define ERROR
Definition: elog.h:46
TransactionId xid
Definition: multixact.h:62
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6143
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6145
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
#define Assert(condition)
Definition: c.h:804
#define FRM_NOOP
Definition: heapam.c:6142
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3156
void * palloc(Size size)
Definition: mcxt.c:1062
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4267 of file heapam.c.

References elog, ERROR, mode, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4268 {
4269  int retval;
4270 
 /* Look up the MultiXactStatus for this tuple-lock mode in the static
  * tupleLockExtraInfo table; updates and plain locks use separate columns. */
4271  if (is_update)
4272  retval = tupleLockExtraInfo[mode].updstatus;
4273  else
4274  retval = tupleLockExtraInfo[mode].lockstatus;
4275 
 /* -1 in the table marks a mode/is_update combination with no valid status. */
4276  if (retval == -1)
4277  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4278  is_update ? "true" : "false");
4279  return (MultiXactStatus) retval;
4280 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:41
#define ERROR
Definition: elog.h:46
static const struct @13 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog(elevel,...)
Definition: elog.h:232

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 2007 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferInit(), intorel_startup(), and transientrel_startup().

2008 {
2009  BulkInsertState bistate;
2010 
2011  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2013  bistate->current_buf = InvalidBuffer;
2014  return bistate;
2015 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:39
BufferAccessStrategy strategy
Definition: hio.h:31
void * palloc(Size size)
Definition: mcxt.c:1062
Buffer current_buf
Definition: hio.h:32

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 6751 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, mode, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6753 {
6754  int nmembers;
6755  MultiXactMember *members;
6756  int i;
6757  uint16 bits = HEAP_XMAX_IS_MULTI;
6758  uint16 bits2 = 0;
6759  bool has_update = false;
6760  LockTupleMode strongest = LockTupleKeyShare;
6761 
6762  /*
6763  * We only use this in multis we just created, so they cannot be values
6764  * pre-pg_upgrade.
6765  */
6766  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6767 
6768  for (i = 0; i < nmembers; i++)
6769  {
6771 
6772  /*
6773  * Remember the strongest lock mode held by any member of the
6774  * multixact.
6775  */
6776  mode = TUPLOCK_from_mxstatus(members[i].status);
6777  if (mode > strongest)
6778  strongest = mode;
6779 
6780  /* See what other bits we need */
6781  switch (members[i].status)
6782  {
6786  break;
6787 
6789  bits2 |= HEAP_KEYS_UPDATED;
6790  break;
6791 
6793  has_update = true;
6794  break;
6795 
6796  case MultiXactStatusUpdate:
6797  bits2 |= HEAP_KEYS_UPDATED;
6798  has_update = true;
6799  break;
6800  }
6801  }
6802 
6803  if (strongest == LockTupleExclusive ||
6804  strongest == LockTupleNoKeyExclusive)
6805  bits |= HEAP_XMAX_EXCL_LOCK;
6806  else if (strongest == LockTupleShare)
6807  bits |= HEAP_XMAX_SHR_LOCK;
6808  else if (strongest == LockTupleKeyShare)
6809  bits |= HEAP_XMAX_KEYSHR_LOCK;
6810 
6811  if (!has_update)
6812  bits |= HEAP_XMAX_LOCK_ONLY;
6813 
6814  if (nmembers > 0)
6815  pfree(members);
6816 
6817  *new_infomask = bits;
6818  *new_infomask2 = bits2;
6819 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
unsigned short uint16
Definition: c.h:440
void pfree(void *pointer)
Definition: mcxt.c:1169
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:214
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ heap2_redo()

void heap2_redo ( XLogReaderState * record)

Definition at line 9722 of file heapam.c.

References elog, heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_vacuum(), heap_xlog_visible(), PANIC, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_PRUNE, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VACUUM, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9723 {
9724  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9725 
9726  switch (info & XLOG_HEAP_OPMASK)
9727  {
9728  case XLOG_HEAP2_PRUNE:
9729  heap_xlog_prune(record);
9730  break;
9731  case XLOG_HEAP2_VACUUM:
9732  heap_xlog_vacuum(record);
9733  break;
9735  heap_xlog_freeze_page(record);
9736  break;
9737  case XLOG_HEAP2_VISIBLE:
9738  heap_xlog_visible(record);
9739  break;
9741  heap_xlog_multi_insert(record);
9742  break;
9744  heap_xlog_lock_updated(record);
9745  break;
9746  case XLOG_HEAP2_NEW_CID:
9747 
9748  /*
9749  * Nothing to do on a real replay, only used during logical
9750  * decoding.
9751  */
9752  break;
9753  case XLOG_HEAP2_REWRITE:
9754  heap_xlog_logical_rewrite(record);
9755  break;
9756  default:
9757  elog(PANIC, "heap2_redo: unknown op code %u", info);
9758  }
9759 }
static void heap_xlog_prune(XLogReaderState *record)
Definition: heapam.c:8472
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1117
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:439
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define XLOG_HEAP2_PRUNE
Definition: heapam_xlog.h:54
#define PANIC
Definition: elog.h:50
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:9575
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8773
static void heap_xlog_vacuum(XLogReaderState *record)
Definition: heapam.c:8558
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:9049
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLOG_HEAP2_VACUUM
Definition: heapam_xlog.h:55
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:305
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:56
#define elog(elevel,...)
Definition: elog.h:232
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8633

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5906 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, TransactionIdIsValid, TransactionIdPrecedes(), TransactionXmin, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by heapam_tuple_complete_speculative(), and toast_delete_datum().

5907 {
5909  ItemId lp;
5910  HeapTupleData tp;
5911  Page page;
5912  BlockNumber block;
5913  Buffer buffer;
5914  TransactionId prune_xid;
5915 
5916  Assert(ItemPointerIsValid(tid));
5917 
5918  block = ItemPointerGetBlockNumber(tid);
5919  buffer = ReadBuffer(relation, block);
5920  page = BufferGetPage(buffer);
5921 
5923 
5924  /*
5925  * Page can't be all visible, we just inserted into it, and are still
5926  * running.
5927  */
5928  Assert(!PageIsAllVisible(page));
5929 
5930  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5931  Assert(ItemIdIsNormal(lp));
5932 
5933  tp.t_tableOid = RelationGetRelid(relation);
5934  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5935  tp.t_len = ItemIdGetLength(lp);
5936  tp.t_self = *tid;
5937 
5938  /*
5939  * Sanity check that the tuple really is a speculatively inserted tuple,
5940  * inserted by us.
5941  */
5942  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5943  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5944  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
5945  elog(ERROR, "attempted to kill a non-speculative tuple");
5947 
5948  /*
5949  * No need to check for serializable conflicts here. There is never a
5950  * need for a combo CID, either. No need to extract replica identity, or
5951  * do anything special with infomask bits.
5952  */
5953 
5955 
5956  /*
5957  * The tuple will become DEAD immediately. Flag that this page is a
5958  * candidate for pruning by setting xmin to TransactionXmin. While not
5959  * immediately prunable, it is the oldest xid we can cheaply determine
5960  * that's safe against wraparound / being older than the table's
5961  * relfrozenxid. To defend against the unlikely case of a new relation
5962  * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
5963  * if so (vacuum can't subsequently move relfrozenxid to beyond
5964  * TransactionXmin, so there's no race here).
5965  */
5967  if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
5968  prune_xid = relation->rd_rel->relfrozenxid;
5969  else
5970  prune_xid = TransactionXmin;
5971  PageSetPrunable(page, prune_xid);
5972 
5973  /* store transaction information of xact deleting the tuple */
5976 
5977  /*
5978  * Set the tuple header xmin to InvalidTransactionId. This makes the
5979  * tuple immediately invisible everyone. (In particular, to any
5980  * transactions waiting on the speculative token, woken up later.)
5981  */
5983 
5984  /* Clear the speculative insertion token too */
5985  tp.t_data->t_ctid = tp.t_self;
5986 
5987  MarkBufferDirty(buffer);
5988 
5989  /*
5990  * XLOG stuff
5991  *
5992  * The WAL records generated here match heap_delete(). The same recovery
5993  * routines are used.
5994  */
5995  if (RelationNeedsWAL(relation))
5996  {
5997  xl_heap_delete xlrec;
5998  XLogRecPtr recptr;
5999 
6000  xlrec.flags = XLH_DELETE_IS_SUPER;
6002  tp.t_data->t_infomask2);
6004  xlrec.xmax = xid;
6005 
6006  XLogBeginInsert();
6007  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6008  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6009 
6010  /* No replica identity & replication origin logged */
6011 
6012  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6013 
6014  PageSetLSN(page, recptr);
6015  }
6016 
6017  END_CRIT_SECTION();
6018 
6019  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6020 
6021  if (HeapTupleHasExternal(&tp))
6022  {
6023  Assert(!IsToastRelation(relation));
6024  heap_toast_delete(relation, &tp, true);
6025  }
6026 
6027  /*
6028  * Never need to mark tuple for invalidation, since catalogs don't support
6029  * speculative insertion
6030  */
6031 
6032  /* Now we can release the buffer */
6033  ReleaseBuffer(buffer);
6034 
6035  /* count deletion, as we counted the insertion too */
6036  pgstat_count_heap_delete(relation);
6037 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsToastRelation(Relation relation)
Definition: catalog.c:145
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:99
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2723
HeapTupleFields t_heap
Definition: htup_details.h:156
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:587
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1562
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
union HeapTupleHeaderData::@43 t_choice
OffsetNumber offnum
Definition: heapam_xlog.h:110
TransactionId TransactionXmin
Definition: snapmgr.c:112
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:500
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:46
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:109
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:115
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId t_xmin
Definition: htup_details.h:123
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_MOVED
Definition: htup_details.h:216
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
uint8 infobits_set
Definition: heapam_xlog.h:111
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:582
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2265
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:469
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:319

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool * have_tuple_lock 
)
static

Definition at line 5004 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5006 {
5007  if (*have_tuple_lock)
5008  return true;
5009 
5010  switch (wait_policy)
5011  {
5012  case LockWaitBlock:
5013  LockTupleTuplock(relation, tid, mode);
5014  break;
5015 
5016  case LockWaitSkip:
5017  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5018  return false;
5019  break;
5020 
5021  case LockWaitError:
5022  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5023  ereport(ERROR,
5024  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5025  errmsg("could not obtain lock on row in relation \"%s\"",
5026  RelationGetRelationName(relation))));
5027  break;
5028  }
5029  *have_tuple_lock = true;
5030 
5031  return true;
5032 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:163
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:167
int errcode(int sqlerrcode)
Definition: elog.c:698
#define ERROR
Definition: elog.h:46
#define RelationGetRelationName(relation)
Definition: rel.h:503
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg(const char *fmt,...)
Definition: elog.c:909

◆ heap_beginscan()

TableScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelTableScanDesc  parallel_scan,
uint32  flags 
)

Definition at line 1185 of file heapam.c.

References Assert, initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_parallel, HeapScanDescData::rs_parallelworkerdata, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_ALLOW_PAGEMODE, SO_TYPE_SAMPLESCAN, SO_TYPE_SEQSCAN, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

1189 {
1190  HeapScanDesc scan;
1191 
1192  /*
1193  * increment relation ref count while scanning relation
1194  *
1195  * This is just to make really sure the relcache entry won't go away while
1196  * the scan has a pointer to it. Caller should be holding the rel open
1197  * anyway, so this is redundant in all normal scenarios...
1198  */
1200 
1201  /*
1202  * allocate and initialize scan descriptor
1203  */
1204  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1205 
1206  scan->rs_base.rs_rd = relation;
1207  scan->rs_base.rs_snapshot = snapshot;
1208  scan->rs_base.rs_nkeys = nkeys;
1209  scan->rs_base.rs_flags = flags;
1210  scan->rs_base.rs_parallel = parallel_scan;
1211  scan->rs_strategy = NULL; /* set in initscan */
1212 
1213  /*
1214  * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1215  */
1216  if (!(snapshot && IsMVCCSnapshot(snapshot)))
1218 
1219  /*
1220  * For seqscan and sample scans in a serializable transaction, acquire a
1221  * predicate lock on the entire relation. This is required not only to
1222  * lock all the matching tuples, but also to conflict with new insertions
1223  * into the table. In an indexscan, we take page locks on the index pages
1224  * covering the range specified in the scan qual, but in a heap scan there
1225  * is nothing more fine-grained to lock. A bitmap scan is a different
1226  * story, there we have already scanned the index and locked the index
1227  * pages covering the predicate. But in that case we still have to lock
1228  * any matching heap tuples. For sample scan we could optimize the locking
1229  * to be at least page-level granularity, but we'd need to add per-tuple
1230  * locking for that.
1231  */
1233  {
1234  /*
1235  * Ensure a missing snapshot is noticed reliably, even if the
1236  * isolation mode means predicate locking isn't performed (and
1237  * therefore the snapshot isn't used here).
1238  */
1239  Assert(snapshot);
1240  PredicateLockRelation(relation, snapshot);
1241  }
1242 
1243  /* we only need to set this up once */
1244  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1245 
1246  /*
1247  * Allocate memory to keep track of page allocation for parallel workers
1248  * when doing a parallel scan.
1249  */
1250  if (parallel_scan != NULL)
1252  else
1253  scan->rs_parallelworkerdata = NULL;
1254 
1255  /*
1256  * we do this here instead of in initscan() because heap_rescan also calls
1257  * initscan() and we don't want to allocate memory again
1258  */
1259  if (nkeys > 0)
1260  scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1261  else
1262  scan->rs_base.rs_key = NULL;
1263 
1264  initscan(scan, key, false);
1265 
1266  return (TableScanDesc) scan;
1267 }
TableScanDescData rs_base
Definition: heapam.h:49
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2569
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
ScanKeyData * ScanKey
Definition: skey.h:75
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition: heapam.h:72
Oid t_tableOid
Definition: htup.h:66
struct ScanKeyData * rs_key
Definition: relscan.h:37
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2065
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:96
#define Assert(condition)
Definition: c.h:804
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
void * palloc(Size size)
Definition: mcxt.c:1062
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:49
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:227
#define RelationGetRelid(relation)
Definition: rel.h:469

◆ heap_delete()

TM_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
TM_FailureData * tmfd,
bool  changingPart 
)

Definition at line 2768 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), TM_FailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, 
TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, xl_heap_delete::xmax, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_delete(), and simple_heap_delete().

/*
 * heap_delete - MVCC-aware delete of one heap tuple (listing).
 *
 * NOTE(review): this is a Doxygen text extraction of heapam.c, complete
 * with embedded source line numbers.  Lines whose entire content was a
 * hyperlink (macro invocations, whole statements) were dropped by the
 * extraction, so lines below showing only a bare source line number are
 * missing their code.  Do not treat this listing as compilable or
 * authoritative — consult upstream heapam.c before acting on it.
 */
2771 {
2772  TM_Result result;
2774  ItemId lp;
2775  HeapTupleData tp;
2776  Page page;
2777  BlockNumber block;
2778  Buffer buffer;
2779  Buffer vmbuffer = InvalidBuffer;
2780  TransactionId new_xmax;
2781  uint16 new_infomask,
2782  new_infomask2;
2783  bool have_tuple_lock = false;
2784  bool iscombo;
2785  bool all_visible_cleared = false;
2786  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2787  bool old_key_copied = false;
2788 
2789  Assert(ItemPointerIsValid(tid));
2790 
2791  /*
2792  * Forbid this during a parallel operation, lest it allocate a combo CID.
2793  * Other workers might need that combo CID for visibility checks, and we
2794  * have no provision for broadcasting it to them.
2795  */
2796  if (IsInParallelMode())
2797  ereport(ERROR,
2798  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2799  errmsg("cannot delete tuples during a parallel operation")));
2800 
2801  block = ItemPointerGetBlockNumber(tid);
2802  buffer = ReadBuffer(relation, block);
2803  page = BufferGetPage(buffer);
2804 
2805  /*
2806  * Before locking the buffer, pin the visibility map page if it appears to
2807  * be necessary. Since we haven't got the lock yet, someone else might be
2808  * in the middle of changing this, so we'll need to recheck after we have
2809  * the lock.
2810  */
2811  if (PageIsAllVisible(page))
2812  visibilitymap_pin(relation, block, &vmbuffer);
2813 
2815 
/* NOTE(review): dropped line 2814 was presumably
 * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE) — the comment below speaks of
 * "busy locking the buffer"; confirm against upstream heapam.c. */
2816  /*
2817  * If we didn't pin the visibility map page and the page has become all
2818  * visible while we were busy locking the buffer, we'll have to unlock and
2819  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2820  * unfortunate, but hopefully shouldn't happen often.
2821  */
2822  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2823  {
2824  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2825  visibilitymap_pin(relation, block, &vmbuffer);
2827  }
2828 
2829  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2830  Assert(ItemIdIsNormal(lp));
2831 
2832  tp.t_tableOid = RelationGetRelid(relation);
2833  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2834  tp.t_len = ItemIdGetLength(lp);
2835  tp.t_self = *tid;
2836 
2837 l1:
2838  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2839 
2840  if (result == TM_Invisible)
2841  {
2842  UnlockReleaseBuffer(buffer);
2843  ereport(ERROR,
2844  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2845  errmsg("attempted to delete invisible tuple")));
2846  }
2847  else if (result == TM_BeingModified && wait)
2848  {
2849  TransactionId xwait;
2850  uint16 infomask;
2851 
2852  /* must copy state data before unlocking buffer */
2853  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2854  infomask = tp.t_data->t_infomask;
2855 
2856  /*
2857  * Sleep until concurrent transaction ends -- except when there's a
2858  * single locker and it's our own transaction. Note we don't care
2859  * which lock mode the locker has, because we need the strongest one.
2860  *
2861  * Before sleeping, we need to acquire tuple lock to establish our
2862  * priority for the tuple (see heap_lock_tuple). LockTuple will
2863  * release us when we are next-in-line for the tuple.
2864  *
2865  * If we are forced to "start over" below, we keep the tuple lock;
2866  * this arranges that we stay at the head of the line while rechecking
2867  * tuple state.
2868  */
2869  if (infomask & HEAP_XMAX_IS_MULTI)
2870  {
2871  bool current_is_member = false;
2872 
2873  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2874  LockTupleExclusive, &current_is_member))
2875  {
2876  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2877 
2878  /*
2879  * Acquire the lock, if necessary (but skip it when we're
2880  * requesting a lock and already have one; avoids deadlock).
2881  */
2882  if (!current_is_member)
2884  LockWaitBlock, &have_tuple_lock);
2885 
2886  /* wait for multixact */
2888  relation, &(tp.t_self), XLTW_Delete,
2889  NULL);
2891 
2892  /*
2893  * If xwait had just locked the tuple then some other xact
2894  * could update this tuple before we get to this point. Check
2895  * for xmax change, and start over if so.
2896  */
2897  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2899  xwait))
2900  goto l1;
2901  }
2902 
2903  /*
2904  * You might think the multixact is necessarily done here, but not
2905  * so: it could have surviving members, namely our own xact or
2906  * other subxacts of this backend. It is legal for us to delete
2907  * the tuple in either case, however (the latter case is
2908  * essentially a situation of upgrading our former shared lock to
2909  * exclusive). We don't bother changing the on-disk hint bits
2910  * since we are about to overwrite the xmax altogether.
2911  */
2912  }
2913  else if (!TransactionIdIsCurrentTransactionId(xwait))
2914  {
2915  /*
2916  * Wait for regular transaction to end; but first, acquire tuple
2917  * lock.
2918  */
2919  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2921  LockWaitBlock, &have_tuple_lock);
2922  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2924 
2925  /*
2926  * xwait is done, but if xwait had just locked the tuple then some
2927  * other xact could update this tuple before we get to this point.
2928  * Check for xmax change, and start over if so.
2929  */
2930  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2932  xwait))
2933  goto l1;
2934 
2935  /* Otherwise check if it committed or aborted */
2936  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2937  }
2938 
2939  /*
2940  * We may overwrite if previous xmax aborted, or if it committed but
2941  * only locked the tuple without updating it.
2942  */
2943  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2946  result = TM_Ok;
2947  else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2948  result = TM_Updated;
2949  else
2950  result = TM_Deleted;
2951  }
2952 
2953  if (crosscheck != InvalidSnapshot && result == TM_Ok)
2954  {
2955  /* Perform additional check for transaction-snapshot mode RI updates */
2956  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2957  result = TM_Updated;
2958  }
2959 
2960  if (result != TM_Ok)
2961  {
2962  Assert(result == TM_SelfModified ||
2963  result == TM_Updated ||
2964  result == TM_Deleted ||
2965  result == TM_BeingModified);
2967  Assert(result != TM_Updated ||
2968  !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2969  tmfd->ctid = tp.t_data->t_ctid;
2971  if (result == TM_SelfModified)
2972  tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2973  else
2974  tmfd->cmax = InvalidCommandId;
2975  UnlockReleaseBuffer(buffer);
2976  if (have_tuple_lock)
2977  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2978  if (vmbuffer != InvalidBuffer)
2979  ReleaseBuffer(vmbuffer);
2980  return result;
2981  }
2982 
2983  /*
2984  * We're about to do the actual delete -- check for conflict first, to
2985  * avoid possibly having to roll back work we've just done.
2986  *
2987  * This is safe without a recheck as long as there is no possibility of
2988  * another process scanning the page between this check and the delete
2989  * being visible to the scan (i.e., an exclusive buffer content lock is
2990  * continuously held from this point until the tuple delete is visible).
2991  */
2992  CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
2993 
2994  /* replace cid with a combo CID if necessary */
2995  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2996 
2997  /*
2998  * Compute replica identity tuple before entering the critical section so
2999  * we don't PANIC upon a memory allocation failure.
3000  */
3001  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3002 
3003  /*
3004  * If this is the first possibly-multixact-able operation in the current
3005  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3006  * certain that the transaction will never become a member of any older
3007  * MultiXactIds than that. (We have to do this even if we end up just
3008  * using our own TransactionId below, since some other backend could
3009  * incorporate our XID into a MultiXact immediately afterwards.)
3010  */
3012 
/* NOTE(review): dropped lines 3011/3013-3014 presumably were
 * MultiXactIdSetOldestMember() and the start of the
 * compute_new_xmax_infomask(...) call whose continuation appears just
 * below — both are in this function's cross-reference list; confirm
 * against upstream heapam.c. */
3015  xid, LockTupleExclusive, true,
3016  &new_xmax, &new_infomask, &new_infomask2);
3017 
3019 
/* NOTE(review): dropped line 3018 was presumably START_CRIT_SECTION(),
 * matching the END_CRIT_SECTION() at line 3120 below — confirm upstream. */
3020  /*
3021  * If this transaction commits, the tuple will become DEAD sooner or
3022  * later. Set flag that this page is a candidate for pruning once our xid
3023  * falls below the OldestXmin horizon. If the transaction finally aborts,
3024  * the subsequent page pruning will be a no-op and the hint will be
3025  * cleared.
3026  */
3027  PageSetPrunable(page, xid);
3028 
3029  if (PageIsAllVisible(page))
3030  {
3031  all_visible_cleared = true;
3032  PageClearAllVisible(page);
3033  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3034  vmbuffer, VISIBILITYMAP_VALID_BITS);
3035  }
3036 
3037  /* store transaction information of xact deleting the tuple */
3040  tp.t_data->t_infomask |= new_infomask;
3041  tp.t_data->t_infomask2 |= new_infomask2;
3043  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3044  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3045  /* Make sure there is no forward chain link in t_ctid */
3046  tp.t_data->t_ctid = tp.t_self;
3047 
3048  /* Signal that this is actually a move into another partition */
3049  if (changingPart)
3051 
3052  MarkBufferDirty(buffer);
3053 
3054  /*
3055  * XLOG stuff
3056  *
3057  * NB: heap_abort_speculative() uses the same xlog record and replay
3058  * routines.
3059  */
3060  if (RelationNeedsWAL(relation))
3061  {
3062  xl_heap_delete xlrec;
3063  xl_heap_header xlhdr;
3064  XLogRecPtr recptr;
3065 
3066  /*
3067  * For logical decode we need combo CIDs to properly decode the
3068  * catalog
3069  */
3071  log_heap_new_cid(relation, &tp);
3072 
3073  xlrec.flags = 0;
3074  if (all_visible_cleared)
3076  if (changingPart)
3079  tp.t_data->t_infomask2);
3081  xlrec.xmax = new_xmax;
3082 
3083  if (old_key_tuple != NULL)
3084  {
3085  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3087  else
3089  }
3090 
3091  XLogBeginInsert();
3092  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3093 
3094  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3095 
3096  /*
3097  * Log replica identity of the deleted tuple if there is one
3098  */
3099  if (old_key_tuple != NULL)
3100  {
3101  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3102  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3103  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3104 
3105  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3106  XLogRegisterData((char *) old_key_tuple->t_data
3108  old_key_tuple->t_len
3110  }
3111 
3112  /* filtering by origin on a row level is much more efficient */
3114 
3115  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3116 
3117  PageSetLSN(page, recptr);
3118  }
3119 
3120  END_CRIT_SECTION();
3121 
3122  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3123 
3124  if (vmbuffer != InvalidBuffer)
3125  ReleaseBuffer(vmbuffer);
3126 
3127  /*
3128  * If the tuple has toasted out-of-line attributes, we need to delete
3129  * those items too. We have to do this before releasing the buffer
3130  * because we need to look at the contents of the tuple, but it's OK to
3131  * release the content lock on the buffer first.
3132  */
3133  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3134  relation->rd_rel->relkind != RELKIND_MATVIEW)
3135  {
3136  /* toast table entries should never be recursively toasted */
3138  }
3139  else if (HeapTupleHasExternal(&tp))
3140  heap_toast_delete(relation, &tp, false);
3141 
3142  /*
3143  * Mark tuple for invalidation from system caches at next command
3144  * boundary. We have to do this before releasing the buffer because we
3145  * need to look at the contents of the tuple.
3146  */
3147  CacheInvalidateHeapTuple(relation, &tp, NULL);
3148 
3149  /* Now we can release the buffer */
3150  ReleaseBuffer(buffer);
3151 
3152  /*
3153  * Release the lmgr tuple lock, if we had it.
3154  */
3155  if (have_tuple_lock)
3156  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3157 
3158  pgstat_count_heap_delete(relation);
3159 
3160  if (old_key_tuple != NULL && old_key_copied)
3161  heap_freetuple(old_key_tuple);
3162 
3163  return TM_Ok;
3164 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
ItemPointerData ctid
Definition: tableam.h:126
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:8296
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2723
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1562
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed, bool *copy)
Definition: heapam.c:8378
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2745
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:495
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
CommandId cmax
Definition: tableam.h:128
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:146
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
int errcode(int sqlerrcode)
Definition: elog.c:698
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:247
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:165
OffsetNumber offnum
Definition: heapam_xlog.h:110
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:625
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:375
unsigned short uint16
Definition: c.h:440
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3807
TransactionId xmax
Definition: tableam.h:127
#define ERROR
Definition: elog.h:46
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:448
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:109
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7077
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:115
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:98
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:405
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5053
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define InvalidSnapshot
Definition: snapshot.h:123
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
TM_Result
Definition: tableam.h:71
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:638
#define InvalidCommandId
Definition: c.h:604
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4446
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:1985
#define HEAP_MOVED
Definition: htup_details.h:216
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5004
#define ereport(elevel,...)
Definition: elog.h:157
TransactionId MultiXactId
Definition: c.h:597
#define PageClearAllVisible(page)
Definition: bufpage.h:389
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:640
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
uint8 infobits_set
Definition: heapam_xlog.h:111
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:118
Definition: tableam.h:77
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
uint16 t_infomask
Definition: heapam_xlog.h:147
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition: heapam.c:6900
#define RelationNeedsWAL(relation)
Definition: rel.h:582
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2265
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:153
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2758
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:96
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:100
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:469
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define SizeOfHeapHeader
Definition: heapam_xlog.h:151
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:97

◆ heap_endscan()

void heap_endscan ( TableScanDesc  sscan)

Definition at line 1307 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, TableScanDescData::rs_key, HeapScanDescData::rs_parallelworkerdata, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_TEMP_SNAPSHOT, and UnregisterSnapshot().

Referenced by SampleHeapTupleVisible().

1308 {
1309  HeapScanDesc scan = (HeapScanDesc) sscan;
1310 
1311  /* Note: no locking manipulations needed */
1312 
1313  /*
1314  * unpin scan buffers
1315  */
1316  if (BufferIsValid(scan->rs_cbuf))
1317  ReleaseBuffer(scan->rs_cbuf);
1318 
1319  /*
1320  * decrement relation reference count and free scan descriptor storage
1321  */
1323 
1324  if (scan->rs_base.rs_key)
1325  pfree(scan->rs_base.rs_key);
1326 
1327  if (scan->rs_strategy != NULL)
1329 
1330  if (scan->rs_parallelworkerdata != NULL)
1332 
1333  if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1335 
1336  pfree(scan);
1337 }
TableScanDescData rs_base
Definition: heapam.h:49
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
void pfree(void *pointer)
Definition: mcxt.c:1169
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2078
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition: heapam.h:72
struct ScanKeyData * rs_key
Definition: relscan.h:37
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:852
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
Buffer rs_cbuf
Definition: heapam.h:60
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6699 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_prune().

6700 {
6701  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6702 
6703  if (frz->frzflags & XLH_FREEZE_XVAC)
6705 
6706  if (frz->frzflags & XLH_INVALID_XVAC)
6708 
6709  tuple->t_infomask = frz->t_infomask;
6710  tuple->t_infomask2 = frz->t_infomask2;
6711 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:423
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:326
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:322
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:321

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf 
)

Definition at line 1595 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by heap_lock_updated_tuple_rec(), heapam_fetch_row_version(), and heapam_tuple_lock().

1599 {
1600  ItemPointer tid = &(tuple->t_self);
1601  ItemId lp;
1602  Buffer buffer;
1603  Page page;
1604  OffsetNumber offnum;
1605  bool valid;
1606 
1607  /*
1608  * Fetch and pin the appropriate page of the relation.
1609  */
1610  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1611 
1612  /*
1613  * Need share lock on buffer to examine tuple commit status.
1614  */
1615  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1616  page = BufferGetPage(buffer);
1617  TestForOldSnapshot(snapshot, relation, page);
1618 
1619  /*
1620  * We'd better check for out-of-range offnum in case of VACUUM since the
1621  * TID was obtained.
1622  */
1623  offnum = ItemPointerGetOffsetNumber(tid);
1624  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1625  {
1626  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1627  ReleaseBuffer(buffer);
1628  *userbuf = InvalidBuffer;
1629  tuple->t_data = NULL;
1630  return false;
1631  }
1632 
1633  /*
1634  * get the item line pointer corresponding to the requested tid
1635  */
1636  lp = PageGetItemId(page, offnum);
1637 
1638  /*
1639  * Must check for deleted tuple.
1640  */
1641  if (!ItemIdIsNormal(lp))
1642  {
1643  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1644  ReleaseBuffer(buffer);
1645  *userbuf = InvalidBuffer;
1646  tuple->t_data = NULL;
1647  return false;
1648  }
1649 
1650  /*
1651  * fill in *tuple fields
1652  */
1653  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1654  tuple->t_len = ItemIdGetLength(lp);
1655  tuple->t_tableOid = RelationGetRelid(relation);
1656 
1657  /*
1658  * check tuple visibility, then release lock
1659  */
1660  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1661 
1662  if (valid)
1663  PredicateLockTID(relation, &(tuple->t_self), snapshot,
1664  HeapTupleHeaderGetXmin(tuple->t_data));
1665 
1666  HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1667 
1668  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1669 
1670  if (valid)
1671  {
1672  /*
1673  * All checks passed, so return the tuple as valid. Caller is now
1674  * responsible for releasing the buffer.
1675  */
1676  *userbuf = buffer;
1677 
1678  return true;
1679  }
1680 
1681  /* Tuple failed time qual */
1682  ReleaseBuffer(buffer);
1683  *userbuf = InvalidBuffer;
1684 
1685  return false;
1686 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:279
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3784
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2614
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9861
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:469
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5815 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_complete_speculative().

5816 {
5817  Buffer buffer;
5818  Page page;
5819  OffsetNumber offnum;
5820  ItemId lp = NULL;
5821  HeapTupleHeader htup;
5822 
5823  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5825  page = (Page) BufferGetPage(buffer);
5826 
5827  offnum = ItemPointerGetOffsetNumber(tid);
5828  if (PageGetMaxOffsetNumber(page) >= offnum)
5829  lp = PageGetItemId(page, offnum);
5830 
5831  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5832  elog(ERROR, "invalid lp");
5833 
5834  htup = (HeapTupleHeader) PageGetItem(page, lp);
5835 
5836  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5838  "invalid speculative token constant");
5839 
5840  /* NO EREPORT(ERROR) from here till changes are logged */
5842 
5844 
5845  MarkBufferDirty(buffer);
5846 
5847  /*
5848  * Replace the speculative insertion token with a real t_ctid, pointing to
5849  * itself like it does on regular tuples.
5850  */
5851  htup->t_ctid = *tid;
5852 
5853  /* XLOG stuff */
5854  if (RelationNeedsWAL(relation))
5855  {
5856  xl_heap_confirm xlrec;
5857  XLogRecPtr recptr;
5858 
5859  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5860 
5861  XLogBeginInsert();
5862 
5863  /* We want the same filtering on this as on a plain insert */
5865 
5866  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5867  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5868 
5869  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5870 
5871  PageSetLSN(page, recptr);
5872  }
5873 
5874  END_CRIT_SECTION();
5875 
5876  UnlockReleaseBuffer(buffer);
5877 }
OffsetNumber offnum
Definition: heapam_xlog.h:302
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1562
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:247
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define SpecTokenOffsetNumber
Definition: itemptr.h:63
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3807
#define ERROR
Definition: elog.h:46
ItemPointerData t_ctid
Definition: htup_details.h:160
#define REGBUF_STANDARD
Definition: xloginsert.h:35
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:305
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:582
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6720 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6723 {
6725  bool do_freeze;
6726  bool tuple_totally_frozen;
6727 
6728  do_freeze = heap_prepare_freeze_tuple(tuple,
6729  relfrozenxid, relminmxid,
6730  cutoff_xid, cutoff_multi,
6731  &frz, &tuple_totally_frozen);
6732 
6733  /*
6734  * Note that because this is not a WAL-logged operation, we don't need to
6735  * fill in the offset in the freeze record.
6736  */
6737 
6738  if (do_freeze)
6739  heap_execute_freeze_tuple(tuple, &frz);
6740  return do_freeze;
6741 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6470
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6699

◆ heap_get_latest_tid()

void heap_get_latest_tid ( TableScanDesc  sscan,
ItemPointer  tid 
)

Definition at line 1862 of file heapam.c.

References Assert, BUFFER_LOCK_SHARE, BufferGetPage, HEAP_XMAX_INVALID, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetRelid, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

1864 {
1865  Relation relation = sscan->rs_rd;
1866  Snapshot snapshot = sscan->rs_snapshot;
1867  ItemPointerData ctid;
1868  TransactionId priorXmax;
1869 
1870  /*
1871  * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1872  * Assume that t_ctid links are valid however - there shouldn't be invalid
1873  * ones in the table.
1874  */
1875  Assert(ItemPointerIsValid(tid));
1876 
1877  /*
1878  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1879  * need to examine, and *tid is the TID we will return if ctid turns out
1880  * to be bogus.
1881  *
1882  * Note that we will loop until we reach the end of the t_ctid chain.
1883  * Depending on the snapshot passed, there might be at most one visible
1884  * version of the row, but we don't try to optimize for that.
1885  */
1886  ctid = *tid;
1887  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1888  for (;;)
1889  {
1890  Buffer buffer;
1891  Page page;
1892  OffsetNumber offnum;
1893  ItemId lp;
1894  HeapTupleData tp;
1895  bool valid;
1896 
1897  /*
1898  * Read, pin, and lock the page.
1899  */
1900  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1901  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1902  page = BufferGetPage(buffer);
1903  TestForOldSnapshot(snapshot, relation, page);
1904 
1905  /*
1906  * Check for bogus item number. This is not treated as an error
1907  * condition because it can happen while following a t_ctid link. We
1908  * just assume that the prior tid is OK and return it unchanged.
1909  */
1910  offnum = ItemPointerGetOffsetNumber(&ctid);
1911  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1912  {
1913  UnlockReleaseBuffer(buffer);
1914  break;
1915  }
1916  lp = PageGetItemId(page, offnum);
1917  if (!ItemIdIsNormal(lp))
1918  {
1919  UnlockReleaseBuffer(buffer);
1920  break;
1921  }
1922 
1923  /* OK to access the tuple */
1924  tp.t_self = ctid;
1925  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1926  tp.t_len = ItemIdGetLength(lp);
1927  tp.t_tableOid = RelationGetRelid(relation);
1928 
1929  /*
1930  * After following a t_ctid link, we might arrive at an unrelated
1931  * tuple. Check for XMIN match.
1932  */
1933  if (TransactionIdIsValid(priorXmax) &&
1935  {
1936  UnlockReleaseBuffer(buffer);
1937  break;
1938  }
1939 
1940  /*
1941  * Check tuple visibility; if visible, set it as the new result
1942  * candidate.
1943  */
1944  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1945  HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1946  if (valid)
1947  *tid = ctid;
1948 
1949  /*
1950  * If there's a valid t_ctid link, follow it, else we're done.
1951  */
1952  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1956  {
1957  UnlockReleaseBuffer(buffer);
1958  break;
1959  }
1960 
1961  ctid = tp.t_data->t_ctid;
1962  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1963  UnlockReleaseBuffer(buffer);
1964  } /* end of loop */
1965 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:279
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3807
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9861
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:469
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_getnext()

HeapTuple heap_getnext ( TableScanDesc  sscan,
ScanDirection  direction 
)

Definition at line 1340 of file heapam.c.

References bsysscan, CheckXidAlive, elog, ereport, errcode(), errmsg_internal(), ERROR, GetHeapamTableAmRoutine(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, RelationData::rd_tableam, HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, TransactionIdIsValid, and unlikely.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), heapam_index_build_range_scan(), heapam_index_validate_scan(), index_update_stats(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), populate_typ_list(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1341 {
1342  HeapScanDesc scan = (HeapScanDesc) sscan;
1343 
1344  /*
1345  * This is still widely used directly, without going through table AM, so
1346  * add a safety check. It's possible we should, at a later point,
1347  * downgrade this to an assert. The reason for checking the AM routine,
1348  * rather than the AM oid, is that this allows to write regression tests
1349  * that create another AM reusing the heap handler.
1350  */
1352  ereport(ERROR,
1353  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1354  errmsg_internal("only heap AM is supported")));
1355 
1356  /*
1357  * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1358  * for catalog or regular tables. See detailed comments in xact.c where
1359  * these variables are declared. Normally we have such a check at tableam
1360  * level API but this is called from many places so we need to ensure it
1361  * here.
1362  */
1364  elog(ERROR, "unexpected heap_getnext call during logical decoding");
1365 
1366  /* Note: no locking manipulations needed */
1367 
1368  if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1369  heapgettup_pagemode(scan, direction,
1370  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1371  else
1372  heapgettup(scan, direction,
1373  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1374 
1375  if (scan->rs_ctup.t_data == NULL)
1376  return NULL;
1377 
1378  /*
1379  * if we get here it means we have a new current scan tuple, so point to
1380  * the proper return buffer and return the tuple.
1381  */
1382 
1384 
1385  return &scan->rs_ctup;
1386 }
TableScanDescData rs_base
Definition: heapam.h:49
int errcode(int sqlerrcode)
Definition: elog.c:698
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ERROR
Definition: elog.h:46
bool bsysscan
Definition: xact.c:96
struct ScanKeyData * rs_key
Definition: relscan.h:37
TransactionId CheckXidAlive
Definition: xact.c:95
const struct TableAmRoutine * rd_tableam
Definition: rel.h:184
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
Relation rs_rd
Definition: relscan.h:34
#define elog(elevel,...)
Definition: elog.h:232
#define unlikely(x)
Definition: c.h:273
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844
const TableAmRoutine * GetHeapamTableAmRoutine(void)

◆ heap_getnextslot()

bool heap_getnextslot ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1389 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, and HeapTupleData::t_data.

Referenced by SampleHeapTupleVisible().

1390 {
1391  HeapScanDesc scan = (HeapScanDesc) sscan;
1392 
1393  /* Note: no locking manipulations needed */
1394 
1395  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1396  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1397  else
1398  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1399 
1400  if (scan->rs_ctup.t_data == NULL)
1401  {
1402  ExecClearTuple(slot);
1403  return false;
1404  }
1405 
1406  /*
1407  * if we get here it means we have a new current scan tuple, so point to
1408  * the proper return buffer and return the tuple.
1409  */
1410 
1412 
1413  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1414  scan->rs_cbuf);
1415  return true;
1416 }
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
struct ScanKeyData * rs_key
Definition: relscan.h:37
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1392
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844

◆ heap_getnextslot_tidrange()

bool heap_getnextslot_tidrange ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1492 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), heapgettup(), heapgettup_pagemode(), ItemPointerCompare(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_maxtid, TableScanDescData::rs_mintid, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, ScanDirectionIsBackward, ScanDirectionIsForward, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, and HeapTupleData::t_self.

Referenced by SampleHeapTupleVisible().

1494 {
1495  HeapScanDesc scan = (HeapScanDesc) sscan;
1496  ItemPointer mintid = &sscan->rs_mintid;
1497  ItemPointer maxtid = &sscan->rs_maxtid;
1498 
1499  /* Note: no locking manipulations needed */
1500  for (;;)
1501  {
1502  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1503  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1504  else
1505  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1506 
1507  if (scan->rs_ctup.t_data == NULL)
1508  {
1509  ExecClearTuple(slot);
1510  return false;
1511  }
1512 
1513  /*
1514  * heap_set_tidrange will have used heap_setscanlimits to limit the
1515  * range of pages we scan to only ones that can contain the TID range
1516  * we're scanning for. Here we must filter out any tuples from these
1517  * pages that are outwith that range.
1518  */
1519  if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1520  {
1521  ExecClearTuple(slot);
1522 
1523  /*
1524  * When scanning backwards, the TIDs will be in descending order.
1525  * Future tuples in this direction will be lower still, so we can
1526  * just return false to indicate there will be no more tuples.
1527  */
1528  if (ScanDirectionIsBackward(direction))
1529  return false;
1530 
1531  continue;
1532  }
1533 
1534  /*
1535  * Likewise for the final page, we must filter out TIDs greater than
1536  * maxtid.
1537  */
1538  if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1539  {
1540  ExecClearTuple(slot);
1541 
1542  /*
1543  * When scanning forward, the TIDs will be in ascending order.
1544  * Future tuples in this direction will be higher still, so we can
1545  * just return false to indicate there will be no more tuples.
1546  */
1547  if (ScanDirectionIsForward(direction))
1548  return false;
1549  continue;
1550  }
1551 
1552  break;
1553  }
1554 
1555  /*
1556  * if we get here it means we have a new current scan tuple, so point to
1557  * the proper return buffer and return the tuple.
1558  */
1560 
1561  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1562  return true;
1563 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
ItemPointerData rs_mintid
Definition: relscan.h:40
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ScanDirectionIsBackward(direction)
Definition: sdir.h:41
ItemPointerData t_self
Definition: htup.h:65
struct ScanKeyData * rs_key
Definition: relscan.h:37
ItemPointerData rs_maxtid
Definition: relscan.h:41
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1392
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 1710 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, GlobalVisTestFor(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RecentXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heap_index_delete_tuples(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1713 {
1714  Page dp = (Page) BufferGetPage(buffer);
1715  TransactionId prev_xmax = InvalidTransactionId;
1716  BlockNumber blkno;
1717  OffsetNumber offnum;
1718  bool at_chain_start;
1719  bool valid;
1720  bool skip;
1721  GlobalVisState *vistest = NULL;
1722 
1723  /* If this is not the first call, previous call returned a (live!) tuple */
1724  if (all_dead)
1725  *all_dead = first_call;
1726 
1727  blkno = ItemPointerGetBlockNumber(tid);
1728  offnum = ItemPointerGetOffsetNumber(tid);
1729  at_chain_start = first_call;
1730  skip = !first_call;
1731 
1732  /* XXX: we should assert that a snapshot is pushed or registered */
1734  Assert(BufferGetBlockNumber(buffer) == blkno);
1735 
1736  /* Scan through possible multiple members of HOT-chain */
1737  for (;;)
1738  {
1739  ItemId lp;
1740 
1741  /* check for bogus TID */
1742  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1743  break;
1744 
1745  lp = PageGetItemId(dp, offnum);
1746 
1747  /* check for unused, dead, or redirected items */
1748  if (!ItemIdIsNormal(lp))
1749  {
1750  /* We should only see a redirect at start of chain */
1751  if (ItemIdIsRedirected(lp) && at_chain_start)
1752  {
1753  /* Follow the redirect */
1754  offnum = ItemIdGetRedirect(lp);
1755  at_chain_start = false;
1756  continue;
1757  }
1758  /* else must be end of chain */
1759  break;
1760  }
1761 
1762  /*
1763  * Update heapTuple to point to the element of the HOT chain we're
1764  * currently investigating. Having t_self set correctly is important
1765  * because the SSI checks and the *Satisfies routine for historical
1766  * MVCC snapshots need the correct tid to decide about the visibility.
1767  */
1768  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1769  heapTuple->t_len = ItemIdGetLength(lp);
1770  heapTuple->t_tableOid = RelationGetRelid(relation);
1771  ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1772 
1773  /*
1774  * Shouldn't see a HEAP_ONLY tuple at chain start.
1775  */
1776  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1777  break;
1778 
1779  /*
1780  * The xmin should match the previous xmax value, else chain is
1781  * broken.
1782  */
1783  if (TransactionIdIsValid(prev_xmax) &&
1784  !TransactionIdEquals(prev_xmax,
1785  HeapTupleHeaderGetXmin(heapTuple->t_data)))
1786  break;
1787 
1788  /*
1789  * When first_call is true (and thus, skip is initially false) we'll
1790  * return the first tuple we find. But on later passes, heapTuple
1791  * will initially be pointing to the tuple we returned last time.
1792  * Returning it again would be incorrect (and would loop forever), so
1793  * we skip it and return the next match we find.
1794  */
1795  if (!skip)
1796  {
1797  /* If it's visible per the snapshot, we must return it */
1798  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1799  HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1800  buffer, snapshot);
1801 
1802  if (valid)
1803  {
1804  ItemPointerSetOffsetNumber(tid, offnum);
1805  PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1806  HeapTupleHeaderGetXmin(heapTuple->t_data));
1807  if (all_dead)
1808  *all_dead = false;
1809  return true;
1810  }
1811  }
1812  skip = false;
1813 
1814  /*
1815  * If we can't see it, maybe no one else can either. At caller
1816  * request, check whether all chain members are dead to all
1817  * transactions.
1818  *
1819  * Note: if you change the criterion here for what is "dead", fix the
1820  * planner's get_actual_variable_range() function to match.
1821  */
1822  if (all_dead && *all_dead)
1823  {
1824  if (!vistest)
1825  vistest = GlobalVisTestFor(relation);
1826 
1827  if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1828  *all_dead = false;
1829  }
1830 
1831  /*
1832  * Check to see if HOT chain continues past this tuple; if so fetch
1833  * the next offnum and loop around.
1834  */
1835  if (HeapTupleIsHotUpdated(heapTuple))
1836  {
1838  blkno);
1839  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1840  at_chain_start = false;
1841  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1842  }
1843  else
1844  break; /* end of chain */
1845  }
1846 
1847  return false;
1848 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
static const struct exclude_list_item skip[]
Definition: pg_checksums.c:112
TransactionId RecentXmin
Definition: snapmgr.c:113
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:675
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:3963
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2614
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9861
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:684
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2758
bool HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:469
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_index_delete_tuples()

TransactionId heap_index_delete_tuples ( Relation  rel,
TM_IndexDeleteOp delstate 
)

Definition at line 7337 of file heapam.c.

References Assert, TM_IndexDeleteOp::bottomup, BOTTOMUP_MAX_NBLOCKS, bottomup_sort_and_shrink(), TM_IndexDeleteOp::bottomupfreespace, buf, BUFFER_LOCK_SHARE, BufferGetPage, BufferIsValid, TM_IndexDeleteOp::deltids, TM_IndexStatus::freespace, get_tablespace_maintenance_io_concurrency(), GlobalVisTestFor(), heap_hot_search_buffer(), HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsHotUpdated, i, TM_IndexDelete::id, index_delete_sort(), InitNonVacuumableSnapshot, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, IsCatalogRelation(), ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, TM_IndexStatus::knowndeletable, LockBuffer(), maintenance_io_concurrency, Min, TM_IndexDeleteOp::ndeltids, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, TM_IndexStatus::promising, RelationData::rd_rel, ReadBuffer(), TM_IndexDeleteOp::status, HeapTupleHeaderData::t_ctid, TM_IndexDelete::tid, TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

7338 {
7339  /* Initial assumption is that earlier pruning took care of conflict */
7340  TransactionId latestRemovedXid = InvalidTransactionId;
7343  Page page = NULL;
7345  TransactionId priorXmax;
7346 #ifdef USE_PREFETCH
7347  IndexDeletePrefetchState prefetch_state;
7348  int prefetch_distance;
7349 #endif
7350  SnapshotData SnapshotNonVacuumable;
7351  int finalndeltids = 0,
7352  nblocksaccessed = 0;
7353 
7354  /* State that's only used in bottom-up index deletion case */
7355  int nblocksfavorable = 0;
7356  int curtargetfreespace = delstate->bottomupfreespace,
7357  lastfreespace = 0,
7358  actualfreespace = 0;
7359  bool bottomup_final_block = false;
7360 
7361  InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
7362 
7363  /* Sort caller's deltids array by TID for further processing */
7364  index_delete_sort(delstate);
7365 
7366  /*
7367  * Bottom-up case: resort deltids array in an order attuned to where the
7368  * greatest number of promising TIDs are to be found, and determine how
7369  * many blocks from the start of sorted array should be considered
7370  * favorable. This will also shrink the deltids array in order to
7371  * eliminate completely unfavorable blocks up front.
7372  */
7373  if (delstate->bottomup)
7374  nblocksfavorable = bottomup_sort_and_shrink(delstate);
7375 
7376 #ifdef USE_PREFETCH
7377  /* Initialize prefetch state. */
7378  prefetch_state.cur_hblkno = InvalidBlockNumber;
7379  prefetch_state.next_item = 0;
7380  prefetch_state.ndeltids = delstate->ndeltids;
7381  prefetch_state.deltids = delstate->deltids;
7382 
7383  /*
7384  * Determine the prefetch distance that we will attempt to maintain.
7385  *
7386  * Since the caller holds a buffer lock somewhere in rel, we'd better make
7387  * sure that isn't a catalog relation before we call code that does
7388  * syscache lookups, to avoid risk of deadlock.
7389  */
7390  if (IsCatalogRelation(rel))
7391  prefetch_distance = maintenance_io_concurrency;
7392  else
7393  prefetch_distance =
7395 
7396  /* Cap initial prefetch distance for bottom-up deletion caller */
7397  if (delstate->bottomup)
7398  {
7399  Assert(nblocksfavorable >= 1);
7400  Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
7401  prefetch_distance = Min(prefetch_distance, nblocksfavorable);
7402  }
7403 
7404  /* Start prefetching. */
7405  index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7406 #endif
7407 
7408  /* Iterate over deltids, determine which to delete, check their horizon */
7409  Assert(delstate->ndeltids > 0);
7410  for (int i = 0; i < delstate->ndeltids; i++)
7411  {
7412  TM_IndexDelete *ideltid = &delstate->deltids[i];
7413  TM_IndexStatus *istatus = delstate->status + ideltid->id;
7414  ItemPointer htid = &ideltid->tid;
7415  OffsetNumber offnum;
7416 
7417  /*
7418  * Read buffer, and perform required extra steps each time a new block
7419  * is encountered. Avoid refetching if it's the same block as the one
7420  * from the last htid.
7421  */
7422  if (blkno == InvalidBlockNumber ||
7423  ItemPointerGetBlockNumber(htid) != blkno)
7424  {
7425  /*
7426  * Consider giving up early for bottom-up index deletion caller
7427  * first. (Only prefetch next-next block afterwards, when it
7428  * becomes clear that we're at least going to access the next
7429  * block in line.)
7430  *
7431  * Sometimes the first block frees so much space for bottom-up
7432  * caller that the deletion process can end without accessing any
7433  * more blocks. It is usually necessary to access 2 or 3 blocks
7434  * per bottom-up deletion operation, though.
7435  */
7436  if (delstate->bottomup)
7437  {
7438  /*
7439  * We often allow caller to delete a few additional items
7440  * whose entries we reached after the point that space target
7441  * from caller was satisfied. The cost of accessing the page
7442  * was already paid at that point, so it made sense to finish
7443  * it off. When that happened, we finalize everything here
7444  * (by finishing off the whole bottom-up deletion operation
7445  * without needlessly paying the cost of accessing any more
7446  * blocks).
7447  */
7448  if (bottomup_final_block)
7449  break;
7450 
7451  /*
7452  * Give up when we didn't enable our caller to free any
7453  * additional space as a result of processing the page that we
7454  * just finished up with. This rule is the main way in which
7455  * we keep the cost of bottom-up deletion under control.
7456  */
7457  if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
7458  break;
7459  lastfreespace = actualfreespace; /* for next time */
7460 
7461  /*
7462  * Deletion operation (which is bottom-up) will definitely
7463  * access the next block in line. Prepare for that now.
7464  *
7465  * Decay target free space so that we don't hang on for too
7466  * long with a marginal case. (Space target is only truly
7467  * helpful when it allows us to recognize that we don't need
7468  * to access more than 1 or 2 blocks to satisfy caller due to
7469  * agreeable workload characteristics.)
7470  *
7471  * We are a bit more patient when we encounter contiguous
7472  * blocks, though: these are treated as favorable blocks. The
7473  * decay process is only applied when the next block in line
7474  * is not a favorable/contiguous block. This is not an
7475  * exception to the general rule; we still insist on finding
7476  * at least one deletable item per block accessed. See
7477  * bottomup_nblocksfavorable() for full details of the theory
7478  * behind favorable blocks and heap block locality in general.
7479  *
7480  * Note: The first block in line is always treated as a
7481  * favorable block, so the earliest possible point that the
7482  * decay can be applied is just before we access the second
7483  * block in line. The Assert() verifies this for us.
7484  */
7485  Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
7486  if (nblocksfavorable > 0)
7487  nblocksfavorable--;
7488  else
7489  curtargetfreespace /= 2;
7490  }
7491 
7492  /* release old buffer */
7493  if (BufferIsValid(buf))
7494  UnlockReleaseBuffer(buf);
7495 
7496  blkno = ItemPointerGetBlockNumber(htid);
7497  buf = ReadBuffer(rel, blkno);
7498  nblocksaccessed++;
7499  Assert(!delstate->bottomup ||
7500  nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
7501 
7502 #ifdef USE_PREFETCH
7503 
7504  /*
7505  * To maintain the prefetch distance, prefetch one more page for
7506  * each page we read.
7507  */
7508  index_delete_prefetch_buffer(rel, &prefetch_state, 1);
7509 #endif
7510 
7512 
7513  page = BufferGetPage(buf);
7514  maxoff = PageGetMaxOffsetNumber(page);
7515  }
7516 
7517  if (istatus->knowndeletable)
7518  Assert(!delstate->bottomup && !istatus->promising);
7519  else
7520  {
7521  ItemPointerData tmp = *htid;
7522  HeapTupleData heapTuple;
7523 
7524  /* Are any tuples from this HOT chain non-vacuumable? */
7525  if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
7526  &heapTuple, NULL, true))
7527  continue; /* can't delete entry */
7528 
7529  /* Caller will delete, since whole HOT chain is vacuumable */
7530  istatus->knowndeletable = true;
7531 
7532  /* Maintain index free space info for bottom-up deletion case */
7533  if (delstate->bottomup)
7534  {
7535  Assert(istatus->freespace > 0);
7536  actualfreespace += istatus->freespace;
7537  if (actualfreespace >= curtargetfreespace)
7538  bottomup_final_block = true;
7539  }
7540  }
7541 
7542  /*
7543  * Maintain latestRemovedXid value for deletion operation as a whole
7544  * by advancing current value using heap tuple headers. This is
7545  * loosely based on the logic for pruning a HOT chain.
7546  */
7547  offnum = ItemPointerGetOffsetNumber(htid);
7548  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
7549  for (;;)
7550  {
7551  ItemId lp;
7552  HeapTupleHeader htup;
7553 
7554  /* Some sanity checks */
7555  if (offnum < FirstOffsetNumber || offnum > maxoff)
7556  {
7557  Assert(false);
7558  break;
7559  }
7560 
7561  lp = PageGetItemId(page, offnum);
7562  if (ItemIdIsRedirected(lp))
7563  {
7564  offnum = ItemIdGetRedirect(lp);
7565  continue;
7566  }
7567 
7568  /*
7569  * We'll often encounter LP_DEAD line pointers (especially with an
7570  * entry marked knowndeletable by our caller up front). No heap
7571  * tuple headers get examined for an htid that leads us to an
7572  * LP_DEAD item. This is okay because the earlier pruning
7573  * operation that made the line pointer LP_DEAD in the first place
7574  * must have considered the original tuple header as part of
7575  * generating its own latestRemovedXid value.
7576  *
7577  * Relying on XLOG_HEAP2_PRUNE records like this is the same
7578  * strategy that index vacuuming uses in all cases. Index VACUUM
7579  * WAL records don't even have a latestRemovedXid field of their
7580  * own for this reason.
7581  */
7582  if (!ItemIdIsNormal(lp))
7583  break;
7584 
7585  htup = (HeapTupleHeader) PageGetItem(page, lp);
7586 
7587  /*
7588  * Check the tuple XMIN against prior XMAX, if any
7589  */
7590  if (TransactionIdIsValid(priorXmax) &&
7591  !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
7592  break;
7593 
7594  HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
7595 
7596  /*
7597  * If the tuple is not HOT-updated, then we are at the end of this
7598  * HOT-chain. No need to visit later tuples from the same update
7599  * chain (they get their own index entries) -- just move on to
7600  * next htid from index AM caller.
7601  */
7602  if (!HeapTupleHeaderIsHotUpdated(htup))
7603  break;
7604 
7605  /* Advance to next HOT chain member */
7606  Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
7607  offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
7608  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
7609  }
7610 
7611  /* Enable further/final shrinking of deltids for caller */
7612  finalndeltids = i + 1;
7613  }
7614 
7615  UnlockReleaseBuffer(buf);
7616 
7617  /*
7618  * Shrink deltids array to exclude non-deletable entries at the end. This
7619  * is not just a minor optimization. Final deltids array size might be
7620  * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
7621  * ndeltids being zero in all cases with zero total deletable entries.
7622  */
7623  Assert(finalndeltids > 0 || delstate->bottomup);
7624  delstate->ndeltids = finalndeltids;
7625 
7626  return latestRemovedXid;
7627 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:7247
TM_IndexDelete * deltids
Definition: tableam.h:228
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:103
int maintenance_io_concurrency
Definition: bufmgr.c:150
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
Definition: heapam.c:7882
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define Min(x, y)
Definition: c.h:986
#define InvalidBuffer
Definition: buf.h:25
bool knowndeletable
Definition: tableam.h:196
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition: snapmgr.h:82
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:109
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1710
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:3963
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition: spccache.c:228
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3807
ItemPointerData t_ctid
Definition: htup_details.h:160
static char * buf
Definition: pg_test_fsync.c:68
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
#define InvalidTransactionId
Definition: transam.h:31
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool promising
Definition: tableam.h:199
#define HeapTupleHeaderIsHotUpdated(tup)
Definition: htup_details.h:483
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
TM_IndexStatus * status
Definition: tableam.h:229
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
ItemPointerData tid
Definition: tableam.h:189
#define InvalidOffsetNumber
Definition: off.h:26
static void index_delete_sort(TM_IndexDeleteOp *delstate)
Definition: heapam.c:7669
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
int16 freespace
Definition: tableam.h:200
int i
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
int bottomupfreespace
Definition: tableam.h:224
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6059 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_update_stats(), truncate_update_partedrel_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6060 {
6061  Buffer buffer;
6062  Page page;
6063  OffsetNumber offnum;
6064  ItemId lp = NULL;
6065  HeapTupleHeader htup;
6066  uint32 oldlen;
6067  uint32 newlen;
6068 
6069  /*
6070  * For now, we don't allow parallel updates. Unlike a regular update,
6071  * this should never create a combo CID, so it might be possible to relax
6072  * this restriction, but not without more thought and testing. It's not
6073  * clear that it would be useful, anyway.
6074  */
6075  if (IsInParallelMode())
6076  ereport(ERROR,
6077  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6078  errmsg("cannot update tuples during a parallel operation")));
6079 
6080  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6082  page = (Page) BufferGetPage(buffer);
6083 
6084  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6085  if (PageGetMaxOffsetNumber(page) >= offnum)
6086  lp = PageGetItemId(page, offnum);
6087 
6088  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6089  elog(ERROR, "invalid lp");
6090 
6091  htup = (HeapTupleHeader) PageGetItem(page, lp);
6092 
6093  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6094  newlen = tuple->t_len - tuple->t_data->t_hoff;
6095  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6096  elog(ERROR, "wrong tuple length");
6097 
6098  /* NO EREPORT(ERROR) from here till changes are logged */
6100 
6101  memcpy((char *) htup + htup->t_hoff,
6102  (char *) tuple->t_data + tuple->t_data->t_hoff,
6103  newlen);
6104 
6105  MarkBufferDirty(buffer);
6106 
6107  /* XLOG stuff */
6108  if (RelationNeedsWAL(relation))
6109  {
6110  xl_heap_inplace xlrec;
6111  XLogRecPtr recptr;
6112 
6113  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6114 
6115  XLogBeginInsert();
6116  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6117 
6118  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6119  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6120 
6121  /* inplace updates aren't decoded atm, don't log the origin */
6122 
6123  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6124 
6125  PageSetLSN(page, recptr);
6126  }
6127 
6128  END_CRIT_SECTION();
6129 
6130  UnlockReleaseBuffer(buffer);
6131 
6132  /*
6133  * Send out shared cache inval if necessary. Note that because we only
6134  * pass the new version of the tuple, this mustn't be used for any
6135  * operations that could change catcache lookup keys. But we aren't
6136  * bothering with index updates either, so that's true a fortiori.
6137  */
6139  CacheInvalidateHeapTuple(relation, tuple, NULL);
6140 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:368
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1562
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define SizeOfHeapInplace
Definition: heapam_xlog.h:314
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
int errcode(int sqlerrcode)
Definition: elog.c:698
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3807
#define ERROR
Definition: elog.h:46
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:35
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
OffsetNumber offnum
Definition: heapam_xlog.h:310
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4023
#define ereport(elevel,...)
Definition: elog.h:157
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:697
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:582
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:406
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_insert()

void heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2060 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_FROZEN, HEAP_INSERT_NO_LOGICAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleHeaderGetNatts, InvalidBlockNumber, InvalidBuffer, InvalidTransactionId, InvalidXLogRecPtr, IsToastRelation(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationGetNumberOfAttributes, RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), VISIBILITYMAP_ALL_FROZEN, VISIBILITYMAP_ALL_VISIBLE, visibilitymap_clear(), visibilitymap_get_status(), visibilitymap_pin_ok(), visibilitymap_set(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_FROZEN_SET, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLH_INSERT_ON_TOAST_RELATION, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_insert(), heapam_tuple_insert_speculative(), simple_heap_insert(), and toast_save_datum().

2062 {
2064  HeapTuple heaptup;
2065  Buffer buffer;
2066  Page page = NULL;
2067  Buffer vmbuffer = InvalidBuffer;
2068  bool starting_with_empty_page;
2069  bool all_visible_cleared = false;
2070  bool all_frozen_set = false;
2071  uint8 vmstatus = 0;
2072 
2073  /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2075  RelationGetNumberOfAttributes(relation));
2076 
2077  /*
2078  * Fill in tuple header fields and toast the tuple if necessary.
2079  *
2080  * Note: below this point, heaptup is the data we actually intend to store
2081  * into the relation; tup is the caller's original untoasted data.
2082  */
2083  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2084 
2085  /*
2086  * Find buffer to insert this tuple into. If the page is all visible,
2087  * this will also pin the requisite visibility map page.
2088  *
2089  * Also pin visibility map page if COPY FREEZE inserts tuples into an
2090  * empty page. See all_frozen_set below.
2091  */
2092  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2093  InvalidBuffer, options, bistate,
2094  &vmbuffer, NULL);
2095 
2096 
2097  /*
2098  * If we're inserting frozen entry into an empty page, set visibility map
2099  * bits and PageAllVisible() hint.
2100  *
2101  * If we're inserting frozen entry into already all_frozen page, preserve
2102  * this state.
2103  */
2105  {
2106  page = BufferGetPage(buffer);
2107 
2108  starting_with_empty_page = PageGetMaxOffsetNumber(page) == 0;
2109 
2110  if (visibilitymap_pin_ok(BufferGetBlockNumber(buffer), vmbuffer))
2111  vmstatus = visibilitymap_get_status(relation,
2112  BufferGetBlockNumber(buffer), &vmbuffer);
2113 
2114  if ((starting_with_empty_page || vmstatus & VISIBILITYMAP_ALL_FROZEN))
2115  all_frozen_set = true;
2116  }
2117 
2118  /*
2119  * We're about to do the actual insert -- but check for conflict first, to
2120  * avoid possibly having to roll back work we've just done.
2121  *
2122  * This is safe without a recheck as long as there is no possibility of
2123  * another process scanning the page between this check and the insert
2124  * being visible to the scan (i.e., an exclusive buffer content lock is
2125  * continuously held from this point until the tuple insert is visible).
2126  *
2127  * For a heap insert, we only need to check for table-level SSI locks. Our
2128  * new tuple can't possibly conflict with existing tuple locks, and heap
2129  * page locks are only consolidated versions of tuple locks; they do not
2130  * lock "gaps" as index page locks do. So we don't need to specify a
2131  * buffer when making the call, which makes for a faster check.
2132  */
2134 
2135  /* NO EREPORT(ERROR) from here till changes are logged */
2137 
2138  RelationPutHeapTuple(relation, buffer, heaptup,
2139  (options & HEAP_INSERT_SPECULATIVE) != 0);
2140 
2141  /*
2142  * If the page is all visible, need to clear that, unless we're only going
2143  * to add further frozen rows to it.
2144  *
2145  * If we're only adding already frozen rows to a page that was empty or
2146  * marked as all visible, mark it as all-visible.
2147  */
2148  if (PageIsAllVisible(BufferGetPage(buffer)) && !(options & HEAP_INSERT_FROZEN))
2149  {
2150  all_visible_cleared = true;
2152  visibilitymap_clear(relation,
2153  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2154  vmbuffer, VISIBILITYMAP_VALID_BITS);
2155  }
2156  else if (all_frozen_set)
2157  {
2158  /* We only ever set all_frozen_set after reading the page. */
2159  Assert(page);
2160 
2161  PageSetAllVisible(page);
2162  }
2163 
2164  /*
2165  * XXX Should we set PageSetPrunable on this page ?
2166  *
2167  * The inserting transaction may eventually abort thus making this tuple
2168  * DEAD and hence available for pruning. Though we don't want to optimize
2169  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2170  * aborted tuple will never be pruned until next vacuum is triggered.
2171  *
2172  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2173  */
2174 
2175  MarkBufferDirty(buffer);
2176 
2177  /* XLOG stuff */
2178  if (RelationNeedsWAL(relation))
2179  {
2180  xl_heap_insert xlrec;
2181  xl_heap_header xlhdr;
2182  XLogRecPtr recptr;
2183  Page page = BufferGetPage(buffer);
2184  uint8 info = XLOG_HEAP_INSERT;
2185  int bufflags = 0;
2186 
2187  /*
2188  * If this is a catalog, we need to transmit combo CIDs to properly
2189  * decode, so log that as well.
2190  */
2192  log_heap_new_cid(relation, heaptup);
2193 
2194  /*
2195  * If this is the single and first tuple on page, we can reinit the
2196  * page instead of restoring the whole thing. Set flag, and hide
2197  * buffer references from XLogInsert.
2198  */
2199  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2201  {
2202  info |= XLOG_HEAP_INIT_PAGE;
2203  bufflags |= REGBUF_WILL_INIT;
2204  }
2205 
2206  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2207  xlrec.flags = 0;
2208  if (all_visible_cleared)
2210  if (all_frozen_set)
2215 
2216  /*
2217  * For logical decoding, we need the tuple even if we're doing a full
2218  * page write, so make sure it's included even if we take a full-page
2219  * image. (XXX We could alternatively store a pointer into the FPW).
2220  */
2221  if (RelationIsLogicallyLogged(relation) &&
2223  {
2225  bufflags |= REGBUF_KEEP_DATA;
2226 
2227  if (IsToastRelation(relation))
2229  }
2230 
2231  XLogBeginInsert();
2232  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2233 
2234  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2235  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2236  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2237 
2238  /*
2239  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2240  * write the whole page to the xlog, we don't need to store
2241  * xl_heap_header in the xlog.
2242  */
2243  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2244  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2245  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2247  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2248  heaptup->t_len - SizeofHeapTupleHeader);
2249 
2250  /* filtering by origin on a row level is much more efficient */
2252 
2253  recptr = XLogInsert(RM_HEAP_ID, info);
2254 
2255  PageSetLSN(page, recptr);
2256