PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/subtrans.h"
#include "access/syncscan.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "port/pg_bitutils.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Data Structures

struct  IndexDeleteCounts
 

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define BOTTOMUP_MAX_NBLOCKS   6
 
#define BOTTOMUP_TOLERANCE_NBLOCKS   3
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Typedefs

typedef struct IndexDeleteCounts IndexDeleteCounts
 

Functions

static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static TM_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static void index_delete_sort (TM_IndexDeleteOp *delstate)
 
static int bottomup_sort_and_shrink (TM_IndexDeleteOp *delstate)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_changed, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (TableScanDesc sscan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
TableScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
 
void heap_rescan (TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (TableScanDesc sscan)
 
HeapTuple heap_getnext (TableScanDesc sscan, ScanDirection direction)
 
bool heap_getnextslot (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
void heap_set_tidrange (TableScanDesc sscan, ItemPointer mintid, ItemPointer maxtid)
 
bool heap_getnextslot_tidrange (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
void heap_get_latest_tid (TableScanDesc sscan, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
void heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
void simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
TM_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
TM_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
TM_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
 
static TM_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
 
static TM_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, ItemPointer tid)
 
void heap_abort_speculative (Relation relation, ItemPointer tid)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
TransactionId heap_index_delete_tuples (Relation rel, TM_IndexDeleteOp *delstate)
 
static int index_delete_sort_cmp (TM_IndexDelete *deltid1, TM_IndexDelete *deltid2)
 
static int bottomup_nblocksfavorable (IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
 
static int bottomup_sort_and_shrink_cmp (const void *arg1, const void *arg2)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_prune (XLogReaderState *record)
 
static void heap_xlog_vacuum (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 
void HeapCheckForSerializableConflictOut (bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
 

Variables

struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ BOTTOMUP_MAX_NBLOCKS

#define BOTTOMUP_MAX_NBLOCKS   6

◆ BOTTOMUP_TOLERANCE_NBLOCKS

#define BOTTOMUP_TOLERANCE_NBLOCKS   3

Definition at line 186 of file heapam.c.

Referenced by bottomup_nblocksfavorable().

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 167 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6075 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6078 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 6074 of file heapam.c.

Referenced by FreezeMultiXactId().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6077 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6076 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 163 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 214 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 165 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Typedef Documentation

◆ IndexDeleteCounts

Function Documentation

◆ bottomup_nblocksfavorable()

/*
 * bottomup_nblocksfavorable
 *		Count how many of the leading sorted block groups reference heap
 *		blocks that will be visited in (approximately) physical order.
 *
 * blockgroups must already be sorted into the caller's preferred visit
 * order; deltids is the caller's TID array, still in ascending heap TID
 * order, which each group's ifirsttid indexes into.
 *
 * Returns the length of the leading "favorable" prefix (always >= 1).
 */
static int
bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups,
						  TM_IndexDelete *deltids)
{
	int64		prevblock = -1;	/* -1: no block seen yet */
	int			nfavorable = 0;

	Assert(nblockgroups >= 1);
	Assert(nblockgroups <= BOTTOMUP_MAX_NBLOCKS);

	/*
	 * We tolerate heap blocks that will be accessed only slightly out of
	 * physical order.  Small blips occur when a pair of almost-contiguous
	 * blocks happen to fall into different buckets (perhaps due only to a
	 * small difference in npromisingtids that the bucketing scheme didn't
	 * quite manage to ignore).  We effectively ignore these blips by
	 * applying a small tolerance.  The precise tolerance we use is a little
	 * arbitrary, but it works well enough in practice.
	 */
	for (int i = 0; i < nblockgroups; i++)
	{
		TM_IndexDelete *first = deltids + blockgroups[i].ifirsttid;
		int64		curblock = (int64) ItemPointerGetBlockNumber(&first->tid);

		/* Stop at the first group that is too far from its predecessor */
		if (prevblock != -1 &&
			(curblock < prevblock - BOTTOMUP_TOLERANCE_NBLOCKS ||
			 curblock > prevblock + BOTTOMUP_TOLERANCE_NBLOCKS))
			break;

		nfavorable++;
		prevblock = curblock;
	}

	/* Always indicate that there is at least 1 favorable block */
	Assert(nfavorable >= 1);

	return nfavorable;
}
uint32 BlockNumber
Definition: block.h:31
#define BOTTOMUP_TOLERANCE_NBLOCKS
Definition: heapam.c:186
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
ItemPointerData tid
Definition: tableam.h:189
int16 ifirsttid
Definition: heapam.c:196
#define Assert(condition)
Definition: c.h:804
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98

◆ bottomup_sort_and_shrink()

/*
 * bottomup_sort_and_shrink
 *		Group the caller's deltids entries by heap block, sort the block
 *		groups into the preferred visit order, and shrink deltids down to
 *		at most BOTTOMUP_MAX_NBLOCKS blocks' worth of entries.
 *
 * On entry delstate->deltids must be sorted in ascending heap TID order
 * (so entries for the same heap block are adjacent).  On exit deltids is
 * reordered/truncated in place and ndeltids updated accordingly.
 *
 * Returns the number of "favorable" (physically ordered) leading blocks,
 * as computed by bottomup_nblocksfavorable().
 *
 * NOTE: the documentation extraction had dropped two source lines here:
 * the pg_nextpower2_32() rounding expression and the qsort comparator
 * argument.  Both are restored below, matching upstream heapam.c.
 */
static int
bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
{
	IndexDeleteCounts *blockgroups;
	TM_IndexDelete *reordereddeltids;
	BlockNumber curblock = InvalidBlockNumber;
	int			nblockgroups = 0;
	int			ncopied = 0;
	int			nblocksfavorable = 0;

	Assert(delstate->bottomup);
	Assert(delstate->ndeltids > 0);

	/* Calculate per-heap-block count of TIDs */
	blockgroups = palloc(sizeof(IndexDeleteCounts) * delstate->ndeltids);
	for (int i = 0; i < delstate->ndeltids; i++)
	{
		TM_IndexDelete *ideltid = &delstate->deltids[i];
		TM_IndexStatus *istatus = delstate->status + ideltid->id;
		ItemPointer htid = &ideltid->tid;
		bool		promising = istatus->promising;

		if (curblock != ItemPointerGetBlockNumber(htid))
		{
			/* New block group */
			nblockgroups++;

			Assert(curblock < ItemPointerGetBlockNumber(htid) ||
				   !BlockNumberIsValid(curblock));

			curblock = ItemPointerGetBlockNumber(htid);
			blockgroups[nblockgroups - 1].ifirsttid = i;
			blockgroups[nblockgroups - 1].ntids = 1;
			blockgroups[nblockgroups - 1].npromisingtids = 0;
		}
		else
		{
			blockgroups[nblockgroups - 1].ntids++;
		}

		if (promising)
			blockgroups[nblockgroups - 1].npromisingtids++;
	}

	/*
	 * We're about ready to sort block groups to determine the optimal order
	 * for visiting heap blocks.  But before we do, round the number of
	 * promising tuples for each block group up to the next power-of-two,
	 * unless it is very low (less than 4), in which case we round up to 4.
	 * npromisingtids is far too noisy to trust when choosing between a pair
	 * of block groups that both have very low values.
	 *
	 * This scheme divides heap blocks/block groups into buckets.  Each
	 * bucket contains blocks that have _approximately_ the same number of
	 * promising TIDs as each other.  The goal is to ignore relatively small
	 * differences in the total number of promising entries, so that the
	 * whole process can give a little weight to heapam factors (like heap
	 * block locality) instead.  This isn't a trade-off, really -- we have
	 * nothing to lose.  It would be foolish to interpret small differences
	 * in npromisingtids values as anything more than noise.
	 *
	 * We tiebreak on nhtids when sorting block group subsets that have the
	 * same npromisingtids, but this has the same issues as npromisingtids,
	 * and so nhtids is subject to the same power-of-two bucketing scheme.
	 * The only reason that we don't fix nhtids in the same way here too is
	 * that we'll need accurate nhtids values after the sort.  We handle
	 * nhtids bucketization dynamically instead (in the sort comparator).
	 *
	 * See bottomup_nblocksfavorable() for a full explanation of when and
	 * how heap locality/favorable blocks can significantly influence when
	 * and how heap blocks are accessed.
	 */
	for (int b = 0; b < nblockgroups; b++)
	{
		IndexDeleteCounts *group = blockgroups + b;

		/* Better off falling back on nhtids with low npromisingtids */
		if (group->npromisingtids <= 4)
			group->npromisingtids = 4;
		else
			group->npromisingtids =
				pg_nextpower2_32((uint32) group->npromisingtids);
	}

	/* Sort groups and rearrange caller's deltids array */
	qsort(blockgroups, nblockgroups, sizeof(IndexDeleteCounts),
		  bottomup_sort_and_shrink_cmp);
	reordereddeltids = palloc(delstate->ndeltids * sizeof(TM_IndexDelete));

	nblockgroups = Min(BOTTOMUP_MAX_NBLOCKS, nblockgroups);
	/* Determine number of favorable blocks at the start of final deltids */
	nblocksfavorable = bottomup_nblocksfavorable(blockgroups, nblockgroups,
												 delstate->deltids);

	for (int b = 0; b < nblockgroups; b++)
	{
		IndexDeleteCounts *group = blockgroups + b;
		TM_IndexDelete *firstdtid = delstate->deltids + group->ifirsttid;

		memcpy(reordereddeltids + ncopied, firstdtid,
			   sizeof(TM_IndexDelete) * group->ntids);
		ncopied += group->ntids;
	}

	/* Copy final grouped and sorted TIDs back into start of caller's array */
	memcpy(delstate->deltids, reordereddeltids,
		   sizeof(TM_IndexDelete) * ncopied);
	delstate->ndeltids = ncopied;

	pfree(reordereddeltids);
	pfree(blockgroups);

	return nblocksfavorable;
}
TM_IndexDelete * deltids
Definition: tableam.h:228
static int bottomup_sort_and_shrink_cmp(const void *arg1, const void *arg2)
Definition: heapam.c:7741
#define Min(x, y)
Definition: c.h:986
int16 npromisingtids
Definition: heapam.c:194
uint32 BlockNumber
Definition: block.h:31
void pfree(void *pointer)
Definition: mcxt.c:1169
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
unsigned int uint32
Definition: c.h:441
bool promising
Definition: tableam.h:199
TM_IndexStatus * status
Definition: tableam.h:229
ItemPointerData tid
Definition: tableam.h:189
#define BlockNumberIsValid(blockNumber)
Definition: block.h:70
int16 ifirsttid
Definition: heapam.c:196
#define Assert(condition)
Definition: c.h:804
static int bottomup_nblocksfavorable(IndexDeleteCounts *blockgroups, int nblockgroups, TM_IndexDelete *deltids)
Definition: heapam.c:7698
#define InvalidBlockNumber
Definition: block.h:33
void * palloc(Size size)
Definition: mcxt.c:1062
int i
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define qsort(a, b, c, d)
Definition: port.h:504

◆ bottomup_sort_and_shrink_cmp()

static int bottomup_sort_and_shrink_cmp ( const void *  arg1,
const void *  arg2 
)
static

Definition at line 7741 of file heapam.c.

References IndexDeleteCounts::ifirsttid, IndexDeleteCounts::npromisingtids, IndexDeleteCounts::ntids, pg_nextpower2_32(), and pg_unreachable.

Referenced by bottomup_sort_and_shrink().

7742 {
7743  const IndexDeleteCounts *group1 = (const IndexDeleteCounts *) arg1;
7744  const IndexDeleteCounts *group2 = (const IndexDeleteCounts *) arg2;
7745 
7746  /*
7747  * Most significant field is npromisingtids (which we invert the order of
7748  * so as to sort in desc order).
7749  *
7750  * Caller should have already normalized npromisingtids fields into
7751  * power-of-two values (buckets).
7752  */
7753  if (group1->npromisingtids > group2->npromisingtids)
7754  return -1;
7755  if (group1->npromisingtids < group2->npromisingtids)
7756  return 1;
7757 
7758  /*
7759  * Tiebreak: desc ntids sort order.
7760  *
7761  * We cannot expect power-of-two values for ntids fields. We should
7762  * behave as if they were already rounded up for us instead.
7763  */
7764  if (group1->ntids != group2->ntids)
7765  {
7766  uint32 ntids1 = pg_nextpower2_32((uint32) group1->ntids);
7767  uint32 ntids2 = pg_nextpower2_32((uint32) group2->ntids);
7768 
7769  if (ntids1 > ntids2)
7770  return -1;
7771  if (ntids1 < ntids2)
7772  return 1;
7773  }
7774 
7775  /*
7776  * Tiebreak: asc offset-into-deltids-for-block (offset to first TID for
7777  * block in deltids array) order.
7778  *
7779  * This is equivalent to sorting in ascending heap block number order
7780  * (among otherwise equal subsets of the array). This approach allows us
7781  * to avoid accessing the out-of-line TID. (We rely on the assumption
7782  * that the deltids array was sorted in ascending heap TID order when
7783  * these offsets to the first TID from each heap block group were formed.)
7784  */
7785  if (group1->ifirsttid > group2->ifirsttid)
7786  return 1;
7787  if (group1->ifirsttid < group2->ifirsttid)
7788  return -1;
7789 
7790  pg_unreachable();
7791 
7792  return 0;
7793 }
#define pg_unreachable()
Definition: c.h:258
int16 npromisingtids
Definition: heapam.c:194
static uint32 pg_nextpower2_32(uint32 num)
Definition: pg_bitutils.h:146
unsigned int uint32
Definition: c.h:441
int16 ifirsttid
Definition: heapam.c:196

◆ compute_infobits()

/*
 * Given two versions of the same t_infomask for a tuple, compress them
 * into a single byte of XLHL_* flag bits for a WAL record.
 */
static uint8
compute_infobits(uint16 infomask, uint16 infomask2)
{
	uint8		bits = 0;

	if (infomask & HEAP_XMAX_IS_MULTI)
		bits |= XLHL_XMAX_IS_MULTI;
	if (infomask & HEAP_XMAX_LOCK_ONLY)
		bits |= XLHL_XMAX_LOCK_ONLY;
	if (infomask & HEAP_XMAX_EXCL_LOCK)
		bits |= XLHL_XMAX_EXCL_LOCK;
	/* note we ignore HEAP_XMAX_SHR_LOCK here */
	if (infomask & HEAP_XMAX_KEYSHR_LOCK)
		bits |= XLHL_XMAX_KEYSHR_LOCK;
	if (infomask2 & HEAP_KEYS_UPDATED)
		bits |= XLHL_KEYS_UPDATED;

	return bits;
}
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:269
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:268
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:270
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:272
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:271

◆ compute_new_xmax_infomask()

/*
 * compute_new_xmax_infomask
 *		Given a tuple's current xmax and infomask words, plus a transaction
 *		(add_to_xmax) that wants to lock or update it in the given mode,
 *		compute the replacement xmax and infomask words.
 *
 * May create a new MultiXactId when the tuple is already locked/updated by
 * another transaction that must be preserved.
 *
 * NOTE: the documentation extraction had dropped several source lines here
 * (the entry Assert, the LockTupleNoKeyExclusive case label, the
 * MultiXactIdGetUpdateXid() call, and two "MultiXactStatus status;"
 * declarations).  All are restored below, matching upstream heapam.c.
 */
static void
compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask,
						  uint16 old_infomask2, TransactionId add_to_xmax,
						  LockTupleMode mode, bool is_update,
						  TransactionId *result_xmax, uint16 *result_infomask,
						  uint16 *result_infomask2)
{
	TransactionId new_xmax;
	uint16		new_infomask,
				new_infomask2;

	Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));

l5:
	new_infomask = 0;
	new_infomask2 = 0;
	if (old_infomask & HEAP_XMAX_INVALID)
	{
		/*
		 * No previous locker; we just insert our own TransactionId.
		 *
		 * Note that it's critical that this case be the first one checked,
		 * because there are several blocks below that come back to this one
		 * to implement certain optimizations; old_infomask might contain
		 * other dirty bits in those cases, but we don't really care.
		 */
		if (is_update)
		{
			new_xmax = add_to_xmax;
			if (mode == LockTupleExclusive)
				new_infomask2 |= HEAP_KEYS_UPDATED;
		}
		else
		{
			new_infomask |= HEAP_XMAX_LOCK_ONLY;
			switch (mode)
			{
				case LockTupleKeyShare:
					new_xmax = add_to_xmax;
					new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
					break;
				case LockTupleShare:
					new_xmax = add_to_xmax;
					new_infomask |= HEAP_XMAX_SHR_LOCK;
					break;
				case LockTupleNoKeyExclusive:
					new_xmax = add_to_xmax;
					new_infomask |= HEAP_XMAX_EXCL_LOCK;
					break;
				case LockTupleExclusive:
					new_xmax = add_to_xmax;
					new_infomask |= HEAP_XMAX_EXCL_LOCK;
					new_infomask2 |= HEAP_KEYS_UPDATED;
					break;
				default:
					new_xmax = InvalidTransactionId;	/* silence compiler */
					elog(ERROR, "invalid lock mode");
			}
		}
	}
	else if (old_infomask & HEAP_XMAX_IS_MULTI)
	{
		MultiXactStatus new_status;

		/*
		 * Currently we don't allow XMAX_COMMITTED to be set for multis, so
		 * cross-check.
		 */
		Assert(!(old_infomask & HEAP_XMAX_COMMITTED));

		/*
		 * A multixact together with LOCK_ONLY set but neither lock bit set
		 * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
		 * anymore.  This check is critical for databases upgraded by
		 * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
		 * that such multis are never passed.
		 */
		if (HEAP_LOCKED_UPGRADED(old_infomask))
		{
			old_infomask &= ~HEAP_XMAX_IS_MULTI;
			old_infomask |= HEAP_XMAX_INVALID;
			goto l5;
		}

		/*
		 * If the XMAX is already a MultiXactId, then we need to expand it to
		 * include add_to_xmax; but if all the members were lockers and are
		 * all gone, we can do away with the IS_MULTI bit and just set
		 * add_to_xmax as the only locker/updater.  If all lockers are gone
		 * and we have an updater that aborted, we can also do without a
		 * multi.
		 *
		 * The cost of doing GetMultiXactIdMembers would be paid by
		 * MultiXactIdExpand if we weren't to do this, so this check is not
		 * incurring extra work anyhow.
		 */
		if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
		{
			if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
				!TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
																old_infomask)))
			{
				/*
				 * Reset these bits and restart; otherwise fall through to
				 * create a new multi below.
				 */
				old_infomask &= ~HEAP_XMAX_IS_MULTI;
				old_infomask |= HEAP_XMAX_INVALID;
				goto l5;
			}
		}

		new_status = get_mxact_status_for_lock(mode, is_update);

		new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
									 new_status);
		GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
	}
	else if (old_infomask & HEAP_XMAX_COMMITTED)
	{
		/*
		 * It's a committed update, so we need to preserve him as updater of
		 * the tuple.
		 */
		MultiXactStatus status;
		MultiXactStatus new_status;

		if (old_infomask2 & HEAP_KEYS_UPDATED)
			status = MultiXactStatusUpdate;
		else
			status = MultiXactStatusNoKeyUpdate;

		new_status = get_mxact_status_for_lock(mode, is_update);

		/*
		 * since it's not running, it's obviously impossible for the old
		 * updater to be identical to the current one, so we need not check
		 * for that case as we do in the block above.
		 */
		new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
		GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
	}
	else if (TransactionIdIsInProgress(xmax))
	{
		/*
		 * If the XMAX is a valid, in-progress TransactionId, then we need to
		 * create a new MultiXactId that includes both the old locker or
		 * updater and our own TransactionId.
		 */
		MultiXactStatus new_status;
		MultiXactStatus old_status;
		LockTupleMode old_mode;

		if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
		{
			if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
				old_status = MultiXactStatusForKeyShare;
			else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
				old_status = MultiXactStatusForShare;
			else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
			{
				if (old_infomask2 & HEAP_KEYS_UPDATED)
					old_status = MultiXactStatusForUpdate;
				else
					old_status = MultiXactStatusForNoKeyUpdate;
			}
			else
			{
				/*
				 * LOCK_ONLY can be present alone only when a page has been
				 * upgraded by pg_upgrade.  But in that case,
				 * TransactionIdIsInProgress() should have returned false.  We
				 * assume it's no longer locked in this case.
				 */
				elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
				old_infomask |= HEAP_XMAX_INVALID;
				old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
				goto l5;
			}
		}
		else
		{
			/* it's an update, but which kind? */
			if (old_infomask2 & HEAP_KEYS_UPDATED)
				old_status = MultiXactStatusUpdate;
			else
				old_status = MultiXactStatusNoKeyUpdate;
		}

		old_mode = TUPLOCK_from_mxstatus(old_status);

		/*
		 * If the lock to be acquired is for the same TransactionId as the
		 * existing lock, there's an optimization possible: consider only the
		 * strongest of both locks as the only one present, and restart.
		 */
		if (xmax == add_to_xmax)
		{
			/*
			 * Note that it's not possible for the original tuple to be
			 * updated: we wouldn't be here because the tuple would have been
			 * invisible and we wouldn't try to update it.  As a subtlety,
			 * this code can also run when traversing an update chain to lock
			 * future versions of a tuple.  But we wouldn't be here either,
			 * because the add_to_xmax would be different from the original
			 * updater.
			 */
			Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));

			/* acquire the strongest of both */
			if (mode < old_mode)
				mode = old_mode;
			/* mustn't touch is_update */

			old_infomask |= HEAP_XMAX_INVALID;
			goto l5;
		}

		/* otherwise, just fall back to creating a new multixact */
		new_status = get_mxact_status_for_lock(mode, is_update);
		new_xmax = MultiXactIdCreate(xmax, old_status,
									 add_to_xmax, new_status);
		GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
	}
	else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
			 TransactionIdDidCommit(xmax))
	{
		/*
		 * It's a committed update, so we gotta preserve him as updater of the
		 * tuple.
		 */
		MultiXactStatus status;
		MultiXactStatus new_status;

		if (old_infomask2 & HEAP_KEYS_UPDATED)
			status = MultiXactStatusUpdate;
		else
			status = MultiXactStatusNoKeyUpdate;

		new_status = get_mxact_status_for_lock(mode, is_update);

		/*
		 * since it's not running, it's obviously impossible for the old
		 * updater to be identical to the current one, so we need not check
		 * for that case as we do in the block above.
		 */
		new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
		GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
	}
	else
	{
		/*
		 * Can get here iff the locking/updating transaction was running when
		 * the infomask was extracted from the tuple, but finished before
		 * TransactionIdIsInProgress got to run.  Deal with it as if there was
		 * no locker at all in the first place.
		 */
		old_infomask |= HEAP_XMAX_INVALID;
		goto l5;
	}

	*result_infomask = new_infomask;
	*result_infomask2 = new_infomask2;
	*result_xmax = new_xmax;
}
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6683
static PgChecksumMode mode
Definition: pg_checksums.c:65
MultiXactStatus
Definition: multixact.h:41
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1359
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:206
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:262
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6764
unsigned short uint16
Definition: c.h:440
#define ERROR
Definition: elog.h:46
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:386
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId MultiXactId
Definition: c.h:597
#define Assert(condition)
Definition: c.h:804
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:214
#define elog(elevel,...)
Definition: elog.h:232
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4199
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:264
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:266
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:439

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7031 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

/*
 * ConditionalMultiXactIdWait -- nonblocking variant of MultiXactIdWait.
 *
 * Passes nowait = true down to Do_MultiXactIdWait, so it returns false as
 * soon as one conflicting member's lock cannot be acquired without
 * sleeping.  No ctid or operation context is supplied (NULL / XLTW_None),
 * so no error-context callback will name a specific tuple while waiting.
 */
7033 {
7034  return Do_MultiXactIdWait(multi, status, infomask, true,
7035  rel, NULL, XLTW_None, remaining);
7036 }
int remaining
Definition: informix.c:667
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6931
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 6931 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

/*
 * Do_MultiXactIdWait -- wait (or, with nowait, just probe) until the
 * conflicting members of a multixact are finished.
 *
 * Walks the member array of 'multi'.  Members belonging to the current
 * transaction are skipped; for each remaining member that conflicts with
 * 'status' we either sleep in XactLockTableWait, or -- when 'nowait' is
 * true -- try ConditionalXactLockTableWait and return false on the first
 * failure.  If 'remaining' is non-NULL, it receives the count of members
 * that were skipped but are still running.
 *
 * NOTE(review): this is a Doxygen listing; hyperlinked source lines were
 * dropped from it (6955 and 6961-6962 below).  Consult heapam.c itself
 * for the exact dropped conditions.
 */
6935 {
6936  bool result = true;
6937  MultiXactMember *members;
6938  int nmembers;
6939  int remain = 0;
6940 
6941  /* for pre-pg_upgrade tuples, no need to sleep at all */
6942  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
6943  GetMultiXactIdMembers(multi, &members, false,
6944  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6945 
6946  if (nmembers >= 0)
6947  {
6948  int i;
6949 
6950  for (i = 0; i < nmembers; i++)
6951  {
6952  TransactionId memxid = members[i].xid;
6953  MultiXactStatus memstatus = members[i].status;
6954 
 /* NOTE(review): line 6955 was dropped by the extraction -- presumably
  * the TransactionIdIsCurrentTransactionId(memxid) test (that function
  * appears in this function's References list); TODO confirm. */
6956  {
6957  remain++;
6958  continue;
6959  }
6960 
 /* NOTE(review): lines 6961-6962 were dropped -- presumably the
  * !DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus), ...) test
  * (both names appear in the References list); TODO confirm. */
6963  {
6964  if (remaining && TransactionIdIsInProgress(memxid))
6965  remain++;
6966  continue;
6967  }
6968 
6969  /*
6970  * This member conflicts with our multi, so we have to sleep (or
6971  * return failure, if asked to avoid waiting.)
6972  *
6973  * Note that we don't set up an error context callback ourselves,
6974  * but instead we pass the info down to XactLockTableWait. This
6975  * might seem a bit wasteful because the context is set up and
6976  * torn down for each member of the multixact, but in reality it
6977  * should be barely noticeable, and it avoids duplicate code.
6978  */
6979  if (nowait)
6980  {
6981  result = ConditionalXactLockTableWait(memxid);
6982  if (!result)
6983  break;
6984  }
6985  else
6986  XactLockTableWait(memxid, rel, ctid, oper);
6987  }
6988 
6989  pfree(members);
6990  }
6991 
6992  if (remaining)
6993  *remaining = remain;
6994 
6995  return result;
6996 }
int remaining
Definition: informix.c:667
MultiXactStatus
Definition: multixact.h:41
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1359
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:155
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:713
void pfree(void *pointer)
Definition: mcxt.c:1169
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:582
MultiXactStatus status
Definition: multixact.h:63
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:640
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:382
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode,
bool current_is_member 
)
static

Definition at line 6832 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

/*
 * DoesMultiXactIdConflict -- does any live member of 'multi' hold a lock
 * that conflicts with the tuple lock mode we want?
 *
 * Aborted updaters and no-longer-in-progress lockers are ignored.  When
 * 'current_is_member' is non-NULL it is additionally set to true if the
 * current transaction is itself a member; note the loop keeps scanning
 * after a conflict is found until that question is settled.
 *
 * NOTE(review): Doxygen listing; hyperlinked line 6861 was dropped from
 * it (see marker below).
 */
6834 {
6835  int nmembers;
6836  MultiXactMember *members;
6837  bool result = false;
6838  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6839 
6840  if (HEAP_LOCKED_UPGRADED(infomask))
6841  return false;
6842 
6843  nmembers = GetMultiXactIdMembers(multi, &members, false,
6844  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6845  if (nmembers >= 0)
6846  {
6847  int i;
6848 
6849  for (i = 0; i < nmembers; i++)
6850  {
6851  TransactionId memxid;
6852  LOCKMODE memlockmode;
6853 
6854  if (result && (current_is_member == NULL || *current_is_member))
6855  break;
6856 
6857  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6858 
6859  /* ignore members from current xact (but track their presence) */
6860  memxid = members[i].xid;
 /* NOTE(review): line 6861 was dropped by the extraction -- presumably
  * the TransactionIdIsCurrentTransactionId(memxid) test (named in this
  * function's References list); TODO confirm. */
6862  {
6863  if (current_is_member != NULL)
6864  *current_is_member = true;
6865  continue;
6866  }
6867  else if (result)
6868  continue;
6869 
6870  /* ignore members that don't conflict with the lock we want */
6871  if (!DoLockModesConflict(memlockmode, wanted))
6872  continue;
6873 
6874  if (ISUPDATE_from_mxstatus(members[i].status))
6875  {
6876  /* ignore aborted updaters */
6877  if (TransactionIdDidAbort(memxid))
6878  continue;
6879  }
6880  else
6881  {
6882  /* ignore lockers-only that are no longer in progress */
6883  if (!TransactionIdIsInProgress(memxid))
6884  continue;
6885  }
6886 
6887  /*
6888  * Whatever remains are either live lockers that conflict with our
6889  * wanted lock, and updaters that are not aborted. Those conflict
6890  * with what we want. Set up to return true, but keep going to
6891  * look for the current transaction among the multixact members,
6892  * if needed.
6893  */
6894  result = true;
6895  }
6896  pfree(members);
6897  }
6898 
6899  return result;
6900 }
uint32 TransactionId
Definition: c.h:587
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1359
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:155
void pfree(void *pointer)
Definition: mcxt.c:1169
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:582
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
static const struct @13 tupleLockExtraInfo[MaxLockTupleMode+1]
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_changed,
bool copy 
)
static

Definition at line 8310 of file heapam.c.

References Assert, bms_free(), bms_is_empty(), bms_is_member(), FirstLowInvalidHeapAttributeNumber, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleHasExternal, i, INDEX_ATTR_BITMAP_IDENTITY_KEY, MaxHeapAttributeNumber, TupleDescData::natts, RelationData::rd_rel, RelationGetDescr, RelationGetIndexAttrBitmap(), RelationIsLogicallyLogged, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

/*
 * ExtractReplicaIdentity -- build the old-tuple image to be logged for
 * logical replication.
 *
 * Returns NULL when nothing needs to be logged (relation not logically
 * logged, REPLICA_IDENTITY_NOTHING, key unchanged, or no replica-identity
 * columns defined).  For REPLICA_IDENTITY_FULL the whole tuple is
 * returned, flattened if it carries external TOAST values.  Otherwise a
 * fresh tuple is formed containing only the identity columns, with nulls
 * elsewhere.  *copy reports whether the returned tuple is a new
 * allocation the caller must free.
 *
 * NOTE(review): Doxygen listing; hyperlinked lines 8318, 8348 and 8368
 * were dropped from it (see markers below).
 */
8312 {
8313  TupleDesc desc = RelationGetDescr(relation);
8314  char replident = relation->rd_rel->relreplident;
8315  Bitmapset *idattrs;
8316  HeapTuple key_tuple;
8317  bool nulls[MaxHeapAttributeNumber];
 /* NOTE(review): line 8318 dropped -- presumably the
  * Datum values[MaxHeapAttributeNumber] declaration, given the use of
  * 'values' below; TODO confirm. */
8319 
8320  *copy = false;
8321 
8322  if (!RelationIsLogicallyLogged(relation))
8323  return NULL;
8324 
8325  if (replident == REPLICA_IDENTITY_NOTHING)
8326  return NULL;
8327 
8328  if (replident == REPLICA_IDENTITY_FULL)
8329  {
8330  /*
8331  * When logging the entire old tuple, it very well could contain
8332  * toasted columns. If so, force them to be inlined.
8333  */
8334  if (HeapTupleHasExternal(tp))
8335  {
8336  *copy = true;
8337  tp = toast_flatten_tuple(tp, desc);
8338  }
8339  return tp;
8340  }
8341 
8342  /* if the key hasn't changed and we're only logging the key, we're done */
8343  if (!key_changed)
8344  return NULL;
8345 
8346  /* find out the replica identity columns */
8347  idattrs = RelationGetIndexAttrBitmap(relation,
 /* NOTE(review): line 8348 dropped -- presumably the
  * INDEX_ATTR_BITMAP_IDENTITY_KEY argument (named in the References
  * list); TODO confirm. */
8349 
8350  /*
8351  * If there's no defined replica identity columns, treat as !key_changed.
8352  * (This case should not be reachable from heap_update, since that should
8353  * calculate key_changed accurately. But heap_delete just passes constant
8354  * true for key_changed, so we can hit this case in deletes.)
8355  */
8356  if (bms_is_empty(idattrs))
8357  return NULL;
8358 
8359  /*
8360  * Construct a new tuple containing only the replica identity columns,
8361  * with nulls elsewhere. While we're at it, assert that the replica
8362  * identity columns aren't null.
8363  */
8364  heap_deform_tuple(tp, desc, values, nulls);
8365 
8366  for (int i = 0; i < desc->natts; i++)
8367  {
 /* NOTE(review): line 8368 dropped -- presumably the
  * bms_is_member(i - FirstLowInvalidHeapAttributeNumber, ...) test
  * (both names appear in the References list); TODO confirm. */
8369  idattrs))
8370  Assert(!nulls[i]);
8371  else
8372  nulls[i] = true;
8373  }
8374 
8375  key_tuple = heap_form_tuple(desc, values, nulls);
8376  *copy = true;
8377 
8378  bms_free(idattrs);
8379 
8380  /*
8381  * If the tuple, which by here only contains indexed columns, still has
8382  * toasted columns, force them to be inlined. This is somewhat unlikely
8383  * since there's limits on the size of indexed columns, so we don't
8384  * duplicate toast_flatten_tuple()'s functionality in the above loop over
8385  * the indexed columns, even if it would be more efficient.
8386  */
8387  if (HeapTupleHasExternal(key_tuple))
8388  {
8389  HeapTuple oldtup = key_tuple;
8390 
8391  key_tuple = toast_flatten_tuple(oldtup, desc);
8392  heap_freetuple(oldtup);
8393  }
8394 
8395  return key_tuple;
8396 }
#define RelationGetDescr(relation)
Definition: rel.h:503
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:674
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
uintptr_t Datum
Definition: postgres.h:411
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define Assert(condition)
Definition: c.h:804
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:166
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:5003
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8763 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

/*
 * fix_infomask_from_infobits -- reconstruct a tuple's xmax-related
 * infomask/infomask2 bits from the compact XLHL_* flag byte carried in a
 * WAL record.  Used during redo (heap_xlog_delete/lock/lock_updated/
 * update per the Referenced-by list); clears the relevant bits first,
 * then sets those requested.
 *
 * NOTE(review): Doxygen listing; line 8766 -- the continuation of the
 * mask-clearing expression, presumably "HEAP_XMAX_KEYSHR_LOCK |
 * HEAP_XMAX_EXCL_LOCK);" given the bits set below -- was dropped;
 * TODO confirm.
 */
8764 {
8765  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8767  *infomask2 &= ~HEAP_KEYS_UPDATED;
8768 
8769  if (infobits & XLHL_XMAX_IS_MULTI)
8770  *infomask |= HEAP_XMAX_IS_MULTI;
8771  if (infobits & XLHL_XMAX_LOCK_ONLY)
8772  *infomask |= HEAP_XMAX_LOCK_ONLY;
8773  if (infobits & XLHL_XMAX_EXCL_LOCK)
8774  *infomask |= HEAP_XMAX_EXCL_LOCK;
8775  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8776  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8777  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8778 
8779  if (infobits & XLHL_KEYS_UPDATED)
8780  *infomask2 |= HEAP_KEYS_UPDATED;
8781 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:269
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:268
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:270
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:272
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:271

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2021 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferCleanup(), intorel_shutdown(), and transientrel_shutdown().

/*
 * FreeBulkInsertState -- release a BulkInsertState obtained from
 * GetBulkInsertState: unpin the currently-held buffer (if any), free the
 * buffer access strategy, then the state struct itself.
 */
2022 {
2023  if (bistate->current_buf != InvalidBuffer)
2024  ReleaseBuffer(bistate->current_buf);
2025  FreeAccessStrategy(bistate->strategy);
2026  pfree(bistate);
2027 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3772
void pfree(void *pointer)
Definition: mcxt.c:1169
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:31
Buffer current_buf
Definition: hio.h:32

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 6102 of file heapam.c.

References Assert, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

/*
 * FreezeMultiXactId -- decide what to do with a multixact xmax while
 * freezing a tuple.
 *
 * Returns the value the caller should install as the tuple's new xmax,
 * with the chosen action reported through *flags:
 *   FRM_NOOP            keep the existing multi unchanged;
 *   FRM_INVALIDATE_XMAX remove xmax entirely;
 *   FRM_RETURN_IS_XID   replace the multi with the returned update xid
 *                       (FRM_MARK_COMMITTED may also be set);
 *   FRM_RETURN_IS_MULTI a new multi was built from surviving members.
 * Raises data-corruption errors for multis/xids older than the relation's
 * relminmxid/relfrozenxid horizons.
 *
 * NOTE(review): Doxygen listing; several hyperlinked lines were dropped
 * (the 'xid' declaration at 6107, the errcode(ERRCODE_DATA_CORRUPTED)
 * line of each ereport, Asserts at 6161/6250/6253, and the in-progress
 * test at 6269-6270).  Markers below flag the structurally significant
 * gaps; consult heapam.c for the exact text.
 */
6106 {
 /* NOTE(review): line 6107 dropped -- presumably "TransactionId xid;",
  * given the unqualified assignments to 'xid' below; TODO confirm. */
6108  int i;
6109  MultiXactMember *members;
6110  int nmembers;
6111  bool need_replace;
6112  int nnewmembers;
6113  MultiXactMember *newmembers;
6114  bool has_lockers;
6115  TransactionId update_xid;
6116  bool update_committed;
6117 
6118  *flags = 0;
6119 
6120  /* We should only be called in Multis */
6121  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6122 
6123  if (!MultiXactIdIsValid(multi) ||
6124  HEAP_LOCKED_UPGRADED(t_infomask))
6125  {
6126  /* Ensure infomask bits are appropriately set/reset */
6127  *flags |= FRM_INVALIDATE_XMAX;
6128  return InvalidTransactionId;
6129  }
6130  else if (MultiXactIdPrecedes(multi, relminmxid))
6131  ereport(ERROR,
6133  errmsg_internal("found multixact %u from before relminmxid %u",
6134  multi, relminmxid)));
6135  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6136  {
6137  /*
6138  * This old multi cannot possibly have members still running, but
6139  * verify just in case. If it was a locker only, it can be removed
6140  * without any further consideration; but if it contained an update,
6141  * we might need to preserve it.
6142  */
6143  if (MultiXactIdIsRunning(multi,
6144  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6145  ereport(ERROR,
6147  errmsg_internal("multixact %u from before cutoff %u found to be still running",
6148  multi, cutoff_multi)));
6149 
6150  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6151  {
6152  *flags |= FRM_INVALIDATE_XMAX;
6153  xid = InvalidTransactionId; /* not strictly necessary */
6154  }
6155  else
6156  {
6157  /* replace multi by update xid */
6158  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6159 
6160  /* wasn't only a lock, xid needs to be valid */
6162 
6163  if (TransactionIdPrecedes(xid, relfrozenxid))
6164  ereport(ERROR,
6166  errmsg_internal("found update xid %u from before relfrozenxid %u",
6167  xid, relfrozenxid)));
6168 
6169  /*
6170  * If the xid is older than the cutoff, it has to have aborted,
6171  * otherwise the tuple would have gotten pruned away.
6172  */
6173  if (TransactionIdPrecedes(xid, cutoff_xid))
6174  {
6175  if (TransactionIdDidCommit(xid))
6176  ereport(ERROR,
6178  errmsg_internal("cannot freeze committed update xid %u", xid)));
6179  *flags |= FRM_INVALIDATE_XMAX;
6180  xid = InvalidTransactionId; /* not strictly necessary */
6181  }
6182  else
6183  {
6184  *flags |= FRM_RETURN_IS_XID;
6185  }
6186  }
6187 
6188  return xid;
6189  }
6190 
6191  /*
6192  * This multixact might have or might not have members still running, but
6193  * we know it's valid and is newer than the cutoff point for multis.
6194  * However, some member(s) of it may be below the cutoff for Xids, so we
6195  * need to walk the whole members array to figure out what to do, if
6196  * anything.
6197  */
6198 
6199  nmembers =
6200  GetMultiXactIdMembers(multi, &members, false,
6201  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6202  if (nmembers <= 0)
6203  {
6204  /* Nothing worth keeping */
6205  *flags |= FRM_INVALIDATE_XMAX;
6206  return InvalidTransactionId;
6207  }
6208 
6209  /* is there anything older than the cutoff? */
6210  need_replace = false;
6211  for (i = 0; i < nmembers; i++)
6212  {
6213  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6214  {
6215  need_replace = true;
6216  break;
6217  }
6218  }
6219 
6220  /*
6221  * In the simplest case, there is no member older than the cutoff; we can
6222  * keep the existing MultiXactId as is.
6223  */
6224  if (!need_replace)
6225  {
6226  *flags |= FRM_NOOP;
6227  pfree(members);
6228  return InvalidTransactionId;
6229  }
6230 
6231  /*
6232  * If the multi needs to be updated, figure out which members we need
6233  * to keep.
6234  */
6235  nnewmembers = 0;
6236  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6237  has_lockers = false;
6238  update_xid = InvalidTransactionId;
6239  update_committed = false;
6240 
6241  for (i = 0; i < nmembers; i++)
6242  {
6243  /*
6244  * Determine whether to keep this member or ignore it.
6245  */
6246  if (ISUPDATE_from_mxstatus(members[i].status))
6247  {
6248  TransactionId xid = members[i].xid;
6249 
 /* NOTE(review): line 6250 dropped -- presumably
  * Assert(TransactionIdIsValid(xid)); TODO confirm. */
6251  if (TransactionIdPrecedes(xid, relfrozenxid))
6252  ereport(ERROR,
6254  errmsg_internal("found update xid %u from before relfrozenxid %u",
6255  xid, relfrozenxid)));
6256 
6257  /*
6258  * It's an update; should we keep it? If the transaction is known
6259  * aborted or crashed then it's okay to ignore it, otherwise not.
6260  * Note that an updater older than cutoff_xid cannot possibly be
6261  * committed, because HeapTupleSatisfiesVacuum would have returned
6262  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6263  *
6264  * As with all tuple visibility routines, it's critical to test
6265  * TransactionIdIsInProgress before TransactionIdDidCommit,
6266  * because of race conditions explained in detail in
6267  * heapam_visibility.c.
6268  */
 /* NOTE(review): lines 6269-6270 dropped -- presumably the
  * TransactionIdIsCurrentTransactionId(xid) ||
  * TransactionIdIsInProgress(xid) test, per the comment above and the
  * References list; TODO confirm. */
6271  {
6272  Assert(!TransactionIdIsValid(update_xid));
6273  update_xid = xid;
6274  }
6275  else if (TransactionIdDidCommit(xid))
6276  {
6277  /*
6278  * The transaction committed, so we can tell caller to set
6279  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6280  * the transaction is not running.)
6281  */
6282  Assert(!TransactionIdIsValid(update_xid));
6283  update_committed = true;
6284  update_xid = xid;
6285  }
6286  else
6287  {
6288  /*
6289  * Not in progress, not committed -- must be aborted or
6290  * crashed; we can ignore it.
6291  */
6292  }
6293 
6294  /*
6295  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6296  * update Xid cannot possibly be older than the xid cutoff. The
6297  * presence of such a tuple would cause corruption, so be paranoid
6298  * and check.
6299  */
6300  if (TransactionIdIsValid(update_xid) &&
6301  TransactionIdPrecedes(update_xid, cutoff_xid))
6302  ereport(ERROR,
6304  errmsg_internal("found update xid %u from before xid cutoff %u",
6305  update_xid, cutoff_xid)));
6306 
6307  /*
6308  * If we determined that it's an Xid corresponding to an update
6309  * that must be retained, additionally add it to the list of
6310  * members of the new Multi, in case we end up using that. (We
6311  * might still decide to use only an update Xid and not a multi,
6312  * but it's easier to maintain the list as we walk the old members
6313  * list.)
6314  */
6315  if (TransactionIdIsValid(update_xid))
6316  newmembers[nnewmembers++] = members[i];
6317  }
6318  else
6319  {
6320  /* We only keep lockers if they are still running */
6321  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6322  TransactionIdIsInProgress(members[i].xid))
6323  {
6324  /* running locker cannot possibly be older than the cutoff */
6325  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6326  newmembers[nnewmembers++] = members[i];
6327  has_lockers = true;
6328  }
6329  }
6330  }
6331 
6332  pfree(members);
6333 
6334  if (nnewmembers == 0)
6335  {
6336  /* nothing worth keeping!? Tell caller to remove the whole thing */
6337  *flags |= FRM_INVALIDATE_XMAX;
6338  xid = InvalidTransactionId;
6339  }
6340  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6341  {
6342  /*
6343  * If there's a single member and it's an update, pass it back alone
6344  * without creating a new Multi. (XXX we could do this when there's a
6345  * single remaining locker, too, but that would complicate the API too
6346  * much; moreover, the case with the single updater is more
6347  * interesting, because those are longer-lived.)
6348  */
6349  Assert(nnewmembers == 1);
6350  *flags |= FRM_RETURN_IS_XID;
6351  if (update_committed)
6352  *flags |= FRM_MARK_COMMITTED;
6353  xid = update_xid;
6354  }
6355  else
6356  {
6357  /*
6358  * Create a new multixact with the surviving members of the previous
6359  * one, to set as new Xmax in the tuple.
6360  */
6361  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6362  *flags |= FRM_RETURN_IS_MULTI;
6363  }
6364 
6365  pfree(newmembers);
6366 
6367  return xid;
6368 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6076
#define FRM_MARK_COMMITTED
Definition: heapam.c:6078
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1359
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:767
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
int errcode(int sqlerrcode)
Definition: elog.c:698
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6764
void pfree(void *pointer)
Definition: mcxt.c:1169
#define ERROR
Definition: elog.h:46
TransactionId xid
Definition: multixact.h:62
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6075
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:47
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6077
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
#define Assert(condition)
Definition: c.h:804
#define FRM_NOOP
Definition: heapam.c:6074
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3159
void * palloc(Size size)
Definition: mcxt.c:1062
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4199 of file heapam.c.

References elog, ERROR, mode, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

/*
 * get_mxact_status_for_lock -- map a LockTupleMode (and whether the
 * operation is an update) to the MultiXactStatus recorded for a multixact
 * member.
 *
 * The mapping is table-driven via tupleLockExtraInfo; an entry of -1
 * marks an invalid mode/is_update combination and raises an error.
 */
4200 {
4201  int retval;
4202 
4203  if (is_update)
4204  retval = tupleLockExtraInfo[mode].updstatus;
4205  else
4206  retval = tupleLockExtraInfo[mode].lockstatus;
4207 
4208  if (retval == -1)
4209  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4210  is_update ? "true" : "false");
4211 
4212  return (MultiXactStatus) retval;
4213 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
MultiXactStatus
Definition: multixact.h:41
#define ERROR
Definition: elog.h:46
static const struct @13 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog(elevel,...)
Definition: elog.h:232

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 2007 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferInit(), intorel_startup(), and transientrel_startup().

/*
 * GetBulkInsertState -- allocate and initialize a BulkInsertState for
 * repeated heap insertions (used by COPY, table rewrites, CTAS per the
 * Referenced-by list).  Pair with FreeBulkInsertState.
 *
 * NOTE(review): Doxygen listing; line 2012 was dropped -- presumably
 * "bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);" (both names
 * appear in the References list); TODO confirm.
 */
2008 {
2009  BulkInsertState bistate;
2010 
2011  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2013  bistate->current_buf = InvalidBuffer;
2014  return bistate;
2015 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:39
BufferAccessStrategy strategy
Definition: hio.h:31
void * palloc(Size size)
Definition: mcxt.c:1062
Buffer current_buf
Definition: hio.h:32

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 6683 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, mode, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

/*
 * GetMultiXactIdHintBits -- compute the xmax infomask/infomask2 hint bits
 * describing a freshly created multixact: the lock bit for the strongest
 * lock mode held by any member, HEAP_XMAX_LOCK_ONLY when no member is an
 * updater, and HEAP_KEYS_UPDATED when the update/for-update touches key
 * columns.
 *
 * NOTE(review): Doxygen listing; hyperlinked lines were dropped (the
 * 'mode' declaration at 6702 and several switch-case labels at
 * 6715-6717, 6720 and 6724 -- see markers below).
 */
6685 {
6686  int nmembers;
6687  MultiXactMember *members;
6688  int i;
6689  uint16 bits = HEAP_XMAX_IS_MULTI;
6690  uint16 bits2 = 0;
6691  bool has_update = false;
6692  LockTupleMode strongest = LockTupleKeyShare;
6693 
6694  /*
6695  * We only use this in multis we just created, so they cannot be values
6696  * pre-pg_upgrade.
6697  */
6698  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6699 
6700  for (i = 0; i < nmembers; i++)
6701  {
 /* NOTE(review): line 6702 dropped -- presumably "LockTupleMode mode;",
  * given the assignment to 'mode' below; TODO confirm. */
6703 
6704  /*
6705  * Remember the strongest lock mode held by any member of the
6706  * multixact.
6707  */
6708  mode = TUPLOCK_from_mxstatus(members[i].status);
6709  if (mode > strongest)
6710  strongest = mode;
6711 
6712  /* See what other bits we need */
6713  switch (members[i].status)
6714  {
 /* NOTE(review): case labels at 6715-6717 dropped -- presumably the
  * MultiXactStatusForKeyShare / ForShare / ForNoKeyUpdate cases (named
  * in the References list); TODO confirm. */
6718  break;
6719 
 /* NOTE(review): case label at 6720 dropped -- presumably
  * MultiXactStatusForUpdate; TODO confirm. */
6721  bits2 |= HEAP_KEYS_UPDATED;
6722  break;
6723 
 /* NOTE(review): case label at 6724 dropped -- presumably
  * MultiXactStatusNoKeyUpdate; TODO confirm. */
6725  has_update = true;
6726  break;
6727 
6728  case MultiXactStatusUpdate:
6729  bits2 |= HEAP_KEYS_UPDATED;
6730  has_update = true;
6731  break;
6732  }
6733  }
6734 
6735  if (strongest == LockTupleExclusive ||
6736  strongest == LockTupleNoKeyExclusive)
6737  bits |= HEAP_XMAX_EXCL_LOCK;
6738  else if (strongest == LockTupleShare)
6739  bits |= HEAP_XMAX_SHR_LOCK;
6740  else if (strongest == LockTupleKeyShare)
6741  bits |= HEAP_XMAX_KEYSHR_LOCK;
6742 
6743  if (!has_update)
6744  bits |= HEAP_XMAX_LOCK_ONLY;
6745 
6746  if (nmembers > 0)
6747  pfree(members);
6748 
6749  *new_infomask = bits;
6750  *new_infomask2 = bits2;
6751 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
unsigned short uint16
Definition: c.h:440
void pfree(void *pointer)
Definition: mcxt.c:1169
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:214
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 9650 of file heapam.c.

References elog, heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_prune(), heap_xlog_vacuum(), heap_xlog_visible(), PANIC, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_PRUNE, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VACUUM, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9651 {
9652  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9653 
9654  switch (info & XLOG_HEAP_OPMASK)
9655  {
9656  case XLOG_HEAP2_PRUNE:
9657  heap_xlog_prune(record);
9658  break;
9659  case XLOG_HEAP2_VACUUM:
9660  heap_xlog_vacuum(record);
 9661  break;
 9662  case XLOG_HEAP2_FREEZE_PAGE:
 9663  heap_xlog_freeze_page(record);
 9664  break;
 9665  case XLOG_HEAP2_VISIBLE:
 9666  heap_xlog_visible(record);
 9667  break;
 9668  case XLOG_HEAP2_MULTI_INSERT:
 9669  heap_xlog_multi_insert(record);
 9670  break;
 9671  case XLOG_HEAP2_LOCK_UPDATED:
 9672  heap_xlog_lock_updated(record);
 9673  break;
9674  case XLOG_HEAP2_NEW_CID:
9675 
9676  /*
9677  * Nothing to do on a real replay, only used during logical
9678  * decoding.
9679  */
9680  break;
9681  case XLOG_HEAP2_REWRITE:
9682  heap_xlog_logical_rewrite(record);
9683  break;
9684  default:
9685  elog(PANIC, "heap2_redo: unknown op code %u", info);
9686  }
9687 }
static void heap_xlog_prune(XLogReaderState *record)
Definition: heapam.c:8404
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1110
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:439
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define XLOG_HEAP2_PRUNE
Definition: heapam_xlog.h:54
#define PANIC
Definition: elog.h:50
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:9503
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8705
static void heap_xlog_vacuum(XLogReaderState *record)
Definition: heapam.c:8490
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8977
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLOG_HEAP2_VACUUM
Definition: heapam_xlog.h:55
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:305
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:56
#define elog(elevel,...)
Definition: elog.h:232
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8565

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5838 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, TransactionIdIsValid, TransactionIdPrecedes(), TransactionXmin, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by heapam_tuple_complete_speculative(), and toast_delete_datum().

 5839 {
 5840  TransactionId xid = GetCurrentTransactionId();
 5841  ItemId lp;
5842  HeapTupleData tp;
5843  Page page;
5844  BlockNumber block;
5845  Buffer buffer;
5846  TransactionId prune_xid;
5847 
5848  Assert(ItemPointerIsValid(tid));
5849 
5850  block = ItemPointerGetBlockNumber(tid);
5851  buffer = ReadBuffer(relation, block);
5852  page = BufferGetPage(buffer);
 5853 
 5854  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 5855 
5856  /*
5857  * Page can't be all visible, we just inserted into it, and are still
5858  * running.
5859  */
5860  Assert(!PageIsAllVisible(page));
5861 
5862  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5863  Assert(ItemIdIsNormal(lp));
5864 
5865  tp.t_tableOid = RelationGetRelid(relation);
5866  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5867  tp.t_len = ItemIdGetLength(lp);
5868  tp.t_self = *tid;
5869 
5870  /*
5871  * Sanity check that the tuple really is a speculatively inserted tuple,
5872  * inserted by us.
5873  */
5874  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5875  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5876  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
5877  elog(ERROR, "attempted to kill a non-speculative tuple");
 5878  Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
 5879 
5880  /*
5881  * No need to check for serializable conflicts here. There is never a
5882  * need for a combo CID, either. No need to extract replica identity, or
5883  * do anything special with infomask bits.
5884  */
5885 
 5886  START_CRIT_SECTION();
 5887 
5888  /*
5889  * The tuple will become DEAD immediately. Flag that this page is a
5890  * candidate for pruning by setting xmin to TransactionXmin. While not
5891  * immediately prunable, it is the oldest xid we can cheaply determine
5892  * that's safe against wraparound / being older than the table's
5893  * relfrozenxid. To defend against the unlikely case of a new relation
5894  * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
5895  * if so (vacuum can't subsequently move relfrozenxid to beyond
5896  * TransactionXmin, so there's no race here).
5897  */
5899  if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
5900  prune_xid = relation->rd_rel->relfrozenxid;
5901  else
5902  prune_xid = TransactionXmin;
5903  PageSetPrunable(page, prune_xid);
5904 
5905  /* store transaction information of xact deleting the tuple */
 5906  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
 5907  tp.t_data->t_infomask |= HEAP_XMAX_INVALID;
 5908 
5909  /*
5910  * Set the tuple header xmin to InvalidTransactionId. This makes the
 5911  * tuple immediately invisible to everyone. (In particular, to any
5912  * transactions waiting on the speculative token, woken up later.)
5913  */
 5914  HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
 5915 
5916  /* Clear the speculative insertion token too */
5917  tp.t_data->t_ctid = tp.t_self;
5918 
5919  MarkBufferDirty(buffer);
5920 
5921  /*
5922  * XLOG stuff
5923  *
5924  * The WAL records generated here match heap_delete(). The same recovery
5925  * routines are used.
5926  */
5927  if (RelationNeedsWAL(relation))
5928  {
5929  xl_heap_delete xlrec;
5930  XLogRecPtr recptr;
5931 
5932  xlrec.flags = XLH_DELETE_IS_SUPER;
5934  tp.t_data->t_infomask2);
5936  xlrec.xmax = xid;
5937 
5938  XLogBeginInsert();
5939  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
5940  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5941 
5942  /* No replica identity & replication origin logged */
5943 
5944  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
5945 
5946  PageSetLSN(page, recptr);
5947  }
5948 
5949  END_CRIT_SECTION();
5950 
5951  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5952 
5953  if (HeapTupleHasExternal(&tp))
5954  {
5955  Assert(!IsToastRelation(relation));
5956  heap_toast_delete(relation, &tp, true);
5957  }
5958 
5959  /*
5960  * Never need to mark tuple for invalidation, since catalogs don't support
5961  * speculative insertion
5962  */
5963 
5964  /* Now we can release the buffer */
5965  ReleaseBuffer(buffer);
5966 
5967  /* count deletion, as we counted the insertion too */
5968  pgstat_count_heap_delete(relation);
5969 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsToastRelation(Relation relation)
Definition: catalog.c:146
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:99
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2655
HeapTupleFields t_heap
Definition: htup_details.h:156
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:587
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1556
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:232
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3772
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
union HeapTupleHeaderData::@43 t_choice
OffsetNumber offnum
Definition: heapam_xlog.h:110
TransactionId TransactionXmin
Definition: snapmgr.c:112
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:500
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:46
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:109
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:115
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId t_xmin
Definition: htup_details.h:123
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:340
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:432
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_MOVED
Definition: htup_details.h:216
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
uint8 infobits_set
Definition: heapam_xlog.h:111
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:601
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2275
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:135
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:477
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:319

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 4936 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

4938 {
4939  if (*have_tuple_lock)
4940  return true;
4941 
4942  switch (wait_policy)
4943  {
4944  case LockWaitBlock:
4945  LockTupleTuplock(relation, tid, mode);
4946  break;
4947 
4948  case LockWaitSkip:
4949  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4950  return false;
4951  break;
4952 
4953  case LockWaitError:
4954  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4955  ereport(ERROR,
4956  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4957  errmsg("could not obtain lock on row in relation \"%s\"",
4958  RelationGetRelationName(relation))));
4959  break;
4960  }
4961  *have_tuple_lock = true;
4962 
4963  return true;
4964 }
static PgChecksumMode mode
Definition: pg_checksums.c:65
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:163
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:167
int errcode(int sqlerrcode)
Definition: elog.c:698
#define ERROR
Definition: elog.h:46
#define RelationGetRelationName(relation)
Definition: rel.h:511
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg(const char *fmt,...)
Definition: elog.c:909

◆ heap_beginscan()

TableScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelTableScanDesc  parallel_scan,
uint32  flags 
)

Definition at line 1185 of file heapam.c.

References Assert, initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_parallel, HeapScanDescData::rs_parallelworkerdata, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_ALLOW_PAGEMODE, SO_TYPE_SAMPLESCAN, SO_TYPE_SEQSCAN, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

1189 {
1190  HeapScanDesc scan;
1191 
1192  /*
1193  * increment relation ref count while scanning relation
1194  *
1195  * This is just to make really sure the relcache entry won't go away while
1196  * the scan has a pointer to it. Caller should be holding the rel open
1197  * anyway, so this is redundant in all normal scenarios...
1198  */
 1199  RelationIncrementReferenceCount(relation);
 1200 
1201  /*
1202  * allocate and initialize scan descriptor
1203  */
1204  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1205 
1206  scan->rs_base.rs_rd = relation;
1207  scan->rs_base.rs_snapshot = snapshot;
1208  scan->rs_base.rs_nkeys = nkeys;
1209  scan->rs_base.rs_flags = flags;
1210  scan->rs_base.rs_parallel = parallel_scan;
1211  scan->rs_strategy = NULL; /* set in initscan */
1212 
1213  /*
1214  * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1215  */
1216  if (!(snapshot && IsMVCCSnapshot(snapshot)))
 1217  scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
 1218 
1219  /*
1220  * For seqscan and sample scans in a serializable transaction, acquire a
1221  * predicate lock on the entire relation. This is required not only to
1222  * lock all the matching tuples, but also to conflict with new insertions
1223  * into the table. In an indexscan, we take page locks on the index pages
1224  * covering the range specified in the scan qual, but in a heap scan there
1225  * is nothing more fine-grained to lock. A bitmap scan is a different
1226  * story, there we have already scanned the index and locked the index
1227  * pages covering the predicate. But in that case we still have to lock
1228  * any matching heap tuples. For sample scan we could optimize the locking
1229  * to be at least page-level granularity, but we'd need to add per-tuple
1230  * locking for that.
1231  */
 1232  if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
 1233  {
1234  /*
1235  * Ensure a missing snapshot is noticed reliably, even if the
1236  * isolation mode means predicate locking isn't performed (and
1237  * therefore the snapshot isn't used here).
1238  */
1239  Assert(snapshot);
1240  PredicateLockRelation(relation, snapshot);
1241  }
1242 
1243  /* we only need to set this up once */
1244  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1245 
1246  /*
1247  * Allocate memory to keep track of page allocation for parallel workers
1248  * when doing a parallel scan.
1249  */
1250  if (parallel_scan != NULL)
 1251  scan->rs_parallelworkerdata = palloc(sizeof(ParallelBlockTableScanWorkerData));
 1252 
1253  scan->rs_parallelworkerdata = NULL;
1254 
1255  /*
1256  * we do this here instead of in initscan() because heap_rescan also calls
1257  * initscan() and we don't want to allocate memory again
1258  */
1259  if (nkeys > 0)
1260  scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1261  else
1262  scan->rs_base.rs_key = NULL;
1263 
1264  initscan(scan, key, false);
1265 
1266  return (TableScanDesc) scan;
1267 }
TableScanDescData rs_base
Definition: heapam.h:49
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2569
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
ScanKeyData * ScanKey
Definition: skey.h:75
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition: heapam.h:72
Oid t_tableOid
Definition: htup.h:66
struct ScanKeyData * rs_key
Definition: relscan.h:37
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2068
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:96
#define Assert(condition)
Definition: c.h:804
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
void * palloc(Size size)
Definition: mcxt.c:1062
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:49
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:227
#define RelationGetRelid(relation)
Definition: rel.h:477

◆ heap_delete()

TM_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)

Definition at line 2700 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), TM_FailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, 
TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, xl_heap_delete::xmax, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_delete(), and simple_heap_delete().

2703 {
 2704  TM_Result result;
 2705  TransactionId xid = GetCurrentTransactionId();
 2706  ItemId lp;
2707  HeapTupleData tp;
2708  Page page;
2709  BlockNumber block;
2710  Buffer buffer;
2711  Buffer vmbuffer = InvalidBuffer;
2712  TransactionId new_xmax;
2713  uint16 new_infomask,
2714  new_infomask2;
2715  bool have_tuple_lock = false;
2716  bool iscombo;
2717  bool all_visible_cleared = false;
2718  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2719  bool old_key_copied = false;
2720 
2721  Assert(ItemPointerIsValid(tid));
2722 
2723  /*
2724  * Forbid this during a parallel operation, lest it allocate a combo CID.
2725  * Other workers might need that combo CID for visibility checks, and we
2726  * have no provision for broadcasting it to them.
2727  */
2728  if (IsInParallelMode())
2729  ereport(ERROR,
2730  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2731  errmsg("cannot delete tuples during a parallel operation")));
2732 
2733  block = ItemPointerGetBlockNumber(tid);
2734  buffer = ReadBuffer(relation, block);
2735  page = BufferGetPage(buffer);
2736 
2737  /*
2738  * Before locking the buffer, pin the visibility map page if it appears to
2739  * be necessary. Since we haven't got the lock yet, someone else might be
2740  * in the middle of changing this, so we'll need to recheck after we have
2741  * the lock.
2742  */
2743  if (PageIsAllVisible(page))
2744  visibilitymap_pin(relation, block, &vmbuffer);
2745 
 2746  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 2747 
2748  /*
2749  * If we didn't pin the visibility map page and the page has become all
2750  * visible while we were busy locking the buffer, we'll have to unlock and
2751  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2752  * unfortunate, but hopefully shouldn't happen often.
2753  */
2754  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2755  {
2756  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2757  visibilitymap_pin(relation, block, &vmbuffer);
 2758  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 2759  }
2760 
2761  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2762  Assert(ItemIdIsNormal(lp));
2763 
2764  tp.t_tableOid = RelationGetRelid(relation);
2765  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2766  tp.t_len = ItemIdGetLength(lp);
2767  tp.t_self = *tid;
2768 
2769 l1:
2770  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2771 
2772  if (result == TM_Invisible)
2773  {
2774  UnlockReleaseBuffer(buffer);
2775  ereport(ERROR,
2776  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2777  errmsg("attempted to delete invisible tuple")));
2778  }
2779  else if (result == TM_BeingModified && wait)
2780  {
2781  TransactionId xwait;
2782  uint16 infomask;
2783 
2784  /* must copy state data before unlocking buffer */
2785  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2786  infomask = tp.t_data->t_infomask;
2787 
2788  /*
2789  * Sleep until concurrent transaction ends -- except when there's a
2790  * single locker and it's our own transaction. Note we don't care
2791  * which lock mode the locker has, because we need the strongest one.
2792  *
2793  * Before sleeping, we need to acquire tuple lock to establish our
2794  * priority for the tuple (see heap_lock_tuple). LockTuple will
2795  * release us when we are next-in-line for the tuple.
2796  *
2797  * If we are forced to "start over" below, we keep the tuple lock;
2798  * this arranges that we stay at the head of the line while rechecking
2799  * tuple state.
2800  */
2801  if (infomask & HEAP_XMAX_IS_MULTI)
2802  {
2803  bool current_is_member = false;
2804 
2805  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2806  LockTupleExclusive, &current_is_member))
2807  {
2808  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2809 
2810  /*
2811  * Acquire the lock, if necessary (but skip it when we're
2812  * requesting a lock and already have one; avoids deadlock).
2813  */
2814  if (!current_is_member)
2816  LockWaitBlock, &have_tuple_lock);
2817 
2818  /* wait for multixact */
2820  relation, &(tp.t_self), XLTW_Delete,
2821  NULL);
2823 
2824  /*
2825  * If xwait had just locked the tuple then some other xact
2826  * could update this tuple before we get to this point. Check
2827  * for xmax change, and start over if so.
2828  */
2829  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2831  xwait))
2832  goto l1;
2833  }
2834 
2835  /*
2836  * You might think the multixact is necessarily done here, but not
2837  * so: it could have surviving members, namely our own xact or
2838  * other subxacts of this backend. It is legal for us to delete
2839  * the tuple in either case, however (the latter case is
2840  * essentially a situation of upgrading our former shared lock to
2841  * exclusive). We don't bother changing the on-disk hint bits
2842  * since we are about to overwrite the xmax altogether.
2843  */
2844  }
2845  else if (!TransactionIdIsCurrentTransactionId(xwait))
2846  {
2847  /*
2848  * Wait for regular transaction to end; but first, acquire tuple
2849  * lock.
2850  */
2851  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2853  LockWaitBlock, &have_tuple_lock);
2854  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2856 
2857  /*
2858  * xwait is done, but if xwait had just locked the tuple then some
2859  * other xact could update this tuple before we get to this point.
2860  * Check for xmax change, and start over if so.
2861  */
2862  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2864  xwait))
2865  goto l1;
2866 
2867  /* Otherwise check if it committed or aborted */
2868  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2869  }
2870 
2871  /*
2872  * We may overwrite if previous xmax aborted, or if it committed but
2873  * only locked the tuple without updating it.
2874  */
2875  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2878  result = TM_Ok;
2879  else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2880  result = TM_Updated;
2881  else
2882  result = TM_Deleted;
2883  }
2884 
2885  if (crosscheck != InvalidSnapshot && result == TM_Ok)
2886  {
2887  /* Perform additional check for transaction-snapshot mode RI updates */
2888  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2889  result = TM_Updated;
2890  }
2891 
2892  if (result != TM_Ok)
2893  {
2894  Assert(result == TM_SelfModified ||
2895  result == TM_Updated ||
2896  result == TM_Deleted ||
2897  result == TM_BeingModified);
2899  Assert(result != TM_Updated ||
2900  !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2901  tmfd->ctid = tp.t_data->t_ctid;
2903  if (result == TM_SelfModified)
2904  tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2905  else
2906  tmfd->cmax = InvalidCommandId;
2907  UnlockReleaseBuffer(buffer);
2908  if (have_tuple_lock)
2909  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2910  if (vmbuffer != InvalidBuffer)
2911  ReleaseBuffer(vmbuffer);
2912  return result;
2913  }
2914 
2915  /*
2916  * We're about to do the actual delete -- check for conflict first, to
2917  * avoid possibly having to roll back work we've just done.
2918  *
2919  * This is safe without a recheck as long as there is no possibility of
2920  * another process scanning the page between this check and the delete
2921  * being visible to the scan (i.e., an exclusive buffer content lock is
2922  * continuously held from this point until the tuple delete is visible).
2923  */
2924  CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
2925 
2926  /* replace cid with a combo CID if necessary */
2927  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2928 
2929  /*
2930  * Compute replica identity tuple before entering the critical section so
2931  * we don't PANIC upon a memory allocation failure.
2932  */
2933  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2934 
2935  /*
2936  * If this is the first possibly-multixact-able operation in the current
2937  * transaction, set my per-backend OldestMemberMXactId setting. We can be
2938  * certain that the transaction will never become a member of any older
2939  * MultiXactIds than that. (We have to do this even if we end up just
2940  * using our own TransactionId below, since some other backend could
2941  * incorporate our XID into a MultiXact immediately afterwards.)
2942  */
2944 
2947  xid, LockTupleExclusive, true,
2948  &new_xmax, &new_infomask, &new_infomask2);
2949 
 2950  START_CRIT_SECTION();
 2951 
2952  /*
2953  * If this transaction commits, the tuple will become DEAD sooner or
2954  * later. Set flag that this page is a candidate for pruning once our xid
2955  * falls below the OldestXmin horizon. If the transaction finally aborts,
2956  * the subsequent page pruning will be a no-op and the hint will be
2957  * cleared.
2958  */
2959  PageSetPrunable(page, xid);
2960 
2961  if (PageIsAllVisible(page))
2962  {
2963  all_visible_cleared = true;
2964  PageClearAllVisible(page);
2965  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2966  vmbuffer, VISIBILITYMAP_VALID_BITS);
2967  }
2968 
 2969  /* store transaction information of xact deleting the tuple */
 2970  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
 2971  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
 2972  tp.t_data->t_infomask |= new_infomask;
 2973  tp.t_data->t_infomask2 |= new_infomask2;
 2974  HeapTupleHeaderClearHotUpdated(tp.t_data);
 2975  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2976  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2977  /* Make sure there is no forward chain link in t_ctid */
2978  tp.t_data->t_ctid = tp.t_self;
2979 
2980  /* Signal that this is actually a move into another partition */
 2981  if (changingPart)
 2982  HeapTupleHeaderSetMovedPartitions(tp.t_data);
2984  MarkBufferDirty(buffer);
2985 
2986  /*
2987  * XLOG stuff
2988  *
2989  * NB: heap_abort_speculative() uses the same xlog record and replay
2990  * routines.
2991  */
2992  if (RelationNeedsWAL(relation))
2993  {
2994  xl_heap_delete xlrec;
2995  xl_heap_header xlhdr;
2996  XLogRecPtr recptr;
2997 
2998  /*
2999  * For logical decode we need combo CIDs to properly decode the
3000  * catalog
3001  */
3003  log_heap_new_cid(relation, &tp);
3004 
3005  xlrec.flags = 0;
3006  if (all_visible_cleared)
3008  if (changingPart)
3011  tp.t_data->t_infomask2);
3013  xlrec.xmax = new_xmax;
3014 
3015  if (old_key_tuple != NULL)
3016  {
3017  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3019  else
3021  }
3022 
3023  XLogBeginInsert();
3024  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3025 
3026  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3027 
3028  /*
3029  * Log replica identity of the deleted tuple if there is one
3030  */
3031  if (old_key_tuple != NULL)
3032  {
3033  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3034  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3035  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3036 
3037  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3038  XLogRegisterData((char *) old_key_tuple->t_data
3040  old_key_tuple->t_len
3042  }
3043 
3044  /* filtering by origin on a row level is much more efficient */
3046 
3047  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3048 
3049  PageSetLSN(page, recptr);
3050  }
3051 
3052  END_CRIT_SECTION();
3053 
3054  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3055 
3056  if (vmbuffer != InvalidBuffer)
3057  ReleaseBuffer(vmbuffer);
3058 
3059  /*
3060  * If the tuple has toasted out-of-line attributes, we need to delete
3061  * those items too. We have to do this before releasing the buffer
3062  * because we need to look at the contents of the tuple, but it's OK to
3063  * release the content lock on the buffer first.
3064  */
3065  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3066  relation->rd_rel->relkind != RELKIND_MATVIEW)
3067  {
3068  /* toast table entries should never be recursively toasted */
3070  }
3071  else if (HeapTupleHasExternal(&tp))
3072  heap_toast_delete(relation, &tp, false);
3073 
3074  /*
3075  * Mark tuple for invalidation from system caches at next command
3076  * boundary. We have to do this before releasing the buffer because we
3077  * need to look at the contents of the tuple.
3078  */
3079  CacheInvalidateHeapTuple(relation, &tp, NULL);
3080 
3081  /* Now we can release the buffer */
3082  ReleaseBuffer(buffer);
3083 
3084  /*
3085  * Release the lmgr tuple lock, if we had it.
3086  */
3087  if (have_tuple_lock)
3088  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3089 
3090  pgstat_count_heap_delete(relation);
3091 
3092  if (old_key_tuple != NULL && old_key_copied)
3093  heap_freetuple(old_key_tuple);
3094 
3095  return TM_Ok;
3096 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
ItemPointerData ctid
Definition: tableam.h:126
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:8228
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2655
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:587
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1556
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:232
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed, bool *copy)
Definition: heapam.c:8310
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2677
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:495
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
CommandId cmax
Definition: tableam.h:128
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:146
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
int errcode(int sqlerrcode)
Definition: elog.c:698
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:255
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3772
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:165
OffsetNumber offnum
Definition: heapam_xlog.h:110
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:625
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:375
unsigned short uint16
Definition: c.h:440
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3795
TransactionId xmax
Definition: tableam.h:127
#define ERROR
Definition: elog.h:46
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:448
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:109
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7009
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:115
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:98
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:414
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:405
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:4985
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define InvalidSnapshot
Definition: snapshot.h:123
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:340
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:432
TM_Result
Definition: tableam.h:71
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:657
#define InvalidCommandId
Definition: c.h:604
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4446
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:1985
#define HEAP_MOVED
Definition: htup_details.h:216
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:4936
#define ereport(elevel,...)
Definition: elog.h:157
TransactionId MultiXactId
Definition: c.h:597
#define PageClearAllVisible(page)
Definition: bufpage.h:389
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:640
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
uint8 infobits_set
Definition: heapam_xlog.h:111
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:118
Definition: tableam.h:77
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
uint16 t_infomask
Definition: heapam_xlog.h:147
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition: heapam.c:6832
#define RelationNeedsWAL(relation)
Definition: rel.h:601
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2275
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:153
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2752
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:672
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:96
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:100
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:135
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:477
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define SizeOfHeapHeader
Definition: heapam_xlog.h:151
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:97

◆ heap_endscan()

void heap_endscan ( TableScanDesc  sscan)

Definition at line 1307 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, TableScanDescData::rs_key, HeapScanDescData::rs_parallelworkerdata, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_TEMP_SNAPSHOT, and UnregisterSnapshot().

Referenced by SampleHeapTupleVisible().

/*
 * heap_endscan - end a heap relation scan: unpin the current buffer, drop
 * the relcache reference, and free all scan-descriptor storage.
 *
 * NOTE(review): lines 1322, 1328, 1331 and 1334 were dropped by the
 * documentation extractor (hyperlinked call lines lost their text); they are
 * restored here from the call list in the References paragraph above.
 * Verify against upstream heapam.c.
 */
1308 {
1309  HeapScanDesc scan = (HeapScanDesc) sscan;
1310 
1311  /* Note: no locking manipulations needed */
1312 
1313  /*
1314  * unpin scan buffers
1315  */
1316  if (BufferIsValid(scan->rs_cbuf))
1317  ReleaseBuffer(scan->rs_cbuf);
1318 
1319  /*
1320  * decrement relation reference count and free scan descriptor storage
1321  */
1322  RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1323 
1324  if (scan->rs_base.rs_key)
1325  pfree(scan->rs_base.rs_key);
1326 
1327  if (scan->rs_strategy != NULL)
1328  FreeAccessStrategy(scan->rs_strategy);
1329 
1330  if (scan->rs_parallelworkerdata != NULL)
1331  pfree(scan->rs_parallelworkerdata);
1332 
1333  if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1334  UnregisterSnapshot(scan->rs_base.rs_snapshot);
1335 
1336  pfree(scan);
1337 }
TableScanDescData rs_base
Definition: heapam.h:49
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3772
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
void pfree(void *pointer)
Definition: mcxt.c:1169
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2081
ParallelBlockTableScanWorkerData * rs_parallelworkerdata
Definition: heapam.h:72
struct ScanKeyData * rs_key
Definition: relscan.h:37
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:852
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
Buffer rs_cbuf
Definition: heapam.h:60
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6631 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_prune().

/*
 * heap_execute_freeze_tuple - apply a previously prepared freeze plan (frz)
 * to the tuple header: set xmax, fix xvac if flagged, copy infomask bits.
 *
 * NOTE(review): the two HeapTupleHeaderSetXvac() calls (lines 6636, 6639)
 * were dropped by the documentation extractor; restored from the References
 * list above (XLH_FREEZE_XVAC / XLH_INVALID_XVAC semantics). Verify against
 * upstream heapam.c.
 */
6632 {
6633  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6634 
6635  if (frz->frzflags & XLH_FREEZE_XVAC)
6636  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6637 
6638  if (frz->frzflags & XLH_INVALID_XVAC)
6639  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6640 
6641  tuple->t_infomask = frz->t_infomask;
6642  tuple->t_infomask2 = frz->t_infomask2;
6643 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:423
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:326
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:322
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:321

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf 
)

Definition at line 1595 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by heap_lock_updated_tuple_rec(), heapam_fetch_row_version(), and heapam_tuple_lock().

/*
 * heap_fetch - fetch the tuple identified by tuple->t_self under `snapshot`.
 *
 * On success returns true with the tuple fields filled in and *userbuf set
 * to the still-pinned buffer (caller must release it). On failure returns
 * false with *userbuf = InvalidBuffer; t_data is additionally set to NULL
 * when the TID itself is bogus (out-of-range offnum or non-normal line
 * pointer), as the code below shows.
 *
 * NOTE(review): this listing appears intact (line numbers 1599-1686 are
 * continuous, unlike neighboring listings where hyperlinked lines were
 * stripped). Left byte-identical; comments only added.
 */
1599 {
1600  ItemPointer tid = &(tuple->t_self);
1601  ItemId lp;
1602  Buffer buffer;
1603  Page page;
1604  OffsetNumber offnum;
1605  bool valid;
1606 
1607  /*
1608  * Fetch and pin the appropriate page of the relation.
1609  */
1610  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1611 
1612  /*
1613  * Need share lock on buffer to examine tuple commit status.
1614  */
1615  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1616  page = BufferGetPage(buffer);
1617  TestForOldSnapshot(snapshot, relation, page);
1618 
1619  /*
1620  * We'd better check for out-of-range offnum in case of VACUUM since the
1621  * TID was obtained.
1622  */
1623  offnum = ItemPointerGetOffsetNumber(tid);
1624  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1625  {
1626  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1627  ReleaseBuffer(buffer);
1628  *userbuf = InvalidBuffer;
1629  tuple->t_data = NULL;
1630  return false;
1631  }
1632 
1633  /*
1634  * get the item line pointer corresponding to the requested tid
1635  */
1636  lp = PageGetItemId(page, offnum);
1637 
1638  /*
1639  * Must check for deleted tuple.
1640  */
1641  if (!ItemIdIsNormal(lp))
1642  {
1643  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1644  ReleaseBuffer(buffer);
1645  *userbuf = InvalidBuffer;
1646  tuple->t_data = NULL;
1647  return false;
1648  }
1649 
1650  /*
1651  * fill in *tuple fields
1652  */
1653  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1654  tuple->t_len = ItemIdGetLength(lp);
1655  tuple->t_tableOid = RelationGetRelid(relation);
1656 
1657  /*
1658  * check tuple visibility, then release lock
1659  */
1660  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1661 
/* If visible, take a predicate lock on this TID while the buffer content
 * lock is still held (see predicate.c for the SSI machinery). */
1662  if (valid)
1663  PredicateLockTID(relation, &(tuple->t_self), snapshot,
1664  HeapTupleHeaderGetXmin(tuple->t_data));
1665 
/* Serializable-conflict check runs for both visible and invisible tuples;
 * it receives `valid` to decide what to do. */
1666  HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1667 
1668  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1669 
1670  if (valid)
1671  {
1672  /*
1673  * All checks passed, so return the tuple as valid. Caller is now
1674  * responsible for releasing the buffer.
1675  */
1676  *userbuf = buffer;
1677 
1678  return true;
1679  }
1680 
1681  /* Tuple failed time qual */
1682  ReleaseBuffer(buffer);
1683  *userbuf = InvalidBuffer;
1684 
1685  return false;
1686 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:279
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3772
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2614
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9789
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:477
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5747 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_complete_speculative().

/*
 * heap_finish_speculative - confirm a successful speculative insertion by
 * replacing the speculative token in t_ctid with a real self-pointing TID,
 * WAL-logging an XLOG_HEAP_CONFIRM record when required.
 *
 * NOTE(review): lines 5756, 5769, 5773, 5775 and 5796 were dropped by the
 * documentation extractor; restored from the References paragraph above
 * (LockBuffer/BUFFER_LOCK_EXCLUSIVE, StaticAssertStmt, START_CRIT_SECTION,
 * Assert/HeapTupleHeaderIsSpeculative, XLogSetRecordFlags). Verify against
 * upstream heapam.c.
 */
5748 {
5749  Buffer buffer;
5750  Page page;
5751  OffsetNumber offnum;
5752  ItemId lp = NULL;
5753  HeapTupleHeader htup;
5754 
5755  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5756  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5757  page = (Page) BufferGetPage(buffer);
5758 
5759  offnum = ItemPointerGetOffsetNumber(tid);
5760  if (PageGetMaxOffsetNumber(page) >= offnum)
5761  lp = PageGetItemId(page, offnum);
5762 
5763  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5764  elog(ERROR, "invalid lp");
5765 
5766  htup = (HeapTupleHeader) PageGetItem(page, lp);
5767 
5768  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5769  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
5770  "invalid speculative token constant");
5771 
5772  /* NO EREPORT(ERROR) from here till changes are logged */
5773  START_CRIT_SECTION();
5774 
5775  Assert(HeapTupleHeaderIsSpeculative(htup));
5776 
5777  MarkBufferDirty(buffer);
5778 
5779  /*
5780  * Replace the speculative insertion token with a real t_ctid, pointing to
5781  * itself like it does on regular tuples.
5782  */
5783  htup->t_ctid = *tid;
5784 
5785  /* XLOG stuff */
5786  if (RelationNeedsWAL(relation))
5787  {
5788  xl_heap_confirm xlrec;
5789  XLogRecPtr recptr;
5790 
5791  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5792 
5793  XLogBeginInsert();
5794 
5795  /* We want the same filtering on this as on a plain insert */
5796  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
5797 
5798  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5799  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5800 
5801  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5802 
5803  PageSetLSN(page, recptr);
5804  }
5805 
5806  END_CRIT_SECTION();
5807 
5808  UnlockReleaseBuffer(buffer);
5809 }
OffsetNumber offnum
Definition: heapam_xlog.h:302
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1556
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:232
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:255
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define SpecTokenOffsetNumber
Definition: itemptr.h:63
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:918
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3795
#define ERROR
Definition: elog.h:46
ItemPointerData t_ctid
Definition: htup_details.h:160
#define REGBUF_STANDARD
Definition: xloginsert.h:35
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:414
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:305
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:340
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:432
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:601
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:135
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6652 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

/*
 * heap_freeze_tuple - prepare and immediately apply a freeze plan for one
 * tuple (non-WAL-logged path; used by rewrite_heap_tuple per the
 * "Referenced by" paragraph above). Returns true if the tuple was changed.
 *
 * NOTE(review): the declaration `xl_heap_freeze_tuple frz;` was dropped by
 * the documentation extractor — `frz` is used below but never declared in
 * the listing; restored here. Verify against upstream heapam.c.
 */
6655 {
6656  xl_heap_freeze_tuple frz;
6657 
6658  bool do_freeze;
6659  bool tuple_totally_frozen;
6660 
6661  do_freeze = heap_prepare_freeze_tuple(tuple,
6662  relfrozenxid, relminmxid,
6663  cutoff_xid, cutoff_multi,
6664  &frz, &tuple_totally_frozen);
6665 
6666  /*
6667  * Note that because this is not a WAL-logged operation, we don't need to
6668  * fill in the offset in the freeze record.
6669  */
6670 
6671  if (do_freeze)
6672  heap_execute_freeze_tuple(tuple, &frz);
6673  return do_freeze;
6674 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6402
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6631

◆ heap_get_latest_tid()

void heap_get_latest_tid ( TableScanDesc  sscan,
ItemPointer  tid 
)

Definition at line 1862 of file heapam.c.

References Assert, BUFFER_LOCK_SHARE, BufferGetPage, HEAP_XMAX_INVALID, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetRelid, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

/*
 * heap_get_latest_tid - follow the t_ctid update chain starting at *tid and
 * return (in *tid) the latest version visible under the scan's snapshot.
 *
 * NOTE(review): condition lines 1934 and 1953-1955 were dropped by the
 * documentation extractor; restored from the References paragraph above
 * (TransactionIdEquals/HeapTupleHeaderGetXmin; HeapTupleHeaderIsOnlyLocked,
 * HeapTupleHeaderIndicatesMovedPartitions, ItemPointerEquals). Verify
 * against upstream heapam.c.
 */
1864 {
1865  Relation relation = sscan->rs_rd;
1866  Snapshot snapshot = sscan->rs_snapshot;
1867  ItemPointerData ctid;
1868  TransactionId priorXmax;
1869 
1870  /*
1871  * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1872  * Assume that t_ctid links are valid however - there shouldn't be invalid
1873  * ones in the table.
1874  */
1875  Assert(ItemPointerIsValid(tid));
1876 
1877  /*
1878  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1879  * need to examine, and *tid is the TID we will return if ctid turns out
1880  * to be bogus.
1881  *
1882  * Note that we will loop until we reach the end of the t_ctid chain.
1883  * Depending on the snapshot passed, there might be at most one visible
1884  * version of the row, but we don't try to optimize for that.
1885  */
1886  ctid = *tid;
1887  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1888  for (;;)
1889  {
1890  Buffer buffer;
1891  Page page;
1892  OffsetNumber offnum;
1893  ItemId lp;
1894  HeapTupleData tp;
1895  bool valid;
1896 
1897  /*
1898  * Read, pin, and lock the page.
1899  */
1900  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1901  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1902  page = BufferGetPage(buffer);
1903  TestForOldSnapshot(snapshot, relation, page);
1904 
1905  /*
1906  * Check for bogus item number. This is not treated as an error
1907  * condition because it can happen while following a t_ctid link. We
1908  * just assume that the prior tid is OK and return it unchanged.
1909  */
1910  offnum = ItemPointerGetOffsetNumber(&ctid);
1911  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1912  {
1913  UnlockReleaseBuffer(buffer);
1914  break;
1915  }
1916  lp = PageGetItemId(page, offnum);
1917  if (!ItemIdIsNormal(lp))
1918  {
1919  UnlockReleaseBuffer(buffer);
1920  break;
1921  }
1922 
1923  /* OK to access the tuple */
1924  tp.t_self = ctid;
1925  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1926  tp.t_len = ItemIdGetLength(lp);
1927  tp.t_tableOid = RelationGetRelid(relation);
1928 
1929  /*
1930  * After following a t_ctid link, we might arrive at an unrelated
1931  * tuple. Check for XMIN match.
1932  */
1933  if (TransactionIdIsValid(priorXmax) &&
1934  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1935  {
1936  UnlockReleaseBuffer(buffer);
1937  break;
1938  }
1939 
1940  /*
1941  * Check tuple visibility; if visible, set it as the new result
1942  * candidate.
1943  */
1944  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1945  HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1946  if (valid)
1947  *tid = ctid;
1948 
1949  /*
1950  * If there's a valid t_ctid link, follow it, else we're done.
1951  */
1952  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1953  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1954  HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1955  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1956  {
1957  UnlockReleaseBuffer(buffer);
1958  break;
1959  }
1960 
1961  ctid = tp.t_data->t_ctid;
1962  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1963  UnlockReleaseBuffer(buffer);
1964  } /* end of loop */
1965 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:279
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3795
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9789
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:477
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_getnext()

HeapTuple heap_getnext ( TableScanDesc  sscan,
ScanDirection  direction 
)

Definition at line 1340 of file heapam.c.

References bsysscan, CheckXidAlive, elog, ereport, errcode(), errmsg_internal(), ERROR, GetHeapamTableAmRoutine(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, RelationData::rd_tableam, HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, TransactionIdIsValid, and unlikely.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), heapam_index_build_range_scan(), heapam_index_validate_scan(), index_update_stats(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), populate_typ_list(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

/*
 * heap_getnext - advance a heap scan and return the next tuple (or NULL at
 * end of scan). Still called directly in many places, hence the AM guard.
 *
 * NOTE(review): lines 1351, 1363 and 1383 were dropped by the documentation
 * extractor; restored from the References paragraph above
 * (GetHeapamTableAmRoutine/rd_tableam/unlikely, CheckXidAlive/bsysscan,
 * pgstat_count_heap_getnext). Verify against upstream heapam.c.
 */
1341 {
1342  HeapScanDesc scan = (HeapScanDesc) sscan;
1343 
1344  /*
1345  * This is still widely used directly, without going through table AM, so
1346  * add a safety check. It's possible we should, at a later point,
1347  * downgrade this to an assert. The reason for checking the AM routine,
1348  * rather than the AM oid, is that this allows to write regression tests
1349  * that create another AM reusing the heap handler.
1350  */
1351  if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1352  ereport(ERROR,
1353  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1354  errmsg_internal("only heap AM is supported")));
1355 
1356  /*
1357  * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1358  * for catalog or regular tables. See detailed comments in xact.c where
1359  * these variables are declared. Normally we have such a check at tableam
1360  * level API but this is called from many places so we need to ensure it
1361  * here.
1362  */
1363  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1364  elog(ERROR, "unexpected heap_getnext call during logical decoding");
1365 
1366  /* Note: no locking manipulations needed */
1367 
1368  if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1369  heapgettup_pagemode(scan, direction,
1370  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1371  else
1372  heapgettup(scan, direction,
1373  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1374 
1375  if (scan->rs_ctup.t_data == NULL)
1376  return NULL;
1377 
1378  /*
1379  * if we get here it means we have a new current scan tuple, so point to
1380  * the proper return buffer and return the tuple.
1381  */
1382 
1383  pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1384 
1385  return &scan->rs_ctup;
1386 }
TableScanDescData rs_base
Definition: heapam.h:49
int errcode(int sqlerrcode)
Definition: elog.c:698
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ERROR
Definition: elog.h:46
bool bsysscan
Definition: xact.c:96
struct ScanKeyData * rs_key
Definition: relscan.h:37
TransactionId CheckXidAlive
Definition: xact.c:95
const struct TableAmRoutine * rd_tableam
Definition: rel.h:184
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
#define ereport(elevel,...)
Definition: elog.h:157
int errmsg_internal(const char *fmt,...)
Definition: elog.c:996
Relation rs_rd
Definition: relscan.h:34
#define elog(elevel,...)
Definition: elog.h:232
#define unlikely(x)
Definition: c.h:273
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844
const TableAmRoutine * GetHeapamTableAmRoutine(void)

◆ heap_getnextslot()

bool heap_getnextslot ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1389 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, and HeapTupleData::t_data.

Referenced by SampleHeapTupleVisible().

1390 {
1391  HeapScanDesc scan = (HeapScanDesc) sscan;
1392 
1393  /* Note: no locking manipulations needed */
1394 
1395  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1396  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1397  else
1398  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1399 
1400  if (scan->rs_ctup.t_data == NULL)
1401  {
1402  ExecClearTuple(slot);
1403  return false;
1404  }
1405 
1406  /*
1407  * if we get here it means we have a new current scan tuple, so point to
1408  * the proper return buffer and return the tuple.
1409  */
1410 
1412 
1413  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1414  scan->rs_cbuf);
1415  return true;
1416 }
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
struct ScanKeyData * rs_key
Definition: relscan.h:37
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1392
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844

◆ heap_getnextslot_tidrange()

bool heap_getnextslot_tidrange ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1492 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), heapgettup(), heapgettup_pagemode(), ItemPointerCompare(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_maxtid, TableScanDescData::rs_mintid, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, ScanDirectionIsBackward, ScanDirectionIsForward, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, and HeapTupleData::t_self.

Referenced by SampleHeapTupleVisible().

1494 {
1495  HeapScanDesc scan = (HeapScanDesc) sscan;
1496  ItemPointer mintid = &sscan->rs_mintid;
1497  ItemPointer maxtid = &sscan->rs_maxtid;
1498 
1499  /* Note: no locking manipulations needed */
1500  for (;;)
1501  {
1502  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1503  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1504  else
1505  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1506 
1507  if (scan->rs_ctup.t_data == NULL)
1508  {
1509  ExecClearTuple(slot);
1510  return false;
1511  }
1512 
1513  /*
1514  * heap_set_tidrange will have used heap_setscanlimits to limit the
1515  * range of pages we scan to only ones that can contain the TID range
1516  * we're scanning for. Here we must filter out any tuples from these
1517  * pages that are outwith that range.
1518  */
1519  if (ItemPointerCompare(&scan->rs_ctup.t_self, mintid) < 0)
1520  {
1521  ExecClearTuple(slot);
1522 
1523  /*
1524  * When scanning backwards, the TIDs will be in descending order.
1525  * Future tuples in this direction will be lower still, so we can
1526  * just return false to indicate there will be no more tuples.
1527  */
1528  if (ScanDirectionIsBackward(direction))
1529  return false;
1530 
1531  continue;
1532  }
1533 
1534  /*
1535  * Likewise for the final page, we must filter out TIDs greater than
1536  * maxtid.
1537  */
1538  if (ItemPointerCompare(&scan->rs_ctup.t_self, maxtid) > 0)
1539  {
1540  ExecClearTuple(slot);
1541 
1542  /*
1543  * When scanning forward, the TIDs will be in ascending order.
1544  * Future tuples in this direction will be higher still, so we can
1545  * just return false to indicate there will be no more tuples.
1546  */
1547  if (ScanDirectionIsForward(direction))
1548  return false;
1549  continue;
1550  }
1551 
1552  break;
1553  }
1554 
1555  /*
1556  * if we get here it means we have a new current scan tuple, so point to
1557  * the proper return buffer and return the tuple.
1558  */
1560 
1561  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot, scan->rs_cbuf);
1562  return true;
1563 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
ItemPointerData rs_mintid
Definition: relscan.h:40
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define ScanDirectionIsForward(direction)
Definition: sdir.h:55
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:47
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:79
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ScanDirectionIsBackward(direction)
Definition: sdir.h:41
ItemPointerData t_self
Definition: htup.h:65
struct ScanKeyData * rs_key
Definition: relscan.h:37
ItemPointerData rs_maxtid
Definition: relscan.h:41
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:506
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1392
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1030
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:844

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 1710 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, GlobalVisTestFor(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RecentXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heap_index_delete_tuples(), heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1713 {
1714  Page dp = (Page) BufferGetPage(buffer);
1715  TransactionId prev_xmax = InvalidTransactionId;
1716  BlockNumber blkno;
1717  OffsetNumber offnum;
1718  bool at_chain_start;
1719  bool valid;
1720  bool skip;
1721  GlobalVisState *vistest = NULL;
1722 
1723  /* If this is not the first call, previous call returned a (live!) tuple */
1724  if (all_dead)
1725  *all_dead = first_call;
1726 
1727  blkno = ItemPointerGetBlockNumber(tid);
1728  offnum = ItemPointerGetOffsetNumber(tid);
1729  at_chain_start = first_call;
1730  skip = !first_call;
1731 
1732  /* XXX: we should assert that a snapshot is pushed or registered */
1734  Assert(BufferGetBlockNumber(buffer) == blkno);
1735 
1736  /* Scan through possible multiple members of HOT-chain */
1737  for (;;)
1738  {
1739  ItemId lp;
1740 
1741  /* check for bogus TID */
1742  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1743  break;
1744 
1745  lp = PageGetItemId(dp, offnum);
1746 
1747  /* check for unused, dead, or redirected items */
1748  if (!ItemIdIsNormal(lp))
1749  {
1750  /* We should only see a redirect at start of chain */
1751  if (ItemIdIsRedirected(lp) && at_chain_start)
1752  {
1753  /* Follow the redirect */
1754  offnum = ItemIdGetRedirect(lp);
1755  at_chain_start = false;
1756  continue;
1757  }
1758  /* else must be end of chain */
1759  break;
1760  }
1761 
1762  /*
1763  * Update heapTuple to point to the element of the HOT chain we're
1764  * currently investigating. Having t_self set correctly is important
1765  * because the SSI checks and the *Satisfies routine for historical
1766  * MVCC snapshots need the correct tid to decide about the visibility.
1767  */
1768  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1769  heapTuple->t_len = ItemIdGetLength(lp);
1770  heapTuple->t_tableOid = RelationGetRelid(relation);
1771  ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1772 
1773  /*
1774  * Shouldn't see a HEAP_ONLY tuple at chain start.
1775  */
1776  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1777  break;
1778 
1779  /*
1780  * The xmin should match the previous xmax value, else chain is
1781  * broken.
1782  */
1783  if (TransactionIdIsValid(prev_xmax) &&
1784  !TransactionIdEquals(prev_xmax,
1785  HeapTupleHeaderGetXmin(heapTuple->t_data)))
1786  break;
1787 
1788  /*
1789  * When first_call is true (and thus, skip is initially false) we'll
1790  * return the first tuple we find. But on later passes, heapTuple
1791  * will initially be pointing to the tuple we returned last time.
1792  * Returning it again would be incorrect (and would loop forever), so
1793  * we skip it and return the next match we find.
1794  */
1795  if (!skip)
1796  {
1797  /* If it's visible per the snapshot, we must return it */
1798  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1799  HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1800  buffer, snapshot);
1801 
1802  if (valid)
1803  {
1804  ItemPointerSetOffsetNumber(tid, offnum);
1805  PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1806  HeapTupleHeaderGetXmin(heapTuple->t_data));
1807  if (all_dead)
1808  *all_dead = false;
1809  return true;
1810  }
1811  }
1812  skip = false;
1813 
1814  /*
1815  * If we can't see it, maybe no one else can either. At caller
1816  * request, check whether all chain members are dead to all
1817  * transactions.
1818  *
1819  * Note: if you change the criterion here for what is "dead", fix the
1820  * planner's get_actual_variable_range() function to match.
1821  */
1822  if (all_dead && *all_dead)
1823  {
1824  if (!vistest)
1825  vistest = GlobalVisTestFor(relation);
1826 
1827  if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1828  *all_dead = false;
1829  }
1830 
1831  /*
1832  * Check to see if HOT chain continues past this tuple; if so fetch
1833  * the next offnum and loop around.
1834  */
1835  if (HeapTupleIsHotUpdated(heapTuple))
1836  {
1838  blkno);
1839  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1840  at_chain_start = false;
1841  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1842  }
1843  else
1844  break; /* end of chain */
1845  }
1846 
1847  return false;
1848 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
static const struct exclude_list_item skip[]
Definition: pg_checksums.c:116
TransactionId RecentXmin
Definition: snapmgr.c:113
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:675
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4029
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2614
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9789
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:684
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2752
bool HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:477
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_index_delete_tuples()

TransactionId heap_index_delete_tuples ( Relation  rel,
TM_IndexDeleteOp delstate 
)

Definition at line 7269 of file heapam.c.

References Assert, TM_IndexDeleteOp::bottomup, BOTTOMUP_MAX_NBLOCKS, bottomup_sort_and_shrink(), TM_IndexDeleteOp::bottomupfreespace, buf, BUFFER_LOCK_SHARE, BufferGetPage, BufferIsValid, TM_IndexDeleteOp::deltids, TM_IndexStatus::freespace, get_tablespace_maintenance_io_concurrency(), GlobalVisTestFor(), heap_hot_search_buffer(), HeapTupleHeaderAdvanceLatestRemovedXid(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsHotUpdated, i, TM_IndexDelete::id, index_delete_sort(), InitNonVacuumableSnapshot, InvalidBlockNumber, InvalidBuffer, InvalidOffsetNumber, InvalidTransactionId, IsCatalogRelation(), ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, TM_IndexStatus::knowndeletable, LockBuffer(), maintenance_io_concurrency, Min, TM_IndexDeleteOp::ndeltids, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, TM_IndexStatus::promising, RelationData::rd_rel, ReadBuffer(), TM_IndexDeleteOp::status, HeapTupleHeaderData::t_ctid, TM_IndexDelete::tid, TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

7270 {
7271  /* Initial assumption is that earlier pruning took care of conflict */
7272  TransactionId latestRemovedXid = InvalidTransactionId;
7275  Page page = NULL;
7277  TransactionId priorXmax;
7278 #ifdef USE_PREFETCH
7279  IndexDeletePrefetchState prefetch_state;
7280  int prefetch_distance;
7281 #endif
7282  SnapshotData SnapshotNonVacuumable;
7283  int finalndeltids = 0,
7284  nblocksaccessed = 0;
7285 
7286  /* State that's only used in bottom-up index deletion case */
7287  int nblocksfavorable = 0;
7288  int curtargetfreespace = delstate->bottomupfreespace,
7289  lastfreespace = 0,
7290  actualfreespace = 0;
7291  bool bottomup_final_block = false;
7292 
7293  InitNonVacuumableSnapshot(SnapshotNonVacuumable, GlobalVisTestFor(rel));
7294 
7295  /* Sort caller's deltids array by TID for further processing */
7296  index_delete_sort(delstate);
7297 
7298  /*
7299  * Bottom-up case: resort deltids array in an order attuned to where the
7300  * greatest number of promising TIDs are to be found, and determine how
7301  * many blocks from the start of sorted array should be considered
7302  * favorable. This will also shrink the deltids array in order to
7303  * eliminate completely unfavorable blocks up front.
7304  */
7305  if (delstate->bottomup)
7306  nblocksfavorable = bottomup_sort_and_shrink(delstate);
7307 
7308 #ifdef USE_PREFETCH
7309  /* Initialize prefetch state. */
7310  prefetch_state.cur_hblkno = InvalidBlockNumber;
7311  prefetch_state.next_item = 0;
7312  prefetch_state.ndeltids = delstate->ndeltids;
7313  prefetch_state.deltids = delstate->deltids;
7314 
7315  /*
7316  * Determine the prefetch distance that we will attempt to maintain.
7317  *
7318  * Since the caller holds a buffer lock somewhere in rel, we'd better make
7319  * sure that isn't a catalog relation before we call code that does
7320  * syscache lookups, to avoid risk of deadlock.
7321  */
7322  if (IsCatalogRelation(rel))
7323  prefetch_distance = maintenance_io_concurrency;
7324  else
7325  prefetch_distance =
7327 
7328  /* Cap initial prefetch distance for bottom-up deletion caller */
7329  if (delstate->bottomup)
7330  {
7331  Assert(nblocksfavorable >= 1);
7332  Assert(nblocksfavorable <= BOTTOMUP_MAX_NBLOCKS);
7333  prefetch_distance = Min(prefetch_distance, nblocksfavorable);
7334  }
7335 
7336  /* Start prefetching. */
7337  index_delete_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7338 #endif
7339 
7340  /* Iterate over deltids, determine which to delete, check their horizon */
7341  Assert(delstate->ndeltids > 0);
7342  for (int i = 0; i < delstate->ndeltids; i++)
7343  {
7344  TM_IndexDelete *ideltid = &delstate->deltids[i];
7345  TM_IndexStatus *istatus = delstate->status + ideltid->id;
7346  ItemPointer htid = &ideltid->tid;
7347  OffsetNumber offnum;
7348 
7349  /*
7350  * Read buffer, and perform required extra steps each time a new block
7351  * is encountered. Avoid refetching if it's the same block as the one
7352  * from the last htid.
7353  */
7354  if (blkno == InvalidBlockNumber ||
7355  ItemPointerGetBlockNumber(htid) != blkno)
7356  {
7357  /*
7358  * Consider giving up early for bottom-up index deletion caller
7359  * first. (Only prefetch next-next block afterwards, when it
7360  * becomes clear that we're at least going to access the next
7361  * block in line.)
7362  *
7363  * Sometimes the first block frees so much space for bottom-up
7364  * caller that the deletion process can end without accessing any
7365  * more blocks. It is usually necessary to access 2 or 3 blocks
7366  * per bottom-up deletion operation, though.
7367  */
7368  if (delstate->bottomup)
7369  {
7370  /*
7371  * We often allow caller to delete a few additional items
7372  * whose entries we reached after the point that space target
7373  * from caller was satisfied. The cost of accessing the page
7374  * was already paid at that point, so it made sense to finish
7375  * it off. When that happened, we finalize everything here
7376  * (by finishing off the whole bottom-up deletion operation
7377  * without needlessly paying the cost of accessing any more
7378  * blocks).
7379  */
7380  if (bottomup_final_block)
7381  break;
7382 
7383  /*
7384  * Give up when we didn't enable our caller to free any
7385  * additional space as a result of processing the page that we
7386  * just finished up with. This rule is the main way in which
7387  * we keep the cost of bottom-up deletion under control.
7388  */
7389  if (nblocksaccessed >= 1 && actualfreespace == lastfreespace)
7390  break;
7391  lastfreespace = actualfreespace; /* for next time */
7392 
7393  /*
7394  * Deletion operation (which is bottom-up) will definitely
7395  * access the next block in line. Prepare for that now.
7396  *
7397  * Decay target free space so that we don't hang on for too
7398  * long with a marginal case. (Space target is only truly
7399  * helpful when it allows us to recognize that we don't need
7400  * to access more than 1 or 2 blocks to satisfy caller due to
7401  * agreeable workload characteristics.)
7402  *
7403  * We are a bit more patient when we encounter contiguous
7404  * blocks, though: these are treated as favorable blocks. The
7405  * decay process is only applied when the next block in line
7406  * is not a favorable/contiguous block. This is not an
7407  * exception to the general rule; we still insist on finding
7408  * at least one deletable item per block accessed. See
7409  * bottomup_nblocksfavorable() for full details of the theory
7410  * behind favorable blocks and heap block locality in general.
7411  *
7412  * Note: The first block in line is always treated as a
7413  * favorable block, so the earliest possible point that the
7414  * decay can be applied is just before we access the second
7415  * block in line. The Assert() verifies this for us.
7416  */
7417  Assert(nblocksaccessed > 0 || nblocksfavorable > 0);
7418  if (nblocksfavorable > 0)
7419  nblocksfavorable--;
7420  else
7421  curtargetfreespace /= 2;
7422  }
7423 
7424  /* release old buffer */
7425  if (BufferIsValid(buf))
7426  UnlockReleaseBuffer(buf);
7427 
7428  blkno = ItemPointerGetBlockNumber(htid);
7429  buf = ReadBuffer(rel, blkno);
7430  nblocksaccessed++;
7431  Assert(!delstate->bottomup ||
7432  nblocksaccessed <= BOTTOMUP_MAX_NBLOCKS);
7433 
7434 #ifdef USE_PREFETCH
7435 
7436  /*
7437  * To maintain the prefetch distance, prefetch one more page for
7438  * each page we read.
7439  */
7440  index_delete_prefetch_buffer(rel, &prefetch_state, 1);
7441 #endif
7442 
7444 
7445  page = BufferGetPage(buf);
7446  maxoff = PageGetMaxOffsetNumber(page);
7447  }
7448 
7449  if (istatus->knowndeletable)
7450  Assert(!delstate->bottomup && !istatus->promising);
7451  else
7452  {
7453  ItemPointerData tmp = *htid;
7454  HeapTupleData heapTuple;
7455 
7456  /* Are any tuples from this HOT chain non-vacuumable? */
7457  if (heap_hot_search_buffer(&tmp, rel, buf, &SnapshotNonVacuumable,
7458  &heapTuple, NULL, true))
7459  continue; /* can't delete entry */
7460 
7461  /* Caller will delete, since whole HOT chain is vacuumable */
7462  istatus->knowndeletable = true;
7463 
7464  /* Maintain index free space info for bottom-up deletion case */
7465  if (delstate->bottomup)
7466  {
7467  Assert(istatus->freespace > 0);
7468  actualfreespace += istatus->freespace;
7469  if (actualfreespace >= curtargetfreespace)
7470  bottomup_final_block = true;
7471  }
7472  }
7473 
7474  /*
7475  * Maintain latestRemovedXid value for deletion operation as a whole
7476  * by advancing current value using heap tuple headers. This is
7477  * loosely based on the logic for pruning a HOT chain.
7478  */
7479  offnum = ItemPointerGetOffsetNumber(htid);
7480  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
7481  for (;;)
7482  {
7483  ItemId lp;
7484  HeapTupleHeader htup;
7485 
7486  /* Some sanity checks */
7487  if (offnum < FirstOffsetNumber || offnum > maxoff)
7488  {
7489  Assert(false);
7490  break;
7491  }
7492 
7493  lp = PageGetItemId(page, offnum);
7494  if (ItemIdIsRedirected(lp))
7495  {
7496  offnum = ItemIdGetRedirect(lp);
7497  continue;
7498  }
7499 
7500  /*
7501  * We'll often encounter LP_DEAD line pointers (especially with an
7502  * entry marked knowndeletable by our caller up front). No heap
7503  * tuple headers get examined for an htid that leads us to an
7504  * LP_DEAD item. This is okay because the earlier pruning
7505  * operation that made the line pointer LP_DEAD in the first place
7506  * must have considered the original tuple header as part of
7507  * generating its own latestRemovedXid value.
7508  *
7509  * Relying on XLOG_HEAP2_PRUNE records like this is the same
7510  * strategy that index vacuuming uses in all cases. Index VACUUM
7511  * WAL records don't even have a latestRemovedXid field of their
7512  * own for this reason.
7513  */
7514  if (!ItemIdIsNormal(lp))
7515  break;
7516 
7517  htup = (HeapTupleHeader) PageGetItem(page, lp);
7518 
7519  /*
7520  * Check the tuple XMIN against prior XMAX, if any
7521  */
7522  if (TransactionIdIsValid(priorXmax) &&
7523  !TransactionIdEquals(HeapTupleHeaderGetXmin(htup), priorXmax))
7524  break;
7525 
7526  HeapTupleHeaderAdvanceLatestRemovedXid(htup, &latestRemovedXid);
7527 
7528  /*
7529  * If the tuple is not HOT-updated, then we are at the end of this
7530  * HOT-chain. No need to visit later tuples from the same update
7531  * chain (they get their own index entries) -- just move on to
7532  * next htid from index AM caller.
7533  */
7534  if (!HeapTupleHeaderIsHotUpdated(htup))
7535  break;
7536 
7537  /* Advance to next HOT chain member */
7538  Assert(ItemPointerGetBlockNumber(&htup->t_ctid) == blkno);
7539  offnum = ItemPointerGetOffsetNumber(&htup->t_ctid);
7540  priorXmax = HeapTupleHeaderGetUpdateXid(htup);
7541  }
7542 
7543  /* Enable further/final shrinking of deltids for caller */
7544  finalndeltids = i + 1;
7545  }
7546 
7547  UnlockReleaseBuffer(buf);
7548 
7549  /*
7550  * Shrink deltids array to exclude non-deletable entries at the end. This
7551  * is not just a minor optimization. Final deltids array size might be
7552  * zero for a bottom-up caller. Index AM is explicitly allowed to rely on
7553  * ndeltids being zero in all cases with zero total deletable entries.
7554  */
7555  Assert(finalndeltids > 0 || delstate->bottomup);
7556  delstate->ndeltids = finalndeltids;
7557 
7558  return latestRemovedXid;
7559 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:7179
TM_IndexDelete * deltids
Definition: tableam.h:228
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:104
int maintenance_io_concurrency
Definition: bufmgr.c:150
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:587
static int bottomup_sort_and_shrink(TM_IndexDeleteOp *delstate)
Definition: heapam.c:7814
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define Min(x, y)
Definition: c.h:986
#define InvalidBuffer
Definition: buf.h:25
bool knowndeletable
Definition: tableam.h:196
#define InitNonVacuumableSnapshot(snapshotdata, vistestp)
Definition: snapmgr.h:82
uint32 BlockNumber
Definition: block.h:31
Form_pg_class rd_rel
Definition: rel.h:109
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:1710
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:4029
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition: spccache.c:228
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3795
ItemPointerData t_ctid
Definition: htup_details.h:160
static char * buf
Definition: pg_test_fsync.c:68
#define BOTTOMUP_MAX_NBLOCKS
Definition: heapam.c:185
#define InvalidTransactionId
Definition: transam.h:31
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool promising
Definition: tableam.h:199
#define HeapTupleHeaderIsHotUpdated(tup)
Definition: htup_details.h:483
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
TM_IndexStatus * status
Definition: tableam.h:229
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
ItemPointerData tid
Definition: tableam.h:189
#define InvalidOffsetNumber
Definition: off.h:26
static void index_delete_sort(TM_IndexDeleteOp *delstate)
Definition: heapam.c:7601
#define Assert(condition)
Definition: c.h:804
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
int16 freespace
Definition: tableam.h:200
int i
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
int bottomupfreespace
Definition: tableam.h:224
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 5991 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_update_stats(), truncate_update_partedrel_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

5992 {
5993  Buffer buffer;
5994  Page page;
5995  OffsetNumber offnum;
5996  ItemId lp = NULL;
5997  HeapTupleHeader htup;
5998  uint32 oldlen;
5999  uint32 newlen;
6000 
6001  /*
6002  * For now, we don't allow parallel updates. Unlike a regular update,
6003  * this should never create a combo CID, so it might be possible to relax
6004  * this restriction, but not without more thought and testing. It's not
6005  * clear that it would be useful, anyway.
6006  */
6007  if (IsInParallelMode())
6008  ereport(ERROR,
6009  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6010  errmsg("cannot update tuples during a parallel operation")));
6011 
6012  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6014  page = (Page) BufferGetPage(buffer);
6015 
6016  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6017  if (PageGetMaxOffsetNumber(page) >= offnum)
6018  lp = PageGetItemId(page, offnum);
6019 
6020  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6021  elog(ERROR, "invalid lp");
6022 
6023  htup = (HeapTupleHeader) PageGetItem(page, lp);
6024 
6025  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6026  newlen = tuple->t_len - tuple->t_data->t_hoff;
6027  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6028  elog(ERROR, "wrong tuple length");
6029 
6030  /* NO EREPORT(ERROR) from here till changes are logged */
6032 
6033  memcpy((char *) htup + htup->t_hoff,
6034  (char *) tuple->t_data + tuple->t_data->t_hoff,
6035  newlen);
6036 
6037  MarkBufferDirty(buffer);
6038 
6039  /* XLOG stuff */
6040  if (RelationNeedsWAL(relation))
6041  {
6042  xl_heap_inplace xlrec;
6043  XLogRecPtr recptr;
6044 
6045  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6046 
6047  XLogBeginInsert();
6048  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6049 
6050  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6051  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6052 
6053  /* inplace updates aren't decoded atm, don't log the origin */
6054 
6055  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6056 
6057  PageSetLSN(page, recptr);
6058  }
6059 
6060  END_CRIT_SECTION();
6061 
6062  UnlockReleaseBuffer(buffer);
6063 
6064  /*
6065  * Send out shared cache inval if necessary. Note that because we only
6066  * pass the new version of the tuple, this mustn't be used for any
6067  * operations that could change catcache lookup keys. But we aren't
6068  * bothering with index updates either, so that's true a fortiori.
6069  */
6071  CacheInvalidateHeapTuple(relation, tuple, NULL);
6072 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:378
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1556
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:232
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:149
#define SizeOfHeapInplace
Definition: heapam_xlog.h:314
#define START_CRIT_SECTION()
Definition: miscadmin.h:147
int errcode(int sqlerrcode)
Definition: elog.c:698
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3795
#define ERROR
Definition: elog.h:46
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:35
unsigned int uint32
Definition: c.h:441
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:340
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:432
OffsetNumber offnum
Definition: heapam_xlog.h:310
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:4011
#define ereport(elevel,...)
Definition: elog.h:157
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:694
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:601
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:406
int errmsg(const char *fmt,...)
Definition: elog.c:909
#define elog(elevel,...)
Definition: elog.h:232
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:135
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_insert()

void heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2060 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_NO_LOGICAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleHeaderGetNatts, InvalidBlockNumber, InvalidBuffer, IsToastRelation(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationGetNumberOfAttributes, RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLH_INSERT_ON_TOAST_RELATION, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_insert(), heapam_tuple_insert_speculative(), simple_heap_insert(), and toast_save_datum().

2062 {
/*
 * NOTE(review): this listing is a text extraction of a Doxygen HTML page;
 * the embedded "2062"-style numbers are the original heapam.c line numbers.
 * Gaps in that numbering (2063, 2070, 2104, 2107, 2115, 2148, 2157,
 * 2166-2169, 2177, 2179, 2183, 2201, 2206) are statements that were
 * hyperlinks in the HTML and were dropped by the extraction. For example,
 * `xid` is used at 2079 but its declaration (original line 2063 -- presumably
 * `TransactionId xid = GetCurrentTransactionId();`, since that symbol appears
 * in the References list above) is missing here. Likewise the dropped line
 * 2070 presumably held the Assert on HeapTupleHeaderGetNatts paired with the
 * "rowtype" comment at 2069. Verify all gaps against the real heapam.c
 * before treating this listing as authoritative.
 */
2064  HeapTuple heaptup;
2065  Buffer buffer;
2066  Buffer vmbuffer = InvalidBuffer;
2067  bool all_visible_cleared = false;
2068 
2069  /* Cheap, simplistic check that the tuple matches the rel's rowtype. */
2071  RelationGetNumberOfAttributes(relation));
2072 
2073  /*
2074  * Fill in tuple header fields and toast the tuple if necessary.
2075  *
2076  * Note: below this point, heaptup is the data we actually intend to store
2077  * into the relation; tup is the caller's original untoasted data.
2078  */
2079  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2080 
2081  /*
2082  * Find buffer to insert this tuple into. If the page is all visible,
2083  * this will also pin the requisite visibility map page.
2084  */
2085  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2086  InvalidBuffer, options, bistate,
2087  &vmbuffer, NULL);
2088 
2089  /*
2090  * We're about to do the actual insert -- but check for conflict first, to
2091  * avoid possibly having to roll back work we've just done.
2092  *
2093  * This is safe without a recheck as long as there is no possibility of
2094  * another process scanning the page between this check and the insert
2095  * being visible to the scan (i.e., an exclusive buffer content lock is
2096  * continuously held from this point until the tuple insert is visible).
2097  *
2098  * For a heap insert, we only need to check for table-level SSI locks. Our
2099  * new tuple can't possibly conflict with existing tuple locks, and heap
2100  * page locks are only consolidated versions of tuple locks; they do not
2101  * lock "gaps" as index page locks do. So we don't need to specify a
2102  * buffer when making the call, which makes for a faster check.
2103  */
/*
 * NOTE(review): dropped line 2104 -- per the comment above and
 * CheckForSerializableConflictIn() in the References list, this was the
 * SSI conflict check call. Confirm against upstream.
 */
2105 
2106  /* NO EREPORT(ERROR) from here till changes are logged */
/*
 * NOTE(review): dropped line 2107 -- START_CRIT_SECTION(), per the comment
 * on 2106 and the matching END_CRIT_SECTION() at 2213.
 */
2108 
2109  RelationPutHeapTuple(relation, buffer, heaptup,
2110  (options & HEAP_INSERT_SPECULATIVE) != 0);
2111 
2112  if (PageIsAllVisible(BufferGetPage(buffer)))
2113  {
2114  all_visible_cleared = true;
/*
 * NOTE(review): dropped line 2115 -- presumably PageClearAllVisible() on
 * this page (symbol appears in the References list); verify.
 */
2116  visibilitymap_clear(relation,
2117  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2118  vmbuffer, VISIBILITYMAP_VALID_BITS);
2119  }
2120 
2121  /*
2122  * XXX Should we set PageSetPrunable on this page ?
2123  *
2124  * The inserting transaction may eventually abort thus making this tuple
2125  * DEAD and hence available for pruning. Though we don't want to optimize
2126  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2127  * aborted tuple will never be pruned until next vacuum is triggered.
2128  *
2129  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2130  */
2131 
2132  MarkBufferDirty(buffer);
2133 
2134  /* XLOG stuff */
2135  if (RelationNeedsWAL(relation))
2136  {
2137  xl_heap_insert xlrec;
2138  xl_heap_header xlhdr;
2139  XLogRecPtr recptr;
2140  Page page = BufferGetPage(buffer);
2141  uint8 info = XLOG_HEAP_INSERT;
2142  int bufflags = 0;
2143 
2144  /*
2145  * If this is a catalog, we need to transmit combo CIDs to properly
2146  * decode, so log that as well.
2147  */
/*
 * NOTE(review): dropped line 2148 -- the catalog check guarding this
 * log_heap_new_cid() call (RelationIsAccessibleInLogicalDecoding appears
 * in the References list); without it, 2149 reads as unconditional, which
 * contradicts the comment above. Confirm against upstream.
 */
2149  log_heap_new_cid(relation, heaptup);
2150 
2151  /*
2152  * If this is the single and first tuple on page, we can reinit the
2153  * page instead of restoring the whole thing. Set flag, and hide
2154  * buffer references from XLogInsert.
2155  */
2156  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
/* NOTE(review): dropped line 2157 -- the second half of this && condition. */
2158  {
2159  info |= XLOG_HEAP_INIT_PAGE;
2160  bufflags |= REGBUF_WILL_INIT;
2161  }
2162 
2163  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2164  xlrec.flags = 0;
2165  if (all_visible_cleared)
/*
 * NOTE(review): dropped lines 2166-2169 -- the body of this if plus
 * subsequent flag handling (XLH_INSERT_ALL_VISIBLE_CLEARED and
 * XLH_INSERT_IS_SPECULATIVE appear in the References list). Verify.
 */
2170 
2171  /*
2172  * For logical decoding, we need the tuple even if we're doing a full
2173  * page write, so make sure it's included even if we take a full-page
2174  * image. (XXX We could alternatively store a pointer into the FPW).
2175  */
2176  if (RelationIsLogicallyLogged(relation) &&
/*
 * NOTE(review): dropped lines 2177 (rest of this condition, presumably the
 * HEAP_INSERT_NO_LOGICAL test per References), 2179
 * (XLH_INSERT_CONTAINS_NEW_TUPLE flag set) and 2183 (the toast-relation
 * flag set guarded by 2182). Confirm against upstream.
 */
2178  {
2180  bufflags |= REGBUF_KEEP_DATA;
2181 
2182  if (IsToastRelation(relation))
2184  }
2185 
2186  XLogBeginInsert();
2187  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2188 
2189  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2190  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2191  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2192 
2193  /*
2194  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2195  * write the whole page to the xlog, we don't need to store
2196  * xl_heap_header in the xlog.
2197  */
2198  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2199  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2200  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
/*
 * NOTE(review): dropped line 2201 -- the XLogRegisterBufData(0, ...) call
 * whose continuation arguments are lines 2202-2203 below.
 */
2202  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2203  heaptup->t_len - SizeofHeapTupleHeader);
2204 
2205  /* filtering by origin on a row level is much more efficient */
/*
 * NOTE(review): dropped line 2206 -- presumably
 * XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN), per the comment above and the
 * References list.
 */
2207 
2208  recptr = XLogInsert(RM_HEAP_ID, info);
2209 
2210  PageSetLSN(page, recptr);
2211  }
2212 
2213  END_CRIT_SECTION();
2214 
2215  UnlockReleaseBuffer(buffer);
2216  if (vmbuffer != InvalidBuffer)
2217  ReleaseBuffer(vmbuffer);
2218 
2219  /*
2220  * If tuple is cachable, mark it for invalidation from the caches in case
2221  * we abort. Note it is OK to do this after releasing the buffer, because
2222  * the heaptup data structure is all in local memory, not in the shared
2223  * buffer.
2224  */
2225  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2226 
2227  /* Note: speculative insertions are counted too, even if aborted later */
2228  pgstat_count_heap_insert(relation, 1);
2229 
2230  /*
2231  * If heaptup is a private copy, release it. Don't forget to copy t_self
2232  * back to the caller's image, too.
2233  */
2234  if (heaptup != tup)
2235  {
2236  tup->t_self = heaptup->t_self;
2237  heap_freetuple(heaptup);
2238  }
2239 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:378
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
bool IsToastRelation(Relation relation)
Definition: catalog.c:146
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:8228
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1123
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2248
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:587