PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static void heap_parallelscan_startblock_init (HeapScanDesc scan)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
void heap_parallelscan_reinitialize (ParallelHeapScanDesc parallel_scan)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
}   tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
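
The tupleLockExtraInfo table ties each LockTupleMode to the heavyweight lock taken by the LockTupleTuplock() family of macros (hwlock) and to the MultiXactStatus values handed out by get_mxact_status_for_lock() (lockstatus and updstatus, with -1 marking an invalid combination). The sketch below illustrates the shape of such a table; the entries and the name tupleLockExtraInfo_example are illustrative assumptions, not a verbatim copy, so consult the definition in heapam.c for the authoritative values.

/* Illustrative sketch only -- not copied verbatim from heapam.c. */
static const struct
{
    LOCKMODE    hwlock;      /* heavyweight lock taken by LockTupleTuplock() */
    int         lockstatus;  /* MultiXactStatus when only locking */
    int         updstatus;   /* MultiXactStatus when updating, or -1 */
}           tupleLockExtraInfo_example[MaxLockTupleMode + 1] =
{
    {AccessShareLock,     MultiXactStatusForKeyShare,    -1},                          /* LockTupleKeyShare */
    {RowShareLock,        MultiXactStatusForShare,       -1},                          /* LockTupleShare */
    {ExclusiveLock,       MultiXactStatusForNoKeyUpdate, MultiXactStatusNoKeyUpdate},  /* LockTupleNoKeyExclusive */
    {AccessExclusiveLock, MultiXactStatusForUpdate,      MultiXactStatusUpdate}        /* LockTupleExclusive */
};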
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 185 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6332 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6335 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 6331 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6334 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6333 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ HEAPDEBUG_1

#define HEAPDEBUG_1

Definition at line 1801 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_2

#define HEAPDEBUG_2

Definition at line 1802 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_3

#define HEAPDEBUG_3

Definition at line 1803 of file heapam.c.

Referenced by heap_getnext().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

Definition at line 173 of file heapam.c.

Referenced by Do_MultiXactIdWait(), and DoesMultiXactIdConflict().

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 181 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 203 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 183 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().
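
Together these macros let heapam.c acquire and release the heavyweight tuple lock that matches a given LockTupleMode. The condensed sketch below shows how heap_acquire_tuplock() can combine LockTupleTuplock() and ConditionalLockTupleTuplock() depending on the wait policy; the function name acquire_tuplock_sketch and the error message are assumptions, and this is a simplified illustration rather than the verbatim source.

/* Sketch only: a simplified view of how the Tuplock wrappers pair up. */
static bool
acquire_tuplock_sketch(Relation relation, ItemPointer tid,
                       LockTupleMode mode, LockWaitPolicy wait_policy,
                       bool *have_tuple_lock)
{
    if (*have_tuple_lock)
        return true;            /* already holding it */

    switch (wait_policy)
    {
        case LockWaitBlock:
            LockTupleTuplock(relation, tid, mode);
            break;
        case LockWaitSkip:
            if (!ConditionalLockTupleTuplock(relation, tid, mode))
                return false;   /* caller skips the row */
            break;
        case LockWaitError:
            if (!ConditionalLockTupleTuplock(relation, tid, mode))
                ereport(ERROR,
                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                         errmsg("could not obtain lock on row")));
            break;
    }
    *have_tuple_lock = true;    /* released later via UnlockTupleTuplock() */
    return true;
}

heap_delete(), heap_update() and heap_lock_tuple() then release the lock with UnlockTupleTuplock() once the tuple operation completes, as the Referenced-by lists above indicate.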

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2971 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2972 {
2973  return
2974  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2975  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2976  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2977  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2978  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2979  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2980  XLHL_KEYS_UPDATED : 0);
2981 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:184
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:187
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:186
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 5275 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5280 {
5281  TransactionId new_xmax;
5282  uint16 new_infomask,
5283  new_infomask2;
5284 
5285  Assert(TransactionIdIsValid(add_to_xmax));
5286 
5287 l5:
5288  new_infomask = 0;
5289  new_infomask2 = 0;
5290  if (old_infomask & HEAP_XMAX_INVALID)
5291  {
5292  /*
5293  * No previous locker; we just insert our own TransactionId.
5294  *
5295  * Note that it's critical that this case be the first one checked,
5296  * because there are several blocks below that come back to this one
5297  * to implement certain optimizations; old_infomask might contain
5298  * other dirty bits in those cases, but we don't really care.
5299  */
5300  if (is_update)
5301  {
5302  new_xmax = add_to_xmax;
5303  if (mode == LockTupleExclusive)
5304  new_infomask2 |= HEAP_KEYS_UPDATED;
5305  }
5306  else
5307  {
5308  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5309  switch (mode)
5310  {
5311  case LockTupleKeyShare:
5312  new_xmax = add_to_xmax;
5313  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5314  break;
5315  case LockTupleShare:
5316  new_xmax = add_to_xmax;
5317  new_infomask |= HEAP_XMAX_SHR_LOCK;
5318  break;
5319  case LockTupleNoKeyExclusive:
5320  new_xmax = add_to_xmax;
5321  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5322  break;
5323  case LockTupleExclusive:
5324  new_xmax = add_to_xmax;
5325  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5326  new_infomask2 |= HEAP_KEYS_UPDATED;
5327  break;
5328  default:
5329  new_xmax = InvalidTransactionId; /* silence compiler */
5330  elog(ERROR, "invalid lock mode");
5331  }
5332  }
5333  }
5334  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5335  {
5336  MultiXactStatus new_status;
5337 
5338  /*
5339  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5340  * cross-check.
5341  */
5342  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5343 
5344  /*
5345  * A multixact together with LOCK_ONLY set but neither lock bit set
5346  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5347  * anymore. This check is critical for databases upgraded by
5348  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5349  * that such multis are never passed.
5350  */
5351  if (HEAP_LOCKED_UPGRADED(old_infomask))
5352  {
5353  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5354  old_infomask |= HEAP_XMAX_INVALID;
5355  goto l5;
5356  }
5357 
5358  /*
5359  * If the XMAX is already a MultiXactId, then we need to expand it to
5360  * include add_to_xmax; but if all the members were lockers and are
5361  * all gone, we can do away with the IS_MULTI bit and just set
5362  * add_to_xmax as the only locker/updater. If all lockers are gone
5363  * and we have an updater that aborted, we can also do without a
5364  * multi.
5365  *
5366  * The cost of doing GetMultiXactIdMembers would be paid by
5367  * MultiXactIdExpand if we weren't to do this, so this check is not
5368  * incurring extra work anyhow.
5369  */
5370  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5371  {
5372  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5373  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5374  old_infomask)))
5375  {
5376  /*
5377  * Reset these bits and restart; otherwise fall through to
5378  * create a new multi below.
5379  */
5380  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5381  old_infomask |= HEAP_XMAX_INVALID;
5382  goto l5;
5383  }
5384  }
5385 
5386  new_status = get_mxact_status_for_lock(mode, is_update);
5387 
5388  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5389  new_status);
5390  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5391  }
5392  else if (old_infomask & HEAP_XMAX_COMMITTED)
5393  {
5394  /*
5395  * It's a committed update, so we need to preserve him as updater of
5396  * the tuple.
5397  */
5398  MultiXactStatus status;
5399  MultiXactStatus new_status;
5400 
5401  if (old_infomask2 & HEAP_KEYS_UPDATED)
5402  status = MultiXactStatusUpdate;
5403  else
5404  status = MultiXactStatusNoKeyUpdate;
5405 
5406  new_status = get_mxact_status_for_lock(mode, is_update);
5407 
5408  /*
5409  * since it's not running, it's obviously impossible for the old
5410  * updater to be identical to the current one, so we need not check
5411  * for that case as we do in the block above.
5412  */
5413  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5414  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5415  }
5416  else if (TransactionIdIsInProgress(xmax))
5417  {
5418  /*
5419  * If the XMAX is a valid, in-progress TransactionId, then we need to
5420  * create a new MultiXactId that includes both the old locker or
5421  * updater and our own TransactionId.
5422  */
5423  MultiXactStatus new_status;
5424  MultiXactStatus old_status;
5425  LockTupleMode old_mode;
5426 
5427  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5428  {
5429  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5430  old_status = MultiXactStatusForKeyShare;
5431  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5432  old_status = MultiXactStatusForShare;
5433  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5434  {
5435  if (old_infomask2 & HEAP_KEYS_UPDATED)
5436  old_status = MultiXactStatusForUpdate;
5437  else
5438  old_status = MultiXactStatusForNoKeyUpdate;
5439  }
5440  else
5441  {
5442  /*
5443  * LOCK_ONLY can be present alone only when a page has been
5444  * upgraded by pg_upgrade. But in that case,
5445  * TransactionIdIsInProgress() should have returned false. We
5446  * assume it's no longer locked in this case.
5447  */
5448  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5449  old_infomask |= HEAP_XMAX_INVALID;
5450  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5451  goto l5;
5452  }
5453  }
5454  else
5455  {
5456  /* it's an update, but which kind? */
5457  if (old_infomask2 & HEAP_KEYS_UPDATED)
5458  old_status = MultiXactStatusUpdate;
5459  else
5460  old_status = MultiXactStatusNoKeyUpdate;
5461  }
5462 
5463  old_mode = TUPLOCK_from_mxstatus(old_status);
5464 
5465  /*
5466  * If the lock to be acquired is for the same TransactionId as the
5467  * existing lock, there's an optimization possible: consider only the
5468  * strongest of both locks as the only one present, and restart.
5469  */
5470  if (xmax == add_to_xmax)
5471  {
5472  /*
5473  * Note that it's not possible for the original tuple to be
5474  * updated: we wouldn't be here because the tuple would have been
5475  * invisible and we wouldn't try to update it. As a subtlety,
5476  * this code can also run when traversing an update chain to lock
5477  * future versions of a tuple. But we wouldn't be here either,
5478  * because the add_to_xmax would be different from the original
5479  * updater.
5480  */
5481  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5482 
5483  /* acquire the strongest of both */
5484  if (mode < old_mode)
5485  mode = old_mode;
5486  /* mustn't touch is_update */
5487 
5488  old_infomask |= HEAP_XMAX_INVALID;
5489  goto l5;
5490  }
5491 
5492  /* otherwise, just fall back to creating a new multixact */
5493  new_status = get_mxact_status_for_lock(mode, is_update);
5494  new_xmax = MultiXactIdCreate(xmax, old_status,
5495  add_to_xmax, new_status);
5496  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5497  }
5498  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5499  TransactionIdDidCommit(xmax))
5500  {
5501  /*
5502  * It's a committed update, so we gotta preserve him as updater of the
5503  * tuple.
5504  */
5505  MultiXactStatus status;
5506  MultiXactStatus new_status;
5507 
5508  if (old_infomask2 & HEAP_KEYS_UPDATED)
5509  status = MultiXactStatusUpdate;
5510  else
5511  status = MultiXactStatusNoKeyUpdate;
5512 
5513  new_status = get_mxact_status_for_lock(mode, is_update);
5514 
5515  /*
5516  * since it's not running, it's obviously impossible for the old
5517  * updater to be identical to the current one, so we need not check
5518  * for that case as we do in the block above.
5519  */
5520  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5521  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5522  }
5523  else
5524  {
5525  /*
5526  * Can get here iff the locking/updating transaction was running when
5527  * the infomask was extracted from the tuple, but finished before
5528  * TransactionIdIsInProgress got to run. Deal with it as if there was
5529  * no locker at all in the first place.
5530  */
5531  old_infomask |= HEAP_XMAX_INVALID;
5532  goto l5;
5533  }
5534 
5535  *result_infomask = new_infomask;
5536  *result_infomask2 = new_infomask2;
5537  *result_xmax = new_xmax;
5538 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6850
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:184
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:187
uint32 TransactionId
Definition: c.h:445
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:243
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:197
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:190
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:253
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6931
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:295
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:198
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:186
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
TransactionId MultiXactId
Definition: c.h:455
#define Assert(condition)
Definition: c.h:670
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4507
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:255
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:257
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7185 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7187 {
7188  return Do_MultiXactIdWait(multi, status, infomask, true,
7189  rel, NULL, XLTW_None, remaining);
7190 }
int remaining
Definition: informix.c:692
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7085
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 7085 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7089 {
7090  bool result = true;
7091  MultiXactMember *members;
7092  int nmembers;
7093  int remain = 0;
7094 
7095  /* for pre-pg_upgrade tuples, no need to sleep at all */
7096  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7097  GetMultiXactIdMembers(multi, &members, false,
7098  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7099 
7100  if (nmembers >= 0)
7101  {
7102  int i;
7103 
7104  for (i = 0; i < nmembers; i++)
7105  {
7106  TransactionId memxid = members[i].xid;
7107  MultiXactStatus memstatus = members[i].status;
7108 
7109  if (TransactionIdIsCurrentTransactionId(memxid))
7110  {
7111  remain++;
7112  continue;
7113  }
7114 
7115  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7116  LOCKMODE_from_mxstatus(status)))
7117  {
7118  if (remaining && TransactionIdIsInProgress(memxid))
7119  remain++;
7120  continue;
7121  }
7122 
7123  /*
7124  * This member conflicts with our multi, so we have to sleep (or
7125  * return failure, if asked to avoid waiting.)
7126  *
7127  * Note that we don't set up an error context callback ourselves,
7128  * but instead we pass the info down to XactLockTableWait. This
7129  * might seem a bit wasteful because the context is set up and
7130  * tore down for each member of the multixact, but in reality it
7131  * should be barely noticeable, and it avoids duplicate code.
7132  */
7133  if (nowait)
7134  {
7135  result = ConditionalXactLockTableWait(memxid);
7136  if (!result)
7137  break;
7138  }
7139  else
7140  XactLockTableWait(memxid, rel, ctid, oper);
7141  }
7142 
7143  pfree(members);
7144  }
7145 
7146  if (remaining)
7147  *remaining = remain;
7148 
7149  return result;
7150 }
int remaining
Definition: informix.c:692
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:445
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:243
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode 
)
static

Definition at line 6996 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6998 {
6999  int nmembers;
7000  MultiXactMember *members;
7001  bool result = false;
7002  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7003 
7004  if (HEAP_LOCKED_UPGRADED(infomask))
7005  return false;
7006 
7007  nmembers = GetMultiXactIdMembers(multi, &members, false,
7008  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7009  if (nmembers >= 0)
7010  {
7011  int i;
7012 
7013  for (i = 0; i < nmembers; i++)
7014  {
7015  TransactionId memxid;
7016  LOCKMODE memlockmode;
7017 
7018  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7019 
7020  /* ignore members that don't conflict with the lock we want */
7021  if (!DoLockModesConflict(memlockmode, wanted))
7022  continue;
7023 
7024  /* ignore members from current xact */
7025  memxid = members[i].xid;
7026  if (TransactionIdIsCurrentTransactionId(memxid))
7027  continue;
7028 
7029  if (ISUPDATE_from_mxstatus(members[i].status))
7030  {
7031  /* ignore aborted updaters */
7032  if (TransactionIdDidAbort(memxid))
7033  continue;
7034  }
7035  else
7036  {
7037  /* ignore lockers-only that are no longer in progress */
7038  if (!TransactionIdIsInProgress(memxid))
7039  continue;
7040  }
7041 
7042  /*
7043  * Whatever remains are either live lockers that conflict with our
7044  * wanted lock, and updaters that are not aborted. Those conflict
7045  * with what we want, so return true.
7046  */
7047  result = true;
7048  break;
7049  }
7050  pfree(members);
7051  }
7052 
7053  return result;
7054 }
uint32 TransactionId
Definition: c.h:445
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:243
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_modified,
bool *  copy 
)
static

Definition at line 7823 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7824 {
7825  TupleDesc desc = RelationGetDescr(relation);
7826  Oid replidindex;
7827  Relation idx_rel;
7828  TupleDesc idx_desc;
7829  char replident = relation->rd_rel->relreplident;
7830  HeapTuple key_tuple = NULL;
7831  bool nulls[MaxHeapAttributeNumber];
7832  Datum values[MaxHeapAttributeNumber];
7833  int natt;
7834 
7835  *copy = false;
7836 
7837  if (!RelationIsLogicallyLogged(relation))
7838  return NULL;
7839 
7840  if (replident == REPLICA_IDENTITY_NOTHING)
7841  return NULL;
7842 
7843  if (replident == REPLICA_IDENTITY_FULL)
7844  {
7845  /*
7846  * When logging the entire old tuple, it very well could contain
7847  * toasted columns. If so, force them to be inlined.
7848  */
7849  if (HeapTupleHasExternal(tp))
7850  {
7851  *copy = true;
7852  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7853  }
7854  return tp;
7855  }
7856 
7857  /* if the key hasn't changed and we're only logging the key, we're done */
7858  if (!key_changed)
7859  return NULL;
7860 
7861  /* find the replica identity index */
7862  replidindex = RelationGetReplicaIndex(relation);
7863  if (!OidIsValid(replidindex))
7864  {
7865  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7866  RelationGetRelationName(relation));
7867  return NULL;
7868  }
7869 
7870  idx_rel = RelationIdGetRelation(replidindex);
7871  idx_desc = RelationGetDescr(idx_rel);
7872 
7873  /* deform tuple, so we have fast access to columns */
7874  heap_deform_tuple(tp, desc, values, nulls);
7875 
7876  /* set all columns to NULL, regardless of whether they actually are */
7877  memset(nulls, 1, sizeof(nulls));
7878 
7879  /*
7880  * Now set all columns contained in the index to NOT NULL, they cannot
7881  * currently be NULL.
7882  */
7883  for (natt = 0; natt < idx_desc->natts; natt++)
7884  {
7885  int attno = idx_rel->rd_index->indkey.values[natt];
7886 
7887  if (attno < 0)
7888  {
7889  /*
7890  * The OID column can appear in an index definition, but that's
7891  * OK, because we always copy the OID if present (see below).
7892  * Other system columns may not.
7893  */
7894  if (attno == ObjectIdAttributeNumber)
7895  continue;
7896  elog(ERROR, "system column in index");
7897  }
7898  nulls[attno - 1] = false;
7899  }
7900 
7901  key_tuple = heap_form_tuple(desc, values, nulls);
7902  *copy = true;
7903  RelationClose(idx_rel);
7904 
7905  /*
7906  * Always copy oids if the table has them, even if not included in the
7907  * index. The space in the logged tuple is used anyway, so there's little
7908  * point in not including the information.
7909  */
7910  if (relation->rd_rel->relhasoids)
7911  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7912 
7913  /*
7914  * If the tuple, which by here only contains indexed columns, still has
7915  * toasted columns, force them to be inlined. This is somewhat unlikely
7916  * since there's limits on the size of indexed columns, so we don't
7917  * duplicate toast_flatten_tuple()s functionality in the above loop over
7918  * the indexed columns, even if it would be more efficient.
7919  */
7920  if (HeapTupleHasExternal(key_tuple))
7921  {
7922  HeapTuple oldtup = key_tuple;
7923 
7924  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7925  heap_freetuple(oldtup);
7926  }
7927 
7928  return key_tuple;
7929 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1085
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4691
#define RelationGetDescr(relation)
Definition: rel.h:437
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
#define REPLICA_IDENTITY_NOTHING
Definition: pg_class.h:177
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:695
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:584
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:576
int natts
Definition: tupdesc.h:79
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:703
Form_pg_index rd_index
Definition: rel.h:159
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:445
void RelationClose(Relation relation)
Definition: relcache.c:2167
uintptr_t Datum
Definition: postgres.h:372
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:936
static Datum values[MAXATTR]
Definition: bootstrap.c:164
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:679
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:700
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2078

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 *  infomask,
uint16 *  infomask2 
)
static

Definition at line 8219 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8220 {
8221  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8222  HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
8223  *infomask2 &= ~HEAP_KEYS_UPDATED;
8224 
8225  if (infobits & XLHL_XMAX_IS_MULTI)
8226  *infomask |= HEAP_XMAX_IS_MULTI;
8227  if (infobits & XLHL_XMAX_LOCK_ONLY)
8228  *infomask |= HEAP_XMAX_LOCK_ONLY;
8229  if (infobits & XLHL_XMAX_EXCL_LOCK)
8230  *infomask |= HEAP_XMAX_EXCL_LOCK;
8231  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8232  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8233  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8234 
8235  if (infobits & XLHL_KEYS_UPDATED)
8236  *infomask2 |= HEAP_KEYS_UPDATED;
8237 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:184
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:187
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:186
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
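
fix_infomask_from_infobits() is the redo-side counterpart of compute_infobits(): it expands the compact XLHL_* bits carried in WAL records back into the tuple's infomask/infomask2 bits that compute_infobits() packed, and HEAP_XMAX_SHR_LOCK is deliberately ignored on both sides. The standalone sketch below demonstrates that round trip; the numeric constants are assumed copies of the values in htup_details.h and heapam_xlog.h, and pack_infobits()/unpack_infobits() are simplified re-implementations, not the heapam.c functions themselves.

/*
 * Standalone sketch of the compute_infobits()/fix_infomask_from_infobits()
 * round trip.  Constants are assumed copies; helpers are simplified.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HEAP_XMAX_KEYSHR_LOCK  0x0010   /* assumed value */
#define HEAP_XMAX_EXCL_LOCK    0x0040   /* assumed value */
#define HEAP_XMAX_LOCK_ONLY    0x0080   /* assumed value */
#define HEAP_XMAX_IS_MULTI     0x1000   /* assumed value */
#define HEAP_KEYS_UPDATED      0x2000   /* assumed value (infomask2) */

#define XLHL_XMAX_IS_MULTI     0x01
#define XLHL_XMAX_LOCK_ONLY    0x02
#define XLHL_XMAX_EXCL_LOCK    0x04
#define XLHL_XMAX_KEYSHR_LOCK  0x08
#define XLHL_KEYS_UPDATED      0x10

static uint8_t
pack_infobits(uint16_t infomask, uint16_t infomask2)
{
    /* mirrors compute_infobits(): HEAP_XMAX_SHR_LOCK is intentionally dropped */
    return ((infomask & HEAP_XMAX_IS_MULTI) ? XLHL_XMAX_IS_MULTI : 0) |
           ((infomask & HEAP_XMAX_LOCK_ONLY) ? XLHL_XMAX_LOCK_ONLY : 0) |
           ((infomask & HEAP_XMAX_EXCL_LOCK) ? XLHL_XMAX_EXCL_LOCK : 0) |
           ((infomask & HEAP_XMAX_KEYSHR_LOCK) ? XLHL_XMAX_KEYSHR_LOCK : 0) |
           ((infomask2 & HEAP_KEYS_UPDATED) ? XLHL_KEYS_UPDATED : 0);
}

static void
unpack_infobits(uint8_t infobits, uint16_t *infomask, uint16_t *infomask2)
{
    /* mirrors fix_infomask_from_infobits(): clear, then re-set from WAL bits */
    *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
                   HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
    *infomask2 &= ~HEAP_KEYS_UPDATED;
    if (infobits & XLHL_XMAX_IS_MULTI)
        *infomask |= HEAP_XMAX_IS_MULTI;
    if (infobits & XLHL_XMAX_LOCK_ONLY)
        *infomask |= HEAP_XMAX_LOCK_ONLY;
    if (infobits & XLHL_XMAX_EXCL_LOCK)
        *infomask |= HEAP_XMAX_EXCL_LOCK;
    if (infobits & XLHL_XMAX_KEYSHR_LOCK)
        *infomask |= HEAP_XMAX_KEYSHR_LOCK;
    if (infobits & XLHL_KEYS_UPDATED)
        *infomask2 |= HEAP_KEYS_UPDATED;
}

int
main(void)
{
    /* an exclusive row lock taken by an UPDATE that changed key columns */
    uint16_t infomask = HEAP_XMAX_EXCL_LOCK;
    uint16_t infomask2 = HEAP_KEYS_UPDATED;
    uint16_t redo_infomask = 0;
    uint16_t redo_infomask2 = 0;
    uint8_t  bits = pack_infobits(infomask, infomask2);

    unpack_infobits(bits, &redo_infomask, &redo_infomask2);
    assert(redo_infomask == infomask && redo_infomask2 == infomask2);
    printf("infobits = 0x%02x\n", bits);
    return 0;
}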

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2350 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2351 {
2352  if (bistate->current_buf != InvalidBuffer)
2353  ReleaseBuffer(bistate->current_buf);
2354  FreeAccessStrategy(bistate->strategy);
2355  pfree(bistate);
2356 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:949
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34
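
GetBulkInsertState(), FreeBulkInsertState() and ReleaseBulkInsertStatePin() bracket the bulk-loading pattern used by callers such as CopyFrom() and ATRewriteTable(): allocate the state once, pass it to every heap_insert() call so the buffer access strategy and current target block are reused, and free it when done. A minimal sketch of that pattern is shown below; it assumes a relation that is already open, and make_next_tuple() is a hypothetical stand-in for whatever produces each HeapTuple.

/* Sketch only: the bulk-insert calling pattern, not a complete loader. */
void
bulk_load_sketch(Relation rel)
{
    BulkInsertState bistate = GetBulkInsertState();
    CommandId   cid = GetCurrentCommandId(true);
    HeapTuple   tup;

    while ((tup = make_next_tuple()) != NULL)   /* hypothetical helper */
    {
        /* reuses bistate's buffer ring and current target block */
        heap_insert(rel, tup, cid, 0, bistate);
        heap_freetuple(tup);
    }

    FreeBulkInsertState(bistate);   /* drops the pin and the access strategy */
}

ReleaseBulkInsertStatePin() drops only the pin on the state's current buffer, which lets a caller release the buffer early while keeping the strategy for further inserts.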

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 *  flags 
)
static

Definition at line 6359 of file heapam.c.

References Assert, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6362 {
6363  TransactionId xid = InvalidTransactionId;
6364  int i;
6365  MultiXactMember *members;
6366  int nmembers;
6367  bool need_replace;
6368  int nnewmembers;
6369  MultiXactMember *newmembers;
6370  bool has_lockers;
6371  TransactionId update_xid;
6372  bool update_committed;
6373 
6374  *flags = 0;
6375 
6376  /* We should only be called in Multis */
6377  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6378 
6379  if (!MultiXactIdIsValid(multi) ||
6380  HEAP_LOCKED_UPGRADED(t_infomask))
6381  {
6382  /* Ensure infomask bits are appropriately set/reset */
6383  *flags |= FRM_INVALIDATE_XMAX;
6384  return InvalidTransactionId;
6385  }
6386  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6387  {
6388  /*
6389  * This old multi cannot possibly have members still running. If it
6390  * was a locker only, it can be removed without any further
6391  * consideration; but if it contained an update, we might need to
6392  * preserve it.
6393  */
6394  Assert(!MultiXactIdIsRunning(multi,
6395  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6396  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6397  {
6398  *flags |= FRM_INVALIDATE_XMAX;
6399  xid = InvalidTransactionId; /* not strictly necessary */
6400  }
6401  else
6402  {
6403  /* replace multi by update xid */
6404  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6405 
6406  /* wasn't only a lock, xid needs to be valid */
6407  Assert(TransactionIdIsValid(xid));
6408 
6409  /*
6410  * If the xid is older than the cutoff, it has to have aborted,
6411  * otherwise the tuple would have gotten pruned away.
6412  */
6413  if (TransactionIdPrecedes(xid, cutoff_xid))
6414  {
6415  Assert(!TransactionIdDidCommit(xid));
6416  *flags |= FRM_INVALIDATE_XMAX;
6417  xid = InvalidTransactionId; /* not strictly necessary */
6418  }
6419  else
6420  {
6421  *flags |= FRM_RETURN_IS_XID;
6422  }
6423  }
6424 
6425  return xid;
6426  }
6427 
6428  /*
6429  * This multixact might have or might not have members still running, but
6430  * we know it's valid and is newer than the cutoff point for multis.
6431  * However, some member(s) of it may be below the cutoff for Xids, so we
6432  * need to walk the whole members array to figure out what to do, if
6433  * anything.
6434  */
6435 
6436  nmembers =
6437  GetMultiXactIdMembers(multi, &members, false,
6438  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6439  if (nmembers <= 0)
6440  {
6441  /* Nothing worth keeping */
6442  *flags |= FRM_INVALIDATE_XMAX;
6443  return InvalidTransactionId;
6444  }
6445 
6446  /* is there anything older than the cutoff? */
6447  need_replace = false;
6448  for (i = 0; i < nmembers; i++)
6449  {
6450  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6451  {
6452  need_replace = true;
6453  break;
6454  }
6455  }
6456 
6457  /*
6458  * In the simplest case, there is no member older than the cutoff; we can
6459  * keep the existing MultiXactId as is.
6460  */
6461  if (!need_replace)
6462  {
6463  *flags |= FRM_NOOP;
6464  pfree(members);
6465  return InvalidTransactionId;
6466  }
6467 
6468  /*
6469  * If the multi needs to be updated, figure out which members do we need
6470  * to keep.
6471  */
6472  nnewmembers = 0;
6473  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6474  has_lockers = false;
6475  update_xid = InvalidTransactionId;
6476  update_committed = false;
6477 
6478  for (i = 0; i < nmembers; i++)
6479  {
6480  /*
6481  * Determine whether to keep this member or ignore it.
6482  */
6483  if (ISUPDATE_from_mxstatus(members[i].status))
6484  {
6485  TransactionId xid = members[i].xid;
6486 
6487  /*
6488  * It's an update; should we keep it? If the transaction is known
6489  * aborted or crashed then it's okay to ignore it, otherwise not.
6490  * Note that an updater older than cutoff_xid cannot possibly be
6491  * committed, because HeapTupleSatisfiesVacuum would have returned
6492  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6493  *
6494  * As with all tuple visibility routines, it's critical to test
6495  * TransactionIdIsInProgress before TransactionIdDidCommit,
6496  * because of race conditions explained in detail in tqual.c.
6497  */
6498  if (TransactionIdIsCurrentTransactionId(xid) ||
6499  TransactionIdIsInProgress(xid))
6500  {
6501  Assert(!TransactionIdIsValid(update_xid));
6502  update_xid = xid;
6503  }
6504  else if (TransactionIdDidCommit(xid))
6505  {
6506  /*
6507  * The transaction committed, so we can tell caller to set
6508  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6509  * the transaction is not running.)
6510  */
6511  Assert(!TransactionIdIsValid(update_xid));
6512  update_committed = true;
6513  update_xid = xid;
6514  }
6515 
6516  /*
6517  * Not in progress, not committed -- must be aborted or crashed;
6518  * we can ignore it.
6519  */
6520 
6521  /*
6522  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6523  * update Xid cannot possibly be older than the xid cutoff.
6524  */
6525  Assert(!TransactionIdIsValid(update_xid) ||
6526  !TransactionIdPrecedes(update_xid, cutoff_xid));
6527 
6528  /*
6529  * If we determined that it's an Xid corresponding to an update
6530  * that must be retained, additionally add it to the list of
6531  * members of the new Multi, in case we end up using that. (We
6532  * might still decide to use only an update Xid and not a multi,
6533  * but it's easier to maintain the list as we walk the old members
6534  * list.)
6535  */
6536  if (TransactionIdIsValid(update_xid))
6537  newmembers[nnewmembers++] = members[i];
6538  }
6539  else
6540  {
6541  /* We only keep lockers if they are still running */
6542  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6543  TransactionIdIsInProgress(members[i].xid))
6544  {
6545  /* running locker cannot possibly be older than the cutoff */
6546  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6547  newmembers[nnewmembers++] = members[i];
6548  has_lockers = true;
6549  }
6550  }
6551  }
6552 
6553  pfree(members);
6554 
6555  if (nnewmembers == 0)
6556  {
6557  /* nothing worth keeping!? Tell caller to remove the whole thing */
6558  *flags |= FRM_INVALIDATE_XMAX;
6559  xid = InvalidTransactionId;
6560  }
6561  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6562  {
6563  /*
6564  * If there's a single member and it's an update, pass it back alone
6565  * without creating a new Multi. (XXX we could do this when there's a
6566  * single remaining locker, too, but that would complicate the API too
6567  * much; moreover, the case with the single updater is more
6568  * interesting, because those are longer-lived.)
6569  */
6570  Assert(nnewmembers == 1);
6571  *flags |= FRM_RETURN_IS_XID;
6572  if (update_committed)
6573  *flags |= FRM_MARK_COMMITTED;
6574  xid = update_xid;
6575  }
6576  else
6577  {
6578  /*
6579  * Create a new multixact with the surviving members of the previous
6580  * one, to set as new Xmax in the tuple.
6581  */
6582  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6583  *flags |= FRM_RETURN_IS_MULTI;
6584  }
6585 
6586  pfree(newmembers);
6587 
6588  return xid;
6589 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6333
#define FRM_MARK_COMMITTED
Definition: heapam.c:6335
uint32 TransactionId
Definition: c.h:445
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:243
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6931
void pfree(void *pointer)
Definition: mcxt.c:949
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6332
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6334
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
#define Assert(condition)
Definition: c.h:670
#define FRM_NOOP
Definition: heapam.c:6331
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3140
void * palloc(Size size)
Definition: mcxt.c:848
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
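
FreezeMultiXactId() reports its decision to the caller through the FRM_* flag bits plus the returned TransactionId: FRM_NOOP keeps the multixact, FRM_INVALIDATE_XMAX clears xmax, FRM_RETURN_IS_XID (optionally with FRM_MARK_COMMITTED) replaces it with a single updater XID, and FRM_RETURN_IS_MULTI replaces it with a newly built multixact. The sketch below shows how a caller inside heapam.c could interpret that contract; apply_freeze_result_sketch is an assumed name, and the xl_heap_freeze_tuple bookkeeping done by the real caller, heap_prepare_freeze_tuple(), is omitted.

/* Sketch only: interpreting the FRM_* result of FreezeMultiXactId(). */
static void
apply_freeze_result_sketch(HeapTupleHeader tuple,
                           TransactionId cutoff_xid, MultiXactId cutoff_multi)
{
    uint16      flags;
    TransactionId newxmax;

    newxmax = FreezeMultiXactId(HeapTupleHeaderGetRawXmax(tuple),
                                tuple->t_infomask,
                                cutoff_xid, cutoff_multi, &flags);

    if (flags & FRM_INVALIDATE_XMAX)
    {
        /* nothing worth keeping: xmax is cleared entirely */
    }
    else if (flags & FRM_RETURN_IS_XID)
    {
        /* a single updater XID survives; FRM_MARK_COMMITTED may accompany it
         * so the caller can also set HEAP_XMAX_COMMITTED */
    }
    else if (flags & FRM_RETURN_IS_MULTI)
    {
        /* a reduced MultiXactId was created to replace the old xmax */
    }
    else
    {
        /* FRM_NOOP: keep the existing MultiXactId unchanged */
    }

    (void) newxmax;             /* the sketch does not store it anywhere */
}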

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4507 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4508 {
4509  int retval;
4510 
4511  if (is_update)
4512  retval = tupleLockExtraInfo[mode].updstatus;
4513  else
4514  retval = tupleLockExtraInfo[mode].lockstatus;
4515 
4516  if (retval == -1)
4517  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4518  is_update ? "true" : "false");
4519 
4520  return (MultiXactStatus) retval;
4521 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 2336 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2337 {
2338  BulkInsertState bistate;
2339 
2340  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2341  bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2342  bistate->current_buf = InvalidBuffer;
2343  return bistate;
2344 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:848
Buffer current_buf
Definition: hio.h:34

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 *  new_infomask,
uint16 *  new_infomask2 
)
static

Definition at line 6850 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6852 {
6853  int nmembers;
6854  MultiXactMember *members;
6855  int i;
6856  uint16 bits = HEAP_XMAX_IS_MULTI;
6857  uint16 bits2 = 0;
6858  bool has_update = false;
6859  LockTupleMode strongest = LockTupleKeyShare;
6860 
6861  /*
6862  * We only use this in multis we just created, so they cannot be values
6863  * pre-pg_upgrade.
6864  */
6865  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6866 
6867  for (i = 0; i < nmembers; i++)
6868  {
6869  LockTupleMode mode;
6870 
6871  /*
6872  * Remember the strongest lock mode held by any member of the
6873  * multixact.
6874  */
6875  mode = TUPLOCK_from_mxstatus(members[i].status);
6876  if (mode > strongest)
6877  strongest = mode;
6878 
6879  /* See what other bits we need */
6880  switch (members[i].status)
6881  {
6882  case MultiXactStatusForKeyShare:
6883  case MultiXactStatusForShare:
6884  case MultiXactStatusForNoKeyUpdate:
6885  break;
6886 
6887  case MultiXactStatusForUpdate:
6888  bits2 |= HEAP_KEYS_UPDATED;
6889  break;
6890 
6891  case MultiXactStatusNoKeyUpdate:
6892  has_update = true;
6893  break;
6894 
6895  case MultiXactStatusUpdate:
6896  bits2 |= HEAP_KEYS_UPDATED;
6897  has_update = true;
6898  break;
6899  }
6900  }
6901 
6902  if (strongest == LockTupleExclusive ||
6903  strongest == LockTupleNoKeyExclusive)
6904  bits |= HEAP_XMAX_EXCL_LOCK;
6905  else if (strongest == LockTupleShare)
6906  bits |= HEAP_XMAX_SHR_LOCK;
6907  else if (strongest == LockTupleKeyShare)
6908  bits |= HEAP_XMAX_KEYSHR_LOCK;
6909 
6910  if (!has_update)
6911  bits |= HEAP_XMAX_LOCK_ONLY;
6912 
6913  if (nmembers > 0)
6914  pfree(members);
6915 
6916  *new_infomask = bits;
6917  *new_infomask2 = bits2;
6918 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:184
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:187
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:190
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:295
void pfree(void *pointer)
Definition: mcxt.c:949
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:186
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 9083 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9084 {
9085  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9086 
9087  switch (info & XLOG_HEAP_OPMASK)
9088  {
9089  case XLOG_HEAP2_CLEAN:
9090  heap_xlog_clean(record);
9091  break;
9092  case XLOG_HEAP2_FREEZE_PAGE:
9093  heap_xlog_freeze_page(record);
9094  break;
9095  case XLOG_HEAP2_CLEANUP_INFO:
9096  heap_xlog_cleanup_info(record);
9097  break;
9098  case XLOG_HEAP2_VISIBLE:
9099  heap_xlog_visible(record);
9100  break;
9101  case XLOG_HEAP2_MULTI_INSERT:
9102  heap_xlog_multi_insert(record);
9103  break;
9104  case XLOG_HEAP2_LOCK_UPDATED:
9105  heap_xlog_lock_updated(record);
9106  break;
9107  case XLOG_HEAP2_NEW_CID:
9108 
9109  /*
9110  * Nothing to do on a real replay, only used during logical
9111  * decoding.
9112  */
9113  break;
9114  case XLOG_HEAP2_REWRITE:
9115  heap_xlog_logical_rewrite(record);
9116  break;
9117  default:
9118  elog(PANIC, "heap2_redo: unknown op code %u", info);
9119  }
9120 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1117
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:294
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8944
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8161
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8426
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:222
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7935
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8046
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7956

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6107 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6108 {
6109  TransactionId xid = GetCurrentTransactionId();
6110  ItemPointer tid = &(tuple->t_self);
6111  ItemId lp;
6112  HeapTupleData tp;
6113  Page page;
6114  BlockNumber block;
6115  Buffer buffer;
6116 
6117  Assert(ItemPointerIsValid(tid));
6118 
6119  block = ItemPointerGetBlockNumber(tid);
6120  buffer = ReadBuffer(relation, block);
6121  page = BufferGetPage(buffer);
6122 
6123  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6124 
6125  /*
6126  * Page can't be all visible, we just inserted into it, and are still
6127  * running.
6128  */
6129  Assert(!PageIsAllVisible(page));
6130 
6131  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6132  Assert(ItemIdIsNormal(lp));
6133 
6134  tp.t_tableOid = RelationGetRelid(relation);
6135  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6136  tp.t_len = ItemIdGetLength(lp);
6137  tp.t_self = *tid;
6138 
6139  /*
6140  * Sanity check that the tuple really is a speculatively inserted tuple,
6141  * inserted by us.
6142  */
6143  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6144  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6145  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6146  elog(ERROR, "attempted to kill a non-speculative tuple");
6147  Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6148 
6149  /*
6150  * No need to check for serializable conflicts here. There is never a
6151  * need for a combocid, either. No need to extract replica identity, or
6152  * do anything special with infomask bits.
6153  */
6154 
6155  START_CRIT_SECTION();
6156 
6157  /*
6158  * The tuple will become DEAD immediately. Flag that this page
6159  * immediately is a candidate for pruning by setting xmin to
6160  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6161  * inventing a nicer API for this.
6162  */
6163  Assert(TransactionIdIsValid(RecentGlobalXmin));
6164  PageSetPrunable(page, RecentGlobalXmin);
6165 
6166  /* store transaction information of xact deleting the tuple */
6167  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6168  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6169 
6170  /*
6171  * Set the tuple header xmin to InvalidTransactionId. This makes the
6172  * tuple immediately invisible everyone. (In particular, to any
6173  * transactions waiting on the speculative token, woken up later.)
6174  */
6175  HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6176 
6177  /* Clear the speculative insertion token too */
6178  tp.t_data->t_ctid = tp.t_self;
6179 
6180  MarkBufferDirty(buffer);
6181 
6182  /*
6183  * XLOG stuff
6184  *
6185  * The WAL records generated here match heap_delete(). The same recovery
6186  * routines are used.
6187  */
6188  if (RelationNeedsWAL(relation))
6189  {
6190  xl_heap_delete xlrec;
6191  XLogRecPtr recptr;
6192 
6193  xlrec.flags = XLH_DELETE_IS_SUPER;
6194  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6195  tp.t_data->t_infomask2);
6196  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6197  xlrec.xmax = xid;
6198 
6199  XLogBeginInsert();
6200  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6201  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6202 
6203  /* No replica identity & replication origin logged */
6204 
6205  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6206 
6207  PageSetLSN(page, recptr);
6208  }
6209 
6210  END_CRIT_SECTION();
6211 
6212  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6213 
6214  if (HeapTupleHasExternal(&tp))
6215  {
6216  Assert(!IsToastRelation(relation));
6217  toast_delete(relation, &tp, true);
6218  }
6219 
6220  /*
6221  * Never need to mark tuple for invalidation, since catalogs don't support
6222  * speculative insertion
6223  */
6224 
6225  /* Now we can release the buffer */
6226  ReleaseBuffer(buffer);
6227 
6228  /* count deletion, as we counted the insertion too */
6229  pgstat_count_heap_delete(relation);
6230 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
#define HEAP_XMAX_BITS
Definition: htup_details.h:261
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2971
HeapTupleFields t_heap
Definition: htup_details.h:151
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:445
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:428
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:105
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:507
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:118
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_MOVED
Definition: htup_details.h:207
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
uint8 infobits_set
Definition: heapam_xlog.h:106
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:679
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:318

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5226 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5228 {
5229  if (*have_tuple_lock)
5230  return true;
5231 
5232  switch (wait_policy)
5233  {
5234  case LockWaitBlock:
5235  LockTupleTuplock(relation, tid, mode);
5236  break;
5237 
5238  case LockWaitSkip:
5239  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5240  return false;
5241  break;
5242 
5243  case LockWaitError:
5244  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5245  ereport(ERROR,
5246  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5247  errmsg("could not obtain lock on row in relation \"%s\"",
5248  RelationGetRelationName(relation))));
5249  break;
5250  }
5251  *have_tuple_lock = true;
5252 
5253  return true;
5254 }
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:181
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:185
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:445
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797

◆ heap_beginscan()

HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1397 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), check_default_allows_bound(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1399 {
1400  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1401  true, true, true, false, false, false);
1402 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443

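A minimal sketch of the usual scan pattern (not taken from heapam.c): begin a scan, iterate with heap_getnext(), then end it. The relation and registered snapshot are assumed to be set up by the caller, and process_tuple() is a hypothetical per-tuple callback.

    HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
    HeapTuple tuple;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        process_tuple(tuple);       /* hypothetical per-tuple work */

    heap_endscan(scan);
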
◆ heap_beginscan_bm()

HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1425 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

1427 {
1428  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1429  false, false, true, true, false, false);
1430 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443

◆ heap_beginscan_catalog()

HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1405 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1406 {
1407  Oid relid = RelationGetRelid(relation);
1408  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1409 
1410  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1411  true, true, true, false, false, true);
1412 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:440
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define RelationGetRelid(relation)
Definition: rel.h:425

◆ heap_beginscan_internal()

static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1443 of file heapam.c.

References initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

1452 {
1453  HeapScanDesc scan;
1454 
1455  /*
1456  * increment relation ref count while scanning relation
1457  *
1458  * This is just to make really sure the relcache entry won't go away while
1459  * the scan has a pointer to it. Caller should be holding the rel open
1460  * anyway, so this is redundant in all normal scenarios...
1461  */
1462  RelationIncrementReferenceCount(relation);
1463 
1464  /*
1465  * allocate and initialize scan descriptor
1466  */
1467  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1468 
1469  scan->rs_rd = relation;
1470  scan->rs_snapshot = snapshot;
1471  scan->rs_nkeys = nkeys;
1472  scan->rs_bitmapscan = is_bitmapscan;
1473  scan->rs_samplescan = is_samplescan;
1474  scan->rs_strategy = NULL; /* set in initscan */
1475  scan->rs_allow_strat = allow_strat;
1476  scan->rs_allow_sync = allow_sync;
1477  scan->rs_temp_snap = temp_snap;
1478  scan->rs_parallel = parallel_scan;
1479 
1480  /*
1481  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1482  */
1483  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1484 
1485  /*
1486  * For a seqscan in a serializable transaction, acquire a predicate lock
1487  * on the entire relation. This is required not only to lock all the
1488  * matching tuples, but also to conflict with new insertions into the
1489  * table. In an indexscan, we take page locks on the index pages covering
1490  * the range specified in the scan qual, but in a heap scan there is
1491  * nothing more fine-grained to lock. A bitmap scan is a different story,
1492  * there we have already scanned the index and locked the index pages
1493  * covering the predicate. But in that case we still have to lock any
1494  * matching heap tuples.
1495  */
1496  if (!is_bitmapscan)
1497  PredicateLockRelation(relation, snapshot);
1498 
1499  /* we only need to set this up once */
1500  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1501 
1502  /*
1503  * we do this here instead of in initscan() because heap_rescan also calls
1504  * initscan() and we don't want to allocate memory again
1505  */
1506  if (nkeys > 0)
1507  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1508  else
1509  scan->rs_key = NULL;
1510 
1511  initscan(scan, key, false);
1512 
1513  return scan;
1514 }
bool rs_allow_sync
Definition: relscan.h:56
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2498
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:69
bool rs_bitmapscan
Definition: relscan.h:52
bool rs_pageatatime
Definition: relscan.h:54
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:73
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:49
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:57
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2134
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:848
bool rs_allow_strat
Definition: relscan.h:55
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:216
bool rs_samplescan
Definition: relscan.h:53
#define RelationGetRelid(relation)
Definition: rel.h:425
ScanKey rs_key
Definition: relscan.h:51

◆ heap_beginscan_parallel()

HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1650 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, and RestoreSnapshot().

Referenced by ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

1651 {
1652  Snapshot snapshot;
1653 
1654  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1655  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1656  RegisterSnapshot(snapshot);
1657 
1658  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1659  true, true, true, false, false, true);
1660 }
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:42
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2127
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define Assert(condition)
Definition: c.h:670
#define RelationGetRelid(relation)
Definition: rel.h:425

◆ heap_beginscan_sampling()

HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1433 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

1436 {
1437  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1438  allow_strat, allow_sync, allow_pagemode,
1439  false, true, false);
1440 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443

◆ heap_beginscan_strat()

HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1415 of file heapam.c.

References heap_beginscan_internal().

Referenced by IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

1418 {
1419  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1420  allow_strat, allow_sync, true,
1421  false, false, false);
1422 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443

◆ heap_delete()

HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 3030 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3033 {
3034  HTSU_Result result;
3035  TransactionId xid = GetCurrentTransactionId();
3036  ItemId lp;
3037  HeapTupleData tp;
3038  Page page;
3039  BlockNumber block;
3040  Buffer buffer;
3041  Buffer vmbuffer = InvalidBuffer;
3042  TransactionId new_xmax;
3043  uint16 new_infomask,
3044  new_infomask2;
3045  bool have_tuple_lock = false;
3046  bool iscombo;
3047  bool all_visible_cleared = false;
3048  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3049  bool old_key_copied = false;
3050 
3051  Assert(ItemPointerIsValid(tid));
3052 
3053  /*
3054  * Forbid this during a parallel operation, lest it allocate a combocid.
3055  * Other workers might need that combocid for visibility checks, and we
3056  * have no provision for broadcasting it to them.
3057  */
3058  if (IsInParallelMode())
3059  ereport(ERROR,
3060  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3061  errmsg("cannot delete tuples during a parallel operation")));
3062 
3063  block = ItemPointerGetBlockNumber(tid);
3064  buffer = ReadBuffer(relation, block);
3065  page = BufferGetPage(buffer);
3066 
3067  /*
3068  * Before locking the buffer, pin the visibility map page if it appears to
3069  * be necessary. Since we haven't got the lock yet, someone else might be
3070  * in the middle of changing this, so we'll need to recheck after we have
3071  * the lock.
3072  */
3073  if (PageIsAllVisible(page))
3074  visibilitymap_pin(relation, block, &vmbuffer);
3075 
3076  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3077 
3078  /*
3079  * If we didn't pin the visibility map page and the page has become all
3080  * visible while we were busy locking the buffer, we'll have to unlock and
3081  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3082  * unfortunate, but hopefully shouldn't happen often.
3083  */
3084  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3085  {
3086  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3087  visibilitymap_pin(relation, block, &vmbuffer);
3088  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3089  }
3090 
3091  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3092  Assert(ItemIdIsNormal(lp));
3093 
3094  tp.t_tableOid = RelationGetRelid(relation);
3095  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3096  tp.t_len = ItemIdGetLength(lp);
3097  tp.t_self = *tid;
3098 
3099 l1:
3100  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3101 
3102  if (result == HeapTupleInvisible)
3103  {
3104  UnlockReleaseBuffer(buffer);
3105  ereport(ERROR,
3106  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3107  errmsg("attempted to delete invisible tuple")));
3108  }
3109  else if (result == HeapTupleBeingUpdated && wait)
3110  {
3111  TransactionId xwait;
3112  uint16 infomask;
3113 
3114  /* must copy state data before unlocking buffer */
3115  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3116  infomask = tp.t_data->t_infomask;
3117 
3118  /*
3119  * Sleep until concurrent transaction ends -- except when there's a
3120  * single locker and it's our own transaction. Note we don't care
3121  * which lock mode the locker has, because we need the strongest one.
3122  *
3123  * Before sleeping, we need to acquire tuple lock to establish our
3124  * priority for the tuple (see heap_lock_tuple). LockTuple will
3125  * release us when we are next-in-line for the tuple.
3126  *
3127  * If we are forced to "start over" below, we keep the tuple lock;
3128  * this arranges that we stay at the head of the line while rechecking
3129  * tuple state.
3130  */
3131  if (infomask & HEAP_XMAX_IS_MULTI)
3132  {
3133  /* wait for multixact */
3134  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3135  LockTupleExclusive))
3136  {
3137  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3138 
3139  /* acquire tuple lock, if necessary */
3140  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3141  LockWaitBlock, &have_tuple_lock);
3142 
3143  /* wait for multixact */
3144  MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
3145  relation, &(tp.t_self), XLTW_Delete,
3146  NULL);
3147  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3148 
3149  /*
3150  * If xwait had just locked the tuple then some other xact
3151  * could update this tuple before we get to this point. Check
3152  * for xmax change, and start over if so.
3153  */
3154  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3155  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3156  xwait))
3157  goto l1;
3158  }
3159 
3160  /*
3161  * You might think the multixact is necessarily done here, but not
3162  * so: it could have surviving members, namely our own xact or
3163  * other subxacts of this backend. It is legal for us to delete
3164  * the tuple in either case, however (the latter case is
3165  * essentially a situation of upgrading our former shared lock to
3166  * exclusive). We don't bother changing the on-disk hint bits
3167  * since we are about to overwrite the xmax altogether.
3168  */
3169  }
3170  else if (!TransactionIdIsCurrentTransactionId(xwait))
3171  {
3172  /*
3173  * Wait for regular transaction to end; but first, acquire tuple
3174  * lock.
3175  */
3176  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3177  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3178  LockWaitBlock, &have_tuple_lock);
3179  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3180  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3181 
3182  /*
3183  * xwait is done, but if xwait had just locked the tuple then some
3184  * other xact could update this tuple before we get to this point.
3185  * Check for xmax change, and start over if so.
3186  */
3187  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3188  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3189  xwait))
3190  goto l1;
3191 
3192  /* Otherwise check if it committed or aborted */
3193  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3194  }
3195 
3196  /*
3197  * We may overwrite if previous xmax aborted, or if it committed but
3198  * only locked the tuple without updating it.
3199  */
3200  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3201  HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3202  HeapTupleHeaderIsOnlyLocked(tp.t_data))
3203  result = HeapTupleMayBeUpdated;
3204  else
3205  result = HeapTupleUpdated;
3206  }
3207 
3208  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3209  {
3210  /* Perform additional check for transaction-snapshot mode RI updates */
3211  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3212  result = HeapTupleUpdated;
3213  }
3214 
3215  if (result != HeapTupleMayBeUpdated)
3216  {
3217  Assert(result == HeapTupleSelfUpdated ||
3218  result == HeapTupleUpdated ||
3219  result == HeapTupleBeingUpdated);
3220  Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3221  hufd->ctid = tp.t_data->t_ctid;
3222  hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3223  if (result == HeapTupleSelfUpdated)
3224  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3225  else
3226  hufd->cmax = InvalidCommandId;
3227  UnlockReleaseBuffer(buffer);
3228  if (have_tuple_lock)
3229  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3230  if (vmbuffer != InvalidBuffer)
3231  ReleaseBuffer(vmbuffer);
3232  return result;
3233  }
3234 
3235  /*
3236  * We're about to do the actual delete -- check for conflict first, to
3237  * avoid possibly having to roll back work we've just done.
3238  *
3239  * This is safe without a recheck as long as there is no possibility of
3240  * another process scanning the page between this check and the delete
3241  * being visible to the scan (i.e., an exclusive buffer content lock is
3242  * continuously held from this point until the tuple delete is visible).
3243  */
3244  CheckForSerializableConflictIn(relation, &tp, buffer);
3245 
3246  /* replace cid with a combo cid if necessary */
3247  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3248 
3249  /*
3250  * Compute replica identity tuple before entering the critical section so
3251  * we don't PANIC upon a memory allocation failure.
3252  */
3253  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3254 
3255  /*
3256  * If this is the first possibly-multixact-able operation in the current
3257  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3258  * certain that the transaction will never become a member of any older
3259  * MultiXactIds than that. (We have to do this even if we end up just
3260  * using our own TransactionId below, since some other backend could
3261  * incorporate our XID into a MultiXact immediately afterwards.)
3262  */
3263  MultiXactIdSetOldestMember();
3264 
3265  compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3266  tp.t_data->t_infomask, tp.t_data->t_infomask2,
3267  xid, LockTupleExclusive, true,
3268  &new_xmax, &new_infomask, &new_infomask2);
3269 
3270  START_CRIT_SECTION();
3271 
3272  /*
3273  * If this transaction commits, the tuple will become DEAD sooner or
3274  * later. Set flag that this page is a candidate for pruning once our xid
3275  * falls below the OldestXmin horizon. If the transaction finally aborts,
3276  * the subsequent page pruning will be a no-op and the hint will be
3277  * cleared.
3278  */
3279  PageSetPrunable(page, xid);
3280 
3281  if (PageIsAllVisible(page))
3282  {
3283  all_visible_cleared = true;
3284  PageClearAllVisible(page);
3285  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3286  vmbuffer, VISIBILITYMAP_VALID_BITS);
3287  }
3288 
3289  /* store transaction information of xact deleting the tuple */
3290  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3291  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3292  tp.t_data->t_infomask |= new_infomask;
3293  tp.t_data->t_infomask2 |= new_infomask2;
3294  HeapTupleHeaderClearHotUpdated(tp.t_data);
3295  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3296  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3297  /* Make sure there is no forward chain link in t_ctid */
3298  tp.t_data->t_ctid = tp.t_self;
3299 
3300  MarkBufferDirty(buffer);
3301 
3302  /*
3303  * XLOG stuff
3304  *
3305  * NB: heap_abort_speculative() uses the same xlog record and replay
3306  * routines.
3307  */
3308  if (RelationNeedsWAL(relation))
3309  {
3310  xl_heap_delete xlrec;
3311  XLogRecPtr recptr;
3312 
3313  /* For logical decode we need combocids to properly decode the catalog */
3314  if (RelationIsAccessibleInLogicalDecoding(relation))
3315  log_heap_new_cid(relation, &tp);
3316 
3317  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3318  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3319  tp.t_data->t_infomask2);
3320  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3321  xlrec.xmax = new_xmax;
3322 
3323  if (old_key_tuple != NULL)
3324  {
3325  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3326  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3327  else
3328  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3329  }
3330 
3331  XLogBeginInsert();
3332  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3333 
3334  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3335 
3336  /*
3337  * Log replica identity of the deleted tuple if there is one
3338  */
3339  if (old_key_tuple != NULL)
3340  {
3341  xl_heap_header xlhdr;
3342 
3343  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3344  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3345  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3346 
3347  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3348  XLogRegisterData((char *) old_key_tuple->t_data
3349  + SizeofHeapTupleHeader,
3350  old_key_tuple->t_len
3351  - SizeofHeapTupleHeader);
3352  }
3353 
3354  /* filtering by origin on a row level is much more efficient */
3355  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3356 
3357  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3358 
3359  PageSetLSN(page, recptr);
3360  }
3361 
3362  END_CRIT_SECTION();
3363 
3364  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3365 
3366  if (vmbuffer != InvalidBuffer)
3367  ReleaseBuffer(vmbuffer);
3368 
3369  /*
3370  * If the tuple has toasted out-of-line attributes, we need to delete
3371  * those items too. We have to do this before releasing the buffer
3372  * because we need to look at the contents of the tuple, but it's OK to
3373  * release the content lock on the buffer first.
3374  */
3375  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3376  relation->rd_rel->relkind != RELKIND_MATVIEW)
3377  {
3378  /* toast table entries should never be recursively toasted */
3379  Assert(!HeapTupleHasExternal(&tp));
3380  }
3381  else if (HeapTupleHasExternal(&tp))
3382  toast_delete(relation, &tp, false);
3383 
3384  /*
3385  * Mark tuple for invalidation from system caches at next command
3386  * boundary. We have to do this before releasing the buffer because we
3387  * need to look at the contents of the tuple.
3388  */
3389  CacheInvalidateHeapTuple(relation, &tp, NULL);
3390 
3391  /* Now we can release the buffer */
3392  ReleaseBuffer(buffer);
3393 
3394  /*
3395  * Release the lmgr tuple lock, if we had it.
3396  */
3397  if (have_tuple_lock)
3398  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3399 
3400  pgstat_count_heap_delete(relation);
3401 
3402  if (old_key_tuple != NULL && old_key_copied)
3403  heap_freetuple(old_key_tuple);
3404 
3405  return HeapTupleMayBeUpdated;
3406 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:364
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1605
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:175
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7747
#define HEAP_XMAX_BITS
Definition: htup_details.h:261
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2971
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:445
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2993
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:502
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define RELKIND_MATVIEW
Definition: pg_class.h:165
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:114
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4326
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:183
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:105
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:374
unsigned short uint16
Definition: c.h:295
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:906
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:198
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7163
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:379
HTSU_Result
Definition: snapshot.h:121
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:404
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5275
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define InvalidSnapshot
Definition: snapshot.h:25
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:568
#define InvalidCommandId
Definition: c.h:462
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2314
#define HEAP_MOVED
Definition: htup_details.h:207
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5226
TransactionId MultiXactId
Definition: c.h:455
#define PageClearAllVisible(page)
Definition: bufpage.h:385
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
uint8 infobits_set
Definition: heapam_xlog.h:106
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:7823
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:6996
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:679
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define RELKIND_RELATION
Definition: pg_class.h:160
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93

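A minimal sketch of a direct caller, roughly what simple_heap_delete() does: invoke heap_delete() in blocking mode and inspect the returned HTSU_Result. The error messages are illustrative only, and crosscheck snapshot handling is assumed away.

    HeapUpdateFailureData hufd;
    HTSU_Result result;

    result = heap_delete(rel, tid, GetCurrentCommandId(true),
                         InvalidSnapshot, true /* wait */, &hufd);
    switch (result)
    {
        case HeapTupleMayBeUpdated:
            break;                  /* delete succeeded */
        case HeapTupleSelfUpdated:
            elog(ERROR, "tuple already updated by self");
            break;
        case HeapTupleUpdated:
            elog(ERROR, "tuple concurrently updated");
            break;
        default:
            elog(ERROR, "unexpected heap_delete status: %u", result);
            break;
    }
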
◆ heap_endscan()

void heap_endscan ( HeapScanDesc  scan)

Definition at line 1565 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1566 {
1567  /* Note: no locking manipulations needed */
1568 
1569  /*
1570  * unpin scan buffers
1571  */
1572  if (BufferIsValid(scan->rs_cbuf))
1573  ReleaseBuffer(scan->rs_cbuf);
1574 
1575  /*
1576  * decrement relation reference count and free scan descriptor storage
1577  */
1578  RelationDecrementReferenceCount(scan->rs_rd);
1579 
1580  if (scan->rs_key)
1581  pfree(scan->rs_key);
1582 
1583  if (scan->rs_strategy != NULL)
1584  FreeAccessStrategy(scan->rs_strategy);
1585 
1586  if (scan->rs_temp_snap)
1587  UnregisterSnapshot(scan->rs_snapshot);
1588 
1589  pfree(scan);
1590 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:949
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2147
Snapshot rs_snapshot
Definition: relscan.h:49
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
bool rs_temp_snap
Definition: relscan.h:57
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
Buffer rs_cbuf
Definition: relscan.h:71
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:51

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6801 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6802 {
6803  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6804 
6805  if (frz->frzflags & XLH_FREEZE_XVAC)
6806  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6807 
6808  if (frz->frzflags & XLH_INVALID_XVAC)
6809  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6810 
6811  tuple->t_infomask = frz->t_infomask;
6812  tuple->t_infomask2 = frz->t_infomask2;
6813 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:422
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:379
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:298
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:294
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:293

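A minimal sketch of the prepare/execute pairing used by VACUUM, under the assumption that heap_prepare_freeze_tuple() has the two-cutoff argument list of this source tree (it differs between releases): the prepare step only decides what would change, and heap_execute_freeze_tuple() then applies it, both in normal operation and during redo.

    xl_heap_freeze_tuple frz;
    bool totally_frozen;

    /* cutoff_xid / cutoff_multi are assumed to come from vacuum_set_xid_limits() */
    if (heap_prepare_freeze_tuple(tuple->t_data, cutoff_xid, cutoff_multi,
                                  &frz, &totally_frozen))
        heap_execute_freeze_tuple(tuple->t_data, &frz);
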
◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1876 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1882 {
1883  ItemPointer tid = &(tuple->t_self);
1884  ItemId lp;
1885  Buffer buffer;
1886  Page page;
1887  OffsetNumber offnum;
1888  bool valid;
1889 
1890  /*
1891  * Fetch and pin the appropriate page of the relation.
1892  */
1893  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1894 
1895  /*
1896  * Need share lock on buffer to examine tuple commit status.
1897  */
1898  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1899  page = BufferGetPage(buffer);
1900  TestForOldSnapshot(snapshot, relation, page);
1901 
1902  /*
1903  * We'd better check for out-of-range offnum in case of VACUUM since the
1904  * TID was obtained.
1905  */
1906  offnum = ItemPointerGetOffsetNumber(tid);
1907  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1908  {
1909  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1910  if (keep_buf)
1911  *userbuf = buffer;
1912  else
1913  {
1914  ReleaseBuffer(buffer);
1915  *userbuf = InvalidBuffer;
1916  }
1917  tuple->t_data = NULL;
1918  return false;
1919  }
1920 
1921  /*
1922  * get the item line pointer corresponding to the requested tid
1923  */
1924  lp = PageGetItemId(page, offnum);
1925 
1926  /*
1927  * Must check for deleted tuple.
1928  */
1929  if (!ItemIdIsNormal(lp))
1930  {
1931  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1932  if (keep_buf)
1933  *userbuf = buffer;
1934  else
1935  {
1936  ReleaseBuffer(buffer);
1937  *userbuf = InvalidBuffer;
1938  }
1939  tuple->t_data = NULL;
1940  return false;
1941  }
1942 
1943  /*
1944  * fill in *tuple fields
1945  */
1946  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1947  tuple->t_len = ItemIdGetLength(lp);
1948  tuple->t_tableOid = RelationGetRelid(relation);
1949 
1950  /*
1951  * check time qualification of tuple, then release lock
1952  */
1953  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1954 
1955  if (valid)
1956  PredicateLockTuple(relation, tuple, snapshot);
1957 
1958  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1959 
1960  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1961 
1962  if (valid)
1963  {
1964  /*
1965  * All checks passed, so return the tuple as valid. Caller is now
1966  * responsible for releasing the buffer.
1967  */
1968  *userbuf = buffer;
1969 
1970  /* Count the successful fetch against appropriate rel, if any */
1971  if (stats_relation != NULL)
1972  pgstat_count_heap_fetch(stats_relation);
1973 
1974  return true;
1975  }
1976 
1977  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1978  if (keep_buf)
1979  *userbuf = buffer;
1980  else
1981  {
1982  ReleaseBuffer(buffer);
1983  *userbuf = InvalidBuffer;
1984  }
1985 
1986  return false;
1987 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1270
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2543
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74

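A minimal sketch of fetching a single tuple by TID (not from heapam.c): fill t_self, call heap_fetch(), and if it returns true release the pinned buffer once the tuple data is no longer needed. The snapshot is assumed to be already registered; keep_buf and the stats relation are not used here.

    HeapTupleData tuple;
    Buffer buf;

    tuple.t_self = *tid;            /* TID obtained elsewhere, e.g. from an index */
    if (heap_fetch(rel, snapshot, &tuple, &buf, false, NULL))
    {
        /* examine tuple.t_data while the buffer pin is held */
        ReleaseBuffer(buf);
    }
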
◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6016 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6017 {
6018  Buffer buffer;
6019  Page page;
6020  OffsetNumber offnum;
6021  ItemId lp = NULL;
6022  HeapTupleHeader htup;
6023 
6024  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6025  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6026  page = (Page) BufferGetPage(buffer);
6027 
6028  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6029  if (PageGetMaxOffsetNumber(page) >= offnum)
6030  lp = PageGetItemId(page, offnum);
6031 
6032  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6033  elog(ERROR, "invalid lp");
6034 
6035  htup = (HeapTupleHeader) PageGetItem(page, lp);
6036 
6037  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6038  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
6039  "invalid speculative token constant");
6040 
6041  /* NO EREPORT(ERROR) from here till changes are logged */
6042  START_CRIT_SECTION();
6043 
6044  Assert(HeapTupleHeaderIsSpeculative(tuple->t_data));
6045 
6046  MarkBufferDirty(buffer);
6047 
6048  /*
6049  * Replace the speculative insertion token with a real t_ctid, pointing to
6050  * itself like it does on regular tuples.
6051  */
6052  htup->t_ctid = tuple->t_self;
6053 
6054  /* XLOG stuff */
6055  if (RelationNeedsWAL(relation))
6056  {
6057  xl_heap_confirm xlrec;
6058  XLogRecPtr recptr;
6059 
6060  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6061 
6062  XLogBeginInsert();
6063 
6064  /* We want the same filtering on this as on a plain insert */
6065  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6066 
6067  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6068  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6069 
6070  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6071 
6072  PageSetLSN(page, recptr);
6073  }
6074 
6075  END_CRIT_SECTION();
6076 
6077  UnlockReleaseBuffer(buffer);
6078 }
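
For context, a rough sketch of the speculative-insertion protocol that ExecInsert() drives for INSERT ... ON CONFLICT is shown below. It is illustrative only: the speculative-insertion token and the heavyweight lock that the executor manages are omitted, and rel/tup/conflict are assumed to be supplied by the caller.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/xact.h"

    static void
    speculative_insert_sketch(Relation rel, HeapTuple tup, bool conflict)
    {
        /* insert the tuple carrying a speculative token instead of a real ctid */
        heap_insert(rel, tup, GetCurrentCommandId(true),
                    HEAP_INSERT_SPECULATIVE, NULL);

        if (!conflict)
            heap_finish_speculative(rel, tup);  /* token -> self-pointing t_ctid */
        else
            heap_abort_speculative(rel, tup);   /* "super-delete" the tuple */
    }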

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6822 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6824 {
6825  xl_heap_freeze_tuple frz;
6826  bool do_freeze;
6827  bool tuple_totally_frozen;
6828 
6829  do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6830  &frz, &tuple_totally_frozen);
6831 
6832  /*
6833  * Note that because this is not a WAL-logged operation, we don't need to
6834  * fill in the offset in the freeze record.
6835  */
6836 
6837  if (do_freeze)
6838  heap_execute_freeze_tuple(tuple, &frz);
6839  return do_freeze;
6840 }
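
The prepare/execute split referred to in the comment is what WAL-logged callers (such as VACUUM) use directly. A hedged two-step sketch, with the cutoffs assumed to come from the caller, looks like this:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/heapam_xlog.h"

    static bool
    freeze_in_two_steps(HeapTupleHeader tup, TransactionId cutoff_xid,
                        MultiXactId cutoff_multi)
    {
        xl_heap_freeze_tuple frz;
        bool        totally_frozen;

        /* Step 1: decide whether anything needs to change, and record how. */
        if (!heap_prepare_freeze_tuple(tup, cutoff_xid, cutoff_multi,
                                       &frz, &totally_frozen))
            return false;

        /* Step 2: apply it; a WAL-logged caller would also emit a freeze record. */
        heap_execute_freeze_tuple(tup, &frz);
        return true;
    }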

◆ heap_get_latest_tid()

void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2184 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2187 {
2188  BlockNumber blk;
2189  ItemPointerData ctid;
2190  TransactionId priorXmax;
2191 
2192  /* this is to avoid Assert failures on bad input */
2193  if (!ItemPointerIsValid(tid))
2194  return;
2195 
2196  /*
2197  * Since this can be called with user-supplied TID, don't trust the input
2198  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2199  * don't check t_ctid links again this way. Note that it would not do to
2200  * call it just once and save the result, either.)
2201  */
2202  blk = ItemPointerGetBlockNumber(tid);
2203  if (blk >= RelationGetNumberOfBlocks(relation))
2204  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2205  blk, RelationGetRelationName(relation));
2206 
2207  /*
2208  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2209  * need to examine, and *tid is the TID we will return if ctid turns out
2210  * to be bogus.
2211  *
2212  * Note that we will loop until we reach the end of the t_ctid chain.
2213  * Depending on the snapshot passed, there might be at most one visible
2214  * version of the row, but we don't try to optimize for that.
2215  */
2216  ctid = *tid;
2217  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2218  for (;;)
2219  {
2220  Buffer buffer;
2221  Page page;
2222  OffsetNumber offnum;
2223  ItemId lp;
2224  HeapTupleData tp;
2225  bool valid;
2226 
2227  /*
2228  * Read, pin, and lock the page.
2229  */
2230  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2231  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2232  page = BufferGetPage(buffer);
2233  TestForOldSnapshot(snapshot, relation, page);
2234 
2235  /*
2236  * Check for bogus item number. This is not treated as an error
2237  * condition because it can happen while following a t_ctid link. We
2238  * just assume that the prior tid is OK and return it unchanged.
2239  */
2240  offnum = ItemPointerGetOffsetNumber(&ctid);
2241  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2242  {
2243  UnlockReleaseBuffer(buffer);
2244  break;
2245  }
2246  lp = PageGetItemId(page, offnum);
2247  if (!ItemIdIsNormal(lp))
2248  {
2249  UnlockReleaseBuffer(buffer);
2250  break;
2251  }
2252 
2253  /* OK to access the tuple */
2254  tp.t_self = ctid;
2255  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2256  tp.t_len = ItemIdGetLength(lp);
2257  tp.t_tableOid = RelationGetRelid(relation);
2258 
2259  /*
2260  * After following a t_ctid link, we might arrive at an unrelated
2261  * tuple. Check for XMIN match.
2262  */
2263  if (TransactionIdIsValid(priorXmax) &&
2264  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2265  {
2266  UnlockReleaseBuffer(buffer);
2267  break;
2268  }
2269 
2270  /*
2271  * Check time qualification of tuple; if visible, set it as the new
2272  * result candidate.
2273  */
2274  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2275  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2276  if (valid)
2277  *tid = ctid;
2278 
2279  /*
2280  * If there's a valid t_ctid link, follow it, else we're done.
2281  */
2282  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2283  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2284  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2285  {
2286  UnlockReleaseBuffer(buffer);
2287  break;
2288  }
2289 
2290  ctid = tp.t_data->t_ctid;
2291  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2292  UnlockReleaseBuffer(buffer);
2293  } /* end of loop */
2294 }
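
A minimal caller sketch, in the spirit of currtid_byreloid(): hand in a starting TID and let the function advance it, in place, to the latest version visible to the given snapshot. The relation and TID are assumed to be valid.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/snapmgr.h"

    static ItemPointerData
    latest_visible_tid(Relation rel, ItemPointerData tid)
    {
        /* tid is updated in place as the t_ctid chain is chased */
        heap_get_latest_tid(rel, GetActiveSnapshot(), &tid);
        return tid;
    }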

◆ heap_getnext()

HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1808 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1809 {
1810  /* Note: no locking manipulations needed */
1811 
1812  HEAPDEBUG_1; /* heap_getnext( info ) */
1813 
1814  if (scan->rs_pageatatime)
1815  heapgettup_pagemode(scan, direction,
1816  scan->rs_nkeys, scan->rs_key);
1817  else
1818  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1819 
1820  if (scan->rs_ctup.t_data == NULL)
1821  {
1822  HEAPDEBUG_2; /* heap_getnext returning EOS */
1823  return NULL;
1824  }
1825 
1826  /*
1827  * if we get here it means we have a new current scan tuple, so point to
1828  * the proper return buffer and return the tuple.
1829  */
1830  HEAPDEBUG_3; /* heap_getnext returning tuple */
1831 
1832  pgstat_count_heap_getnext(scan->rs_rd);
1833 
1834  return &(scan->rs_ctup);
1835 }
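
The canonical usage pattern, visible in most of the callers listed above, is a begin/next/end loop. A self-contained sketch (assuming a backend context and an already-open relation) might look like:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/snapmgr.h"

    static int
    count_visible_tuples(Relation rel)
    {
        HeapScanDesc scan;
        HeapTuple   tuple;
        int         n = 0;

        scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
            n++;                    /* tuple is only valid until the next call */
        heap_endscan(scan);

        return n;
    }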

◆ heap_hot_search()

bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool all_dead 
)

Definition at line 2156 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), and ReleaseBuffer().

Referenced by _bt_check_unique(), and unique_key_recheck().

2158 {
2159  bool result;
2160  Buffer buffer;
2161  HeapTupleData heapTuple;
2162 
2163  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2164  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2165  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2166  &heapTuple, all_dead, true);
2167  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2168  ReleaseBuffer(buffer);
2169  return result;
2170 }
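
As _bt_check_unique() does during unique-index enforcement, a caller typically asks whether any member of the HOT chain at a given TID is visible, and consults *all_dead to learn whether the whole chain can be treated as killed. A hedged sketch:

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/snapmgr.h"

    static bool
    tid_has_visible_version(Relation rel, ItemPointer tid, bool *chain_dead)
    {
        /* On success, tid may be advanced to the visible member of the chain. */
        if (heap_hot_search(tid, rel, GetActiveSnapshot(), chain_dead))
            return true;

        /* *chain_dead: every chain member is dead to every transaction. */
        return false;
    }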

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 2011 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

2014 {
2015  Page dp = (Page) BufferGetPage(buffer);
2016  TransactionId prev_xmax = InvalidTransactionId;
2017  OffsetNumber offnum;
2018  bool at_chain_start;
2019  bool valid;
2020  bool skip;
2021 
2022  /* If this is not the first call, previous call returned a (live!) tuple */
2023  if (all_dead)
2024  *all_dead = first_call;
2025 
2027 
2029  offnum = ItemPointerGetOffsetNumber(tid);
2030  at_chain_start = first_call;
2031  skip = !first_call;
2032 
2033  heapTuple->t_self = *tid;
2034 
2035  /* Scan through possible multiple members of HOT-chain */
2036  for (;;)
2037  {
2038  ItemId lp;
2039 
2040  /* check for bogus TID */
2041  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2042  break;
2043 
2044  lp = PageGetItemId(dp, offnum);
2045 
2046  /* check for unused, dead, or redirected items */
2047  if (!ItemIdIsNormal(lp))
2048  {
2049  /* We should only see a redirect at start of chain */
2050  if (ItemIdIsRedirected(lp) && at_chain_start)
2051  {
2052  /* Follow the redirect */
2053  offnum = ItemIdGetRedirect(lp);
2054  at_chain_start = false;
2055  continue;
2056  }
2057  /* else must be end of chain */
2058  break;
2059  }
2060 
2061  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2062  heapTuple->t_len = ItemIdGetLength(lp);
2063  heapTuple->t_tableOid = RelationGetRelid(relation);
2064  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2065 
2066  /*
2067  * Shouldn't see a HEAP_ONLY tuple at chain start.
2068  */
2069  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2070  break;
2071 
2072  /*
2073  * The xmin should match the previous xmax value, else chain is
2074  * broken.
2075  */
2076  if (TransactionIdIsValid(prev_xmax) &&
2077  !TransactionIdEquals(prev_xmax,
2078  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2079  break;
2080 
2081  /*
2082  * When first_call is true (and thus, skip is initially false) we'll
2083  * return the first tuple we find. But on later passes, heapTuple
2084  * will initially be pointing to the tuple we returned last time.
2085  * Returning it again would be incorrect (and would loop forever), so
2086  * we skip it and return the next match we find.
2087  */
2088  if (!skip)
2089  {
2090  /*
2091  * For the benefit of logical decoding, have t_self point at the
2092  * element of the HOT chain we're currently investigating instead
2093  * of the root tuple of the HOT chain. This is important because
2094  * the *Satisfies routine for historical mvcc snapshots needs the
2095  * correct tid to decide about the visibility in some cases.
2096  */
2097  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2098 
2099  /* If it's visible per the snapshot, we must return it */
2100  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2101  CheckForSerializableConflictOut(valid, relation, heapTuple,
2102  buffer, snapshot);
2103  /* reset to original, non-redirected, tid */
2104  heapTuple->t_self = *tid;
2105 
2106  if (valid)
2107  {
2108  ItemPointerSetOffsetNumber(tid, offnum);
2109  PredicateLockTuple(relation, heapTuple, snapshot);
2110  if (all_dead)
2111  *all_dead = false;
2112  return true;
2113  }
2114  }
2115  skip = false;
2116 
2117  /*
2118  * If we can't see it, maybe no one else can either. At caller
2119  * request, check whether all chain members are dead to all
2120  * transactions.
2121  *
2122  * Note: if you change the criterion here for what is "dead", fix the
2123  * planner's get_actual_variable_range() function to match.
2124  */
2125  if (all_dead && *all_dead &&
2126  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2127  *all_dead = false;
2128 
2129  /*
2130  * Check to see if HOT chain continues past this tuple; if so fetch
2131  * the next offnum and loop around.
2132  */
2133  if (HeapTupleIsHotUpdated(heapTuple))
2134  {
2137  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2138  at_chain_start = false;
2139  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2140  }
2141  else
2142  break; /* end of chain */
2143  }
2144 
2145  return false;
2146 }

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6248 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6249 {
6250  Buffer buffer;
6251  Page page;
6252  OffsetNumber offnum;
6253  ItemId lp = NULL;
6254  HeapTupleHeader htup;
6255  uint32 oldlen;
6256  uint32 newlen;
6257 
6258  /*
6259  * For now, parallel operations are required to be strictly read-only.
6260  * Unlike a regular update, this should never create a combo CID, so it
6261  * might be possible to relax this restriction, but not without more
6262  * thought and testing. It's not clear that it would be useful, anyway.
6263  */
6264  if (IsInParallelMode())
6265  ereport(ERROR,
6266  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6267  errmsg("cannot update tuples during a parallel operation")));
6268 
6269  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6270  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6271  page = (Page) BufferGetPage(buffer);
6272 
6273  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6274  if (PageGetMaxOffsetNumber(page) >= offnum)
6275  lp = PageGetItemId(page, offnum);
6276 
6277  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6278  elog(ERROR, "invalid lp");
6279 
6280  htup = (HeapTupleHeader) PageGetItem(page, lp);
6281 
6282  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6283  newlen = tuple->t_len - tuple->t_data->t_hoff;
6284  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6285  elog(ERROR, "wrong tuple length");
6286 
6287  /* NO EREPORT(ERROR) from here till changes are logged */
6288  START_CRIT_SECTION();
6289 
6290  memcpy((char *) htup + htup->t_hoff,
6291  (char *) tuple->t_data + tuple->t_data->t_hoff,
6292  newlen);
6293 
6294  MarkBufferDirty(buffer);
6295 
6296  /* XLOG stuff */
6297  if (RelationNeedsWAL(relation))
6298  {
6299  xl_heap_inplace xlrec;
6300  XLogRecPtr recptr;
6301 
6302  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6303 
6304  XLogBeginInsert();
6305  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6306 
6307  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6308  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6309 
6310  /* inplace updates aren't decoded atm, don't log the origin */
6311 
6312  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6313 
6314  PageSetLSN(page, recptr);
6315  }
6316 
6317  END_CRIT_SECTION();
6318 
6319  UnlockReleaseBuffer(buffer);
6320 
6321  /*
6322  * Send out shared cache inval if necessary. Note that because we only
6323  * pass the new version of the tuple, this mustn't be used for any
6324  * operations that could change catcache lookup keys. But we aren't
6325  * bothering with index updates either, so that's true a fortiori.
6326  */
6327  if (!IsBootstrapProcessingMode())
6328  CacheInvalidateHeapTuple(relation, tuple, NULL);
6329 }
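
A representative use, loosely modelled on vac_update_relstats(): overwrite a fixed-width pg_class field non-transactionally. The relation handle and OID are assumed to be supplied by the caller; only same-length overwrites are legal, as the "wrong tuple length" check above enforces.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/htup_details.h"
    #include "catalog/pg_class.h"
    #include "utils/syscache.h"

    static void
    set_relpages_inplace(Relation pg_class_rel, Oid relid, int32 newpages)
    {
        HeapTuple   ctup;
        Form_pg_class pgcform;

        ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
        if (!HeapTupleIsValid(ctup))
            elog(ERROR, "pg_class entry for relation %u vanished", relid);

        pgcform = (Form_pg_class) GETSTRUCT(ctup);
        pgcform->relpages = newpages;   /* fixed-width field, safe to overwrite */

        /* overwrite the existing tuple version in place and WAL-log the change */
        heap_inplace_update(pg_class_rel, ctup);
        heap_freetuple(ctup);
    }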

◆ heap_insert()

Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2413 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

2415 {
2416  TransactionId xid = GetCurrentTransactionId();
2417  HeapTuple heaptup;
2418  Buffer buffer;
2419  Buffer vmbuffer = InvalidBuffer;
2420  bool all_visible_cleared = false;
2421 
2422  /*
2423  * Fill in tuple header fields, assign an OID, and toast the tuple if
2424  * necessary.
2425  *
2426  * Note: below this point, heaptup is the data we actually intend to store
2427  * into the relation; tup is the caller's original untoasted data.
2428  */
2429  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2430 
2431  /*
2432  * Find buffer to insert this tuple into. If the page is all visible,
2433  * this will also pin the requisite visibility map page.
2434  */
2435  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2436  InvalidBuffer, options, bistate,
2437  &vmbuffer, NULL);
2438 
2439  /*
2440  * We're about to do the actual insert -- but check for conflict first, to
2441  * avoid possibly having to roll back work we've just done.
2442  *
2443  * This is safe without a recheck as long as there is no possibility of
2444  * another process scanning the page between this check and the insert
2445  * being visible to the scan (i.e., an exclusive buffer content lock is
2446  * continuously held from this point until the tuple insert is visible).
2447  *
2448  * For a heap insert, we only need to check for table-level SSI locks. Our
2449  * new tuple can't possibly conflict with existing tuple locks, and heap
2450  * page locks are only consolidated versions of tuple locks; they do not
2451  * lock "gaps" as index page locks do. So we don't need to specify a
2452  * buffer when making the call, which makes for a faster check.
2453  */
2454  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2455 
2456  /* NO EREPORT(ERROR) from here till changes are logged */
2457  START_CRIT_SECTION();
2458 
2459  RelationPutHeapTuple(relation, buffer, heaptup,
2460  (options & HEAP_INSERT_SPECULATIVE) != 0);
2461 
2462  if (PageIsAllVisible(BufferGetPage(buffer)))
2463  {
2464  all_visible_cleared = true;
2465  PageClearAllVisible(BufferGetPage(buffer));
2466  visibilitymap_clear(relation,
2467  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2468  vmbuffer, VISIBILITYMAP_VALID_BITS);
2469  }
2470 
2471  /*
2472  * XXX Should we set PageSetPrunable on this page ?
2473  *
2474  * The inserting transaction may eventually abort thus making this tuple
2475  * DEAD and hence available for pruning. Though we don't want to optimize
2476  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2477  * aborted tuple will never be pruned until next vacuum is triggered.
2478  *
2479  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2480  */
2481 
2482  MarkBufferDirty(buffer);
2483 
2484  /* XLOG stuff */
2485  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2486  {
2487  xl_heap_insert xlrec;
2488  xl_heap_header xlhdr;
2489  XLogRecPtr recptr;
2490  Page page = BufferGetPage(buffer);
2491  uint8 info = XLOG_HEAP_INSERT;
2492  int bufflags = 0;
2493 
2494  /*
2495  * If this is a catalog, we need to transmit combocids to properly
2496  * decode, so log that as well.
2497  */
2498  if (RelationIsAccessibleInLogicalDecoding(relation))
2499  log_heap_new_cid(relation, heaptup);
2500 
2501  /*
2502  * If this is the single and first tuple on page, we can reinit the
2503  * page instead of restoring the whole thing. Set flag, and hide
2504  * buffer references from XLogInsert.
2505  */
2506  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2507  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2508  {
2509  info |= XLOG_HEAP_INIT_PAGE;
2510  bufflags |= REGBUF_WILL_INIT;
2511  }
2512 
2513  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2514  xlrec.flags = 0;
2515  if (all_visible_cleared)
2516  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2517  if (options & HEAP_INSERT_SPECULATIVE)
2518  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2519  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2520 
2521  /*
2522  * For logical decoding, we need the tuple even if we're doing a full
2523  * page write, so make sure it's included even if we take a full-page
2524  * image. (XXX We could alternatively store a pointer into the FPW).
2525  */
2526  if (RelationIsLogicallyLogged(relation))
2527  {
2528  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2529  bufflags |= REGBUF_KEEP_DATA;
2530  }
2531 
2532  XLogBeginInsert();
2533  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2534 
2535  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2536  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2537  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2538 
2539  /*
2540  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2541  * write the whole page to the xlog, we don't need to store
2542  * xl_heap_header in the xlog.
2543  */
2544  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2545  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2546  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2547  XLogRegisterBufData(0,
2548  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2549  heaptup->t_len - SizeofHeapTupleHeader);
2550 
2551  /* filtering by origin on a row level is much more efficient */
2552  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2553 
2554  recptr = XLogInsert(RM_HEAP_ID, info);
2555 
2556  PageSetLSN(page, recptr);
2557  }
2558 
2559  END_CRIT_SECTION();
2560 
2561  UnlockReleaseBuffer(buffer);
2562  if (vmbuffer != InvalidBuffer)
2563  ReleaseBuffer(vmbuffer);
2564 
2565  /*
2566  * If tuple is cachable, mark it for invalidation from the caches in case
2567  * we abort. Note it is OK to do this after releasing the buffer, because
2568  * the heaptup data structure is all in local memory, not in the shared
2569  * buffer.
2570  */
2571  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2572 
2573  /* Note: speculative insertions are counted too, even if aborted later */
2574  pgstat_count_heap_insert(relation, 1);
2575 
2576  /*
2577  * If heaptup is a private copy, release it. Don't forget to copy t_self
2578  * back to the caller's image, too.
2579  */
2580  if (heaptup != tup)
2581  {
2582  tup->t_self = heaptup->t_self;
2583  heap_freetuple(heaptup);
2584  }
2585 
2586  return HeapTupleGetOid(tup);
2587 }
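
A minimal, hypothetical caller sketch for a plain WAL-logged insert (no bulk-insert state, no speculative or skip-WAL options); the Datum/null arrays are assumed to match the relation's tuple descriptor. Real callers normally also maintain indexes, e.g. through the executor or CatalogTupleInsert().

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/htup_details.h"
    #include "access/xact.h"
    #include "utils/rel.h"

    static void
    insert_one_tuple(Relation rel, Datum *values, bool *isnull)
    {
        HeapTuple   tup;

        tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);

        /* options = 0: ordinary, WAL-logged, non-speculative insertion */
        heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);

        heap_freetuple(tup);
    }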

◆ heap_lock_tuple()

HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
HeapUpdateFailureData hufd 
)

Definition at line 4560 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().
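
The body below is long, so a hedged caller-side sketch first helps frame it (in the spirit of GetTupleForTrigger() or ExecLockRows()): lock one row FOR UPDATE, blocking if necessary, and release the pin that heap_lock_tuple always hands back. Handling of the HeapTupleUpdated / HeapTupleSelfUpdated outcomes (EvalPlanQual and friends) is omitted.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "access/xact.h"
    #include "storage/bufmgr.h"

    static bool
    lock_row_for_update(Relation rel, ItemPointer tid)
    {
        HeapTupleData tuple;
        Buffer      buf;
        HeapUpdateFailureData hufd;
        HTSU_Result res;

        tuple.t_self = *tid;
        res = heap_lock_tuple(rel, &tuple, GetCurrentCommandId(true),
                              LockTupleExclusive, LockWaitBlock,
                              false,    /* don't follow the update chain */
                              &buf, &hufd);

        ReleaseBuffer(buf);     /* the function always returns a pinned buffer */

        return (res == HeapTupleMayBeUpdated);
    }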

4564 {
4565  HTSU_Result result;
4566  ItemPointer tid = &(tuple->t_self);
4567  ItemId lp;
4568  Page page;
4569  Buffer vmbuffer = InvalidBuffer;
4570  BlockNumber block;
4571  TransactionId xid,
4572  xmax;
4573  uint16 old_infomask,
4574  new_infomask,
4575  new_infomask2;
4576  bool first_time = true;
4577  bool have_tuple_lock = false;
4578  bool cleared_all_frozen = false;
4579 
4580  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4581  block = ItemPointerGetBlockNumber(tid);
4582 
4583  /*
4584  * Before locking the buffer, pin the visibility map page if it appears to
4585  * be necessary. Since we haven't got the lock yet, someone else might be
4586  * in the middle of changing this, so we'll need to recheck after we have
4587  * the lock.
4588  */
4589  if (PageIsAllVisible(BufferGetPage(*buffer)))
4590  visibilitymap_pin(relation, block, &vmbuffer);
4591 
4592  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4593 
4594  page = BufferGetPage(*buffer);
4595  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4596  Assert(ItemIdIsNormal(lp));
4597 
4598  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4599  tuple->t_len = ItemIdGetLength(lp);
4600  tuple->t_tableOid = RelationGetRelid(relation);
4601 
4602 l3:
4603  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4604 
4605  if (result == HeapTupleInvisible)
4606  {
4607  /*
4608  * This is possible, but only when locking a tuple for ON CONFLICT
4609  * UPDATE. We return this value here rather than throwing an error in
4610  * order to give that case the opportunity to throw a more specific
4611  * error.
4612  */
4613  result = HeapTupleInvisible;
4614  goto out_locked;
4615  }
4616  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4617  {
4618  TransactionId xwait;
4619  uint16 infomask;
4620  uint16 infomask2;
4621  bool require_sleep;
4622  ItemPointerData t_ctid;
4623 
4624  /* must copy state data before unlocking buffer */
4625  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4626  infomask = tuple->t_data->t_infomask;
4627  infomask2 = tuple->t_data->t_infomask2;
4628  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4629 
4631 
4632  /*
4633  * If any subtransaction of the current top transaction already holds
4634  * a lock as strong as or stronger than what we're requesting, we
4635  * effectively hold the desired lock already. We *must* succeed
4636  * without trying to take the tuple lock, else we will deadlock
4637  * against anyone wanting to acquire a stronger lock.
4638  *
4639  * Note we only do this the first time we loop on the HTSU result;
4640  * there is no point in testing in subsequent passes, because
4641  * evidently our own transaction cannot have acquired a new lock after
4642  * the first time we checked.
4643  */
4644  if (first_time)
4645  {
4646  first_time = false;
4647 
4648  if (infomask & HEAP_XMAX_IS_MULTI)
4649  {
4650  int i;
4651  int nmembers;
4652  MultiXactMember *members;
4653 
4654  /*
4655  * We don't need to allow old multixacts here; if that had
4656  * been the case, HeapTupleSatisfiesUpdate would have returned
4657  * MayBeUpdated and we wouldn't be here.
4658  */
4659  nmembers =
4660  GetMultiXactIdMembers(xwait, &members, false,
4661  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4662 
4663  for (i = 0; i < nmembers; i++)
4664  {
4665  /* only consider members of our own transaction */
4666  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4667  continue;
4668 
4669  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4670  {
4671  pfree(members);
4672  result = HeapTupleMayBeUpdated;
4673  goto out_unlocked;
4674  }
4675  }
4676 
4677  if (members)
4678  pfree(members);
4679  }
4680  else if (TransactionIdIsCurrentTransactionId(xwait))
4681  {
4682  switch (mode)
4683  {
4684  case LockTupleKeyShare:
4685  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4686  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4687  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4688  result = HeapTupleMayBeUpdated;
4689  goto out_unlocked;
4690  case LockTupleShare:
4691  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4692  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4693  {
4694  result = HeapTupleMayBeUpdated;
4695  goto out_unlocked;
4696  }
4697  break;
4699  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4700  {
4701  result = HeapTupleMayBeUpdated;
4702  goto out_unlocked;
4703  }
4704  break;
4705  case LockTupleExclusive:
4706  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4707  infomask2 & HEAP_KEYS_UPDATED)
4708  {
4709  result = HeapTupleMayBeUpdated;
4710  goto out_unlocked;
4711  }
4712  break;
4713  }
4714  }
4715  }
4716 
4717  /*
4718  * Initially assume that we will have to wait for the locking
4719  * transaction(s) to finish. We check various cases below in which
4720  * this can be turned off.
4721  */
4722  require_sleep = true;
4723  if (mode == LockTupleKeyShare)
4724  {
4725  /*
4726  * If we're requesting KeyShare, and there's no update present, we
4727  * don't need to wait. Even if there is an update, we can still
4728  * continue if the key hasn't been modified.
4729  *
4730  * However, if there are updates, we need to walk the update chain
4731  * to mark future versions of the row as locked, too. That way,
4732  * if somebody deletes that future version, we're protected
4733  * against the key going away. This locking of future versions
4734  * could block momentarily, if a concurrent transaction is
4735  * deleting a key; or it could return a value to the effect that
4736  * the transaction deleting the key has already committed. So we
4737  * do this before re-locking the buffer; otherwise this would be
4738  * prone to deadlocks.
4739  *
4740  * Note that the TID we're locking was grabbed before we unlocked
4741  * the buffer. For it to change while we're not looking, the
4742  * other properties we're testing for below after re-locking the
4743  * buffer would also change, in which case we would restart this
4744  * loop above.
4745  */
4746  if (!(infomask2 & HEAP_KEYS_UPDATED))
4747  {
4748  bool updated;
4749 
4750  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4751 
4752  /*
4753  * If there are updates, follow the update chain; bail out if
4754  * that cannot be done.
4755  */
4756  if (follow_updates && updated)
4757  {
4758  HTSU_Result res;
4759 
4760  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4762  mode);
4763  if (res != HeapTupleMayBeUpdated)
4764  {
4765  result = res;
4766  /* recovery code expects to have buffer lock held */
4768  goto failed;
4769  }
4770  }
4771 
4773 
4774  /*
4775  * Make sure it's still an appropriate lock, else start over.
4776  * Also, if it wasn't updated before we released the lock, but
4777  * is updated now, we start over too; the reason is that we
4778  * now need to follow the update chain to lock the new
4779  * versions.
4780  */
4781  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4782  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4783  !updated))
4784  goto l3;
4785 
4786  /* Things look okay, so we can skip sleeping */
4787  require_sleep = false;
4788 
4789  /*
4790  * Note we allow Xmax to change here; other updaters/lockers
4791  * could have modified it before we grabbed the buffer lock.
4792  * However, this is not a problem, because with the recheck we
4793  * just did we ensure that they still don't conflict with the
4794  * lock we want.
4795  */
4796  }
4797  }
4798  else if (mode == LockTupleShare)
4799  {
4800  /*
4801  * If we're requesting Share, we can similarly avoid sleeping if
4802  * there's no update and no exclusive lock present.
4803  */
4804  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4805  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4806  {
4808 
4809  /*
4810  * Make sure it's still an appropriate lock, else start over.
4811  * See above about allowing xmax to change.
4812  */
4813  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4815  goto l3;
4816  require_sleep = false;
4817  }
4818  }
4819  else if (mode == LockTupleNoKeyExclusive)
4820  {
4821  /*
4822  * If we're requesting NoKeyExclusive, we might also be able to
4823  * avoid sleeping; just ensure that there is no conflicting lock
4824  * already acquired.
4825  */
4826  if (infomask & HEAP_XMAX_IS_MULTI)
4827  {
4828  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4829  mode))
4830  {
4831  /*
4832  * No conflict, but if the xmax changed under us in the
4833  * meantime, start over.
4834  */
4836  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4838  xwait))
4839  goto l3;
4840 
4841  /* otherwise, we're good */
4842  require_sleep = false;
4843  }
4844  }
4845  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4846  {
4848 
4849  /* if the xmax changed in the meantime, start over */
4850  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4853  xwait))
4854  goto l3;
4855  /* otherwise, we're good */
4856  require_sleep = false;
4857  }
4858  }
4859 
4860  /*
4861  * As a check independent from those above, we can also avoid sleeping
4862  * if the current transaction is the sole locker of the tuple. Note
4863  * that the strength of the lock already held is irrelevant; this is
4864  * not about recording the lock in Xmax (which will be done regardless
4865  * of this optimization, below). Also, note that the cases where we
4866  * hold a lock stronger than we are requesting are already handled
4867  * above by not doing anything.
4868  *
4869  * Note we only deal with the non-multixact case here; MultiXactIdWait
4870  * is well equipped to deal with this situation on its own.
4871  */
4872  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4874  {
4875  /* ... but if the xmax changed in the meantime, start over */
4877  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4879  xwait))
4880  goto l3;
4882  require_sleep = false;
4883  }
4884 
4885  /*
4886  * Time to sleep on the other transaction/multixact, if necessary.
4887  *
4888  * If the other transaction is an update that's already committed,
4889  * then sleeping cannot possibly do any good: if we're required to
4890  * sleep, get out to raise an error instead.
4891  *
4892  * By here, we either have already acquired the buffer exclusive lock,
4893  * or we must wait for the locking transaction or multixact; so below
4894  * we ensure that we grab buffer lock after the sleep.
4895  */
4896  if (require_sleep && result == HeapTupleUpdated)
4897  {
4899  goto failed;
4900  }
4901  else if (require_sleep)
4902  {
4903  /*
4904  * Acquire tuple lock to establish our priority for the tuple, or
4905  * die trying. LockTuple will release us when we are next-in-line
4906  * for the tuple. We must do this even if we are share-locking.
4907  *
4908  * If we are forced to "start over" below, we keep the tuple lock;
4909  * this arranges that we stay at the head of the line while
4910  * rechecking tuple state.
4911  */
4912  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4913  &have_tuple_lock))
4914  {
4915  /*
4916  * This can only happen if wait_policy is Skip and the lock
4917  * couldn't be obtained.
4918  */
4919  result = HeapTupleWouldBlock;
4920  /* recovery code expects to have buffer lock held */
4922  goto failed;
4923  }
4924 
4925  if (infomask & HEAP_XMAX_IS_MULTI)
4926  {
4928 
4929  /* We only ever lock tuples, never update them */
4930  if (status >= MultiXactStatusNoKeyUpdate)
4931  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4932 
4933  /* wait for multixact to end, or die trying */
4934  switch (wait_policy)
4935  {
4936  case LockWaitBlock:
4937  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4938  relation, &tuple->t_self, XLTW_Lock, NULL);
4939  break;
4940  case LockWaitSkip:
4942  status, infomask, relation,
4943  NULL))
4944  {
4945  result = HeapTupleWouldBlock;
4946  /* recovery code expects to have buffer lock held */
4948  goto failed;
4949  }
4950  break;
4951  case LockWaitError:
4953  status, infomask, relation,
4954  NULL))
4955  ereport(ERROR,
4956  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4957  errmsg("could not obtain lock on row in relation \"%s\"",
4958  RelationGetRelationName(relation))));
4959 
4960  break;
4961  }
4962 
4963  /*
4964  * Of course, the multixact might not be done here: if we're
4965  * requesting a light lock mode, other transactions with light
4966  * locks could still be alive, as well as locks owned by our
4967  * own xact or other subxacts of this backend. We need to
4968  * preserve the surviving MultiXact members. Note that it
4969  * isn't absolutely necessary in the latter case, but doing so
4970  * is simpler.
4971  */
4972  }
4973  else
4974  {
4975  /* wait for regular transaction to end, or die trying */
4976  switch (wait_policy)
4977  {
4978  case LockWaitBlock:
4979  XactLockTableWait(xwait, relation, &tuple->t_self,
4980  XLTW_Lock);
4981  break;
4982  case LockWaitSkip:
4983  if (!ConditionalXactLockTableWait(xwait))
4984  {
4985  result = HeapTupleWouldBlock;
4986  /* recovery code expects to have buffer lock held */
4988  goto failed;
4989  }
4990  break;
4991  case LockWaitError:
4992  if (!ConditionalXactLockTableWait(xwait))
4993  ereport(ERROR,
4994  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4995  errmsg("could not obtain lock on row in relation \"%s\"",
4996  RelationGetRelationName(relation))));
4997  break;
4998  }
4999  }
5000 
5001  /* if there are updates, follow the update chain */
5002  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5003  {
5004  HTSU_Result res;
5005 
5006  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5007  GetCurrentTransactionId(),
5008  mode);
5009  if (res != HeapTupleMayBeUpdated)
5010  {
5011  result = res;
5012  /* recovery code expects to have buffer lock held */
5013  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5014  goto failed;
5015  }
5016  }
5017 
5018  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5019 
5020  /*
5021  * xwait is done, but if xwait had just locked the tuple then some
5022  * other xact could update this tuple before we get to this point.
5023  * Check for xmax change, and start over if so.
5024  */
5025  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5026  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5027  xwait))
5028  goto l3;
5029 
5030  if (!(infomask & HEAP_XMAX_IS_MULTI))
5031  {
5032  /*
5033  * Otherwise check if it committed or aborted. Note we cannot
5034  * be here if the tuple was only locked by somebody who didn't
5035  * conflict with us; that would have been handled above. So
5036  * that transaction must necessarily be gone by now. But
5037  * don't check for this in the multixact case, because some
5038  * locker transactions might still be running.
5039  */
5040  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5041  }
5042  }
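The three LockWaitPolicy branches above correspond to the SQL-level row-locking options (a note added for orientation, not present in heapam.c): LockWaitBlock is the default behaviour of SELECT ... FOR UPDATE/FOR SHARE, LockWaitError implements NOWAIT, and LockWaitSkip implements SKIP LOCKED.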
5043 
5044  /* By here, we're certain that we hold buffer exclusive lock again */
5045 
5046  /*
5047  * We may lock if previous xmax aborted, or if it committed but only
5048  * locked the tuple without updating it; or if we didn't have to wait
5049  * at all for whatever reason.
5050  */
5051  if (!require_sleep ||
5052  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5053  HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5054  HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5055  result = HeapTupleMayBeUpdated;
5056  else
5057  result = HeapTupleUpdated;
5058  }
5059 
5060 failed:
5061  if (result != HeapTupleMayBeUpdated)
5062  {
5063  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5064  result == HeapTupleWouldBlock);
5065  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5066  hufd->ctid = tuple->t_data->t_ctid;
5067  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5068  if (result == HeapTupleSelfUpdated)
5069  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5070  else
5071  hufd->cmax = InvalidCommandId;
5072  goto out_locked;
5073  }
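To make the failure data filled in above concrete, the following caller-side sketch (not part of heapam.c; the wrapper name lock_row_or_report and its parameters are hypothetical, and the usual backend includes such as access/heapam.h are assumed) shows how a caller typically interprets the HTSU_Result together with hufd:

static bool
lock_row_or_report(Relation relation, HeapTuple tuple, CommandId cid,
                   Buffer *buffer)
{
    HeapUpdateFailureData hufd;
    HTSU_Result res;

    /* tuple->t_self must already identify the row version to lock */
    res = heap_lock_tuple(relation, tuple, cid,
                          LockTupleExclusive, LockWaitBlock,
                          true,     /* follow_updates */
                          buffer, &hufd);
    /* on return *buffer is pinned; the caller must ReleaseBuffer() it */

    switch (res)
    {
        case HeapTupleMayBeUpdated:
            return true;        /* lock obtained */
        case HeapTupleSelfUpdated:
            /* row was updated by a later command of this same transaction */
            return false;
        case HeapTupleUpdated:
            /* hufd.ctid names the successor version, hufd.xmax its updater */
            return false;
        case HeapTupleWouldBlock:
            /* only reachable with LockWaitSkip */
            return false;
        default:
            elog(ERROR, "unexpected heap_lock_tuple result: %d", res);
            return false;       /* not reached */
    }
}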
5074 
5075  /*
5076  * If we didn't pin the visibility map page and the page has become all
5077  * visible while we were busy locking the buffer, or during some
5078  * subsequent window during which we had it unlocked, we'll have to unlock
5079  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5080  * unfortunate, especially since we'll now have to recheck whether the
5081  * tuple has been locked or updated under us, but hopefully it won't
5082  * happen very often.
5083  */
5084  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5085  {
5086  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5087  visibilitymap_pin(relation, block, &vmbuffer);
5088  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5089  goto l3;
5090  }
5091 
5092  xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5093  old_infomask = tuple->t_data->t_infomask;
5094 
5095  /*
5096  * If this is the first possibly-multixact-able operation in the current
5097  * transaction, set my per-backend OldestMemberMXactId setting. We can be
5098  * certain that the transaction will never become a member of any older
5099  * MultiXactIds than that. (We have to do this even if we end up just
5100  * using our own TransactionId below, since some other backend could
5101  * incorporate our XID into a MultiXact immediately afterwards.)
5102  */
5103  MultiXactIdSetOldestMember();
5104 
5105  /*
5106  * Compute the new xmax and infomask to store into the tuple. Note we do
5107  * not modify the tuple just yet, because that would leave it in the wrong
5108  * state if multixact.c elogs.
5109  */
5110  compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5111  GetCurrentTransactionId(), mode, false,
5112  &xid, &new_infomask, &new_infomask2);
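As an illustration of what the call above can produce (the XIDs here are made up): if the tuple is currently key-share locked by transaction 1001 and the current transaction 1005 asks for LockTupleShare, the xid returned is a fresh MultiXactId whose members are 1001 (key share) and 1005 (share), and new_infomask carries HEAP_XMAX_IS_MULTI and HEAP_XMAX_LOCK_ONLY; if there is no other live locker, xid is simply 1005 with the single-locker lock bits and no MultiXact is created.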
5113 
5114  START_CRIT_SECTION();
5115 
5116  /*
5117  * Store transaction information of xact locking the tuple.
5118  *
5119  * Note: Cmax is meaningless in this context, so don't set it; this avoids
5120  * possibly generating a useless combo CID. Moreover, if we're locking a
5121  * previously updated tuple, it's important to preserve the Cmax.
5122  *
5123  * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5124  * we would break the HOT chain.
5125  */
5126  tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5127  tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5128  tuple->t_data->t_infomask |= new_infomask;
5129  tuple->t_data->t_infomask2 |= new_infomask2;
5130  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5131  HeapTupleHeaderClearHotUpdated(tuple->t_data);
5132  HeapTupleHeaderSetXmax(tuple->t_data, xid);
5133 
5134  /*
5135  * Make sure there is no forward chain link in t_ctid. Note that in the
5136  * cases where the tuple has been updated, we must not overwrite t_ctid,
5137  * because it was set by the updater. Moreover, if the tuple has been
5138  * updated, we need to follow the update chain to lock the new versions of
5139  * the tuple as well.
5140  */
5141  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5142  tuple->t_data->t_ctid = *tid;
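One consequence of the assignment above (noted here for readers, not part of the source): a tuple that is only locked ends up with t_ctid pointing at itself, which is how update-chain followers such as heap_get_latest_tid recognize the end of a chain, by comparing t_ctid against the tuple's own TID.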
5143 
5144  /* Clear only the all-frozen bit on visibility map if needed */
5145  if (PageIsAllVisible(page) &&
5146  visibilitymap_clear(relation, block, vmbuffer,
5147  VISIBILITYMAP_ALL_FROZEN))
5148  cleared_all_frozen = true;
5149 
5150 
5151  MarkBufferDirty(*buffer);
5152 