PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static void heap_parallelscan_startblock_init (HeapScanDesc scan)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
void heap_parallelscan_reinitialize (ParallelHeapScanDesc parallel_scan)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
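
Note: the scan entry points listed above (heap_open(), heap_beginscan(), heap_getnext(), heap_endscan(), and relatives) are normally driven in a simple open/scan/close pattern. The sketch below is illustrative only and is not part of heapam.c; it assumes an already-started transaction, a valid table OID, and the usual backend environment, and it omits error handling. The helper name count_heap_tuples is invented for the example.

#include "postgres.h"

#include "access/heapam.h"
#include "utils/snapmgr.h"

/* Illustrative helper (not in heapam.c): count the tuples visible to the
 * active snapshot using a plain forward sequential scan. */
static int64
count_heap_tuples(Oid tableoid)
{
    Relation    rel = heap_open(tableoid, AccessShareLock);
    HeapScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
    HeapTuple   tuple;
    int64       ntuples = 0;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        ntuples++;

    heap_endscan(scan);
    heap_close(rel, AccessShareLock);

    return ntuples;
}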
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 185 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6355 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6358 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 6354 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6357 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6356 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().
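
Note: the FRM_* flags above are output flags of FreezeMultiXactId(), telling the caller what to do with the tuple's xmax. The hedged sketch below shows one way a caller could interpret them; it is a simplification, not the actual heap_prepare_freeze_tuple() logic (which also adjusts infomask bits and handles FRM_MARK_COMMITTED), and the helper name choose_frozen_xmax is invented. Because FreezeMultiXactId() is static, such code could only live inside heapam.c.

/* Illustrative helper (not in heapam.c): map FreezeMultiXactId()'s result
 * and flags to the xmax value that would be stored when freezing. */
static TransactionId
choose_frozen_xmax(MultiXactId multi, uint16 t_infomask,
                   TransactionId relfrozenxid, TransactionId relminmxid,
                   TransactionId cutoff_xid, MultiXactId cutoff_multi)
{
    uint16      flags;
    TransactionId newxmax;

    newxmax = FreezeMultiXactId(multi, t_infomask, relfrozenxid, relminmxid,
                                cutoff_xid, cutoff_multi, &flags);

    if (flags & FRM_INVALIDATE_XMAX)
        return InvalidTransactionId;    /* xmax can simply be cleared */
    if (flags & (FRM_RETURN_IS_XID | FRM_RETURN_IS_MULTI))
        return newxmax;                 /* plain updater XID or reduced multixact */

    Assert(flags & FRM_NOOP);
    return multi;                       /* keep the existing multixact as is */
}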

◆ HEAPDEBUG_1

#define HEAPDEBUG_1

Definition at line 1824 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_2

#define HEAPDEBUG_2

Definition at line 1825 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_3

#define HEAPDEBUG_3

Definition at line 1826 of file heapam.c.

Referenced by heap_getnext().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

Definition at line 173 of file heapam.c.

Referenced by Do_MultiXactIdWait(), and DoesMultiXactIdConflict().

◆ LockTupleTuplock

#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 181 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])

Definition at line 203 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 183 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2994 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2995 {
2996  return
2997  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2998  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2999  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
3000  /* note we ignore HEAP_XMAX_SHR_LOCK here */
3001  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
3002  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
3003  XLHL_KEYS_UPDATED : 0);
3004 }
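
Note: compute_infobits() condenses the tuple infomask lock bits into the smaller XLHL_* representation carried in WAL records; fix_infomask_from_infobits() (documented below) performs the reverse mapping at redo time. The example below is hypothetical and, because the function is static, could only be compiled inside heapam.c.

/* Illustrative only (not in heapam.c): a tuple that is key-share locked
 * by a multixact, condensed into WAL infobits. */
static void
compute_infobits_example(void)
{
    uint16      infomask = HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY | HEAP_XMAX_KEYSHR_LOCK;
    uint16      infomask2 = 0;
    uint8       infobits = compute_infobits(infomask, infomask2);

    Assert(infobits == (XLHL_XMAX_IS_MULTI | XLHL_XMAX_LOCK_ONLY | XLHL_XMAX_KEYSHR_LOCK));
}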

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 5298 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5303 {
5304  TransactionId new_xmax;
5305  uint16 new_infomask,
5306  new_infomask2;
5307 
5308  Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5309 
5310 l5:
5311  new_infomask = 0;
5312  new_infomask2 = 0;
5313  if (old_infomask & HEAP_XMAX_INVALID)
5314  {
5315  /*
5316  * No previous locker; we just insert our own TransactionId.
5317  *
5318  * Note that it's critical that this case be the first one checked,
5319  * because there are several blocks below that come back to this one
5320  * to implement certain optimizations; old_infomask might contain
5321  * other dirty bits in those cases, but we don't really care.
5322  */
5323  if (is_update)
5324  {
5325  new_xmax = add_to_xmax;
5326  if (mode == LockTupleExclusive)
5327  new_infomask2 |= HEAP_KEYS_UPDATED;
5328  }
5329  else
5330  {
5331  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5332  switch (mode)
5333  {
5334  case LockTupleKeyShare:
5335  new_xmax = add_to_xmax;
5336  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5337  break;
5338  case LockTupleShare:
5339  new_xmax = add_to_xmax;
5340  new_infomask |= HEAP_XMAX_SHR_LOCK;
5341  break;
5342  case LockTupleNoKeyExclusive:
5343  new_xmax = add_to_xmax;
5344  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5345  break;
5346  case LockTupleExclusive:
5347  new_xmax = add_to_xmax;
5348  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5349  new_infomask2 |= HEAP_KEYS_UPDATED;
5350  break;
5351  default:
5352  new_xmax = InvalidTransactionId; /* silence compiler */
5353  elog(ERROR, "invalid lock mode");
5354  }
5355  }
5356  }
5357  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5358  {
5359  MultiXactStatus new_status;
5360 
5361  /*
5362  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5363  * cross-check.
5364  */
5365  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5366 
5367  /*
5368  * A multixact together with LOCK_ONLY set but neither lock bit set
5369  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5370  * anymore. This check is critical for databases upgraded by
5371  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5372  * that such multis are never passed.
5373  */
5374  if (HEAP_LOCKED_UPGRADED(old_infomask))
5375  {
5376  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5377  old_infomask |= HEAP_XMAX_INVALID;
5378  goto l5;
5379  }
5380 
5381  /*
5382  * If the XMAX is already a MultiXactId, then we need to expand it to
5383  * include add_to_xmax; but if all the members were lockers and are
5384  * all gone, we can do away with the IS_MULTI bit and just set
5385  * add_to_xmax as the only locker/updater. If all lockers are gone
5386  * and we have an updater that aborted, we can also do without a
5387  * multi.
5388  *
5389  * The cost of doing GetMultiXactIdMembers would be paid by
5390  * MultiXactIdExpand if we weren't to do this, so this check is not
5391  * incurring extra work anyhow.
5392  */
5393  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5394  {
5395  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5396  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5397  old_infomask)))
5398  {
5399  /*
5400  * Reset these bits and restart; otherwise fall through to
5401  * create a new multi below.
5402  */
5403  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5404  old_infomask |= HEAP_XMAX_INVALID;
5405  goto l5;
5406  }
5407  }
5408 
5409  new_status = get_mxact_status_for_lock(mode, is_update);
5410 
5411  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5412  new_status);
5413  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5414  }
5415  else if (old_infomask & HEAP_XMAX_COMMITTED)
5416  {
5417  /*
5418  * It's a committed update, so we need to preserve him as updater of
5419  * the tuple.
5420  */
5421  MultiXactStatus status;
5422  MultiXactStatus new_status;
5423 
5424  if (old_infomask2 & HEAP_KEYS_UPDATED)
5425  status = MultiXactStatusUpdate;
5426  else
5427  status = MultiXactStatusNoKeyUpdate;
5428 
5429  new_status = get_mxact_status_for_lock(mode, is_update);
5430 
5431  /*
5432  * since it's not running, it's obviously impossible for the old
5433  * updater to be identical to the current one, so we need not check
5434  * for that case as we do in the block above.
5435  */
5436  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5437  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5438  }
5439  else if (TransactionIdIsInProgress(xmax))
5440  {
5441  /*
5442  * If the XMAX is a valid, in-progress TransactionId, then we need to
5443  * create a new MultiXactId that includes both the old locker or
5444  * updater and our own TransactionId.
5445  */
5446  MultiXactStatus new_status;
5447  MultiXactStatus old_status;
5448  LockTupleMode old_mode;
5449 
5450  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5451  {
5452  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5453  old_status = MultiXactStatusForKeyShare;
5454  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5455  old_status = MultiXactStatusForShare;
5456  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5457  {
5458  if (old_infomask2 & HEAP_KEYS_UPDATED)
5459  old_status = MultiXactStatusForUpdate;
5460  else
5461  old_status = MultiXactStatusForNoKeyUpdate;
5462  }
5463  else
5464  {
5465  /*
5466  * LOCK_ONLY can be present alone only when a page has been
5467  * upgraded by pg_upgrade. But in that case,
5468  * TransactionIdIsInProgress() should have returned false. We
5469  * assume it's no longer locked in this case.
5470  */
5471  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5472  old_infomask |= HEAP_XMAX_INVALID;
5473  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5474  goto l5;
5475  }
5476  }
5477  else
5478  {
5479  /* it's an update, but which kind? */
5480  if (old_infomask2 & HEAP_KEYS_UPDATED)
5481  old_status = MultiXactStatusUpdate;
5482  else
5483  old_status = MultiXactStatusNoKeyUpdate;
5484  }
5485 
5486  old_mode = TUPLOCK_from_mxstatus(old_status);
5487 
5488  /*
5489  * If the lock to be acquired is for the same TransactionId as the
5490  * existing lock, there's an optimization possible: consider only the
5491  * strongest of both locks as the only one present, and restart.
5492  */
5493  if (xmax == add_to_xmax)
5494  {
5495  /*
5496  * Note that it's not possible for the original tuple to be
5497  * updated: we wouldn't be here because the tuple would have been
5498  * invisible and we wouldn't try to update it. As a subtlety,
5499  * this code can also run when traversing an update chain to lock
5500  * future versions of a tuple. But we wouldn't be here either,
5501  * because the add_to_xmax would be different from the original
5502  * updater.
5503  */
5504  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5505 
5506  /* acquire the strongest of both */
5507  if (mode < old_mode)
5508  mode = old_mode;
5509  /* mustn't touch is_update */
5510 
5511  old_infomask |= HEAP_XMAX_INVALID;
5512  goto l5;
5513  }
5514 
5515  /* otherwise, just fall back to creating a new multixact */
5516  new_status = get_mxact_status_for_lock(mode, is_update);
5517  new_xmax = MultiXactIdCreate(xmax, old_status,
5518  add_to_xmax, new_status);
5519  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5520  }
5521  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5522  TransactionIdDidCommit(xmax))
5523  {
5524  /*
5525  * It's a committed update, so we gotta preserve him as updater of the
5526  * tuple.
5527  */
5528  MultiXactStatus status;
5529  MultiXactStatus new_status;
5530 
5531  if (old_infomask2 & HEAP_KEYS_UPDATED)
5532  status = MultiXactStatusUpdate;
5533  else
5534  status = MultiXactStatusNoKeyUpdate;
5535 
5536  new_status = get_mxact_status_for_lock(mode, is_update);
5537 
5538  /*
5539  * since it's not running, it's obviously impossible for the old
5540  * updater to be identical to the current one, so we need not check
5541  * for that case as we do in the block above.
5542  */
5543  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5544  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5545  }
5546  else
5547  {
5548  /*
5549  * Can get here iff the locking/updating transaction was running when
5550  * the infomask was extracted from the tuple, but finished before
5551  * TransactionIdIsInProgress got to run. Deal with it as if there was
5552  * no locker at all in the first place.
5553  */
5554  old_infomask |= HEAP_XMAX_INVALID;
5555  goto l5;
5556  }
5557 
5558  *result_infomask = new_infomask;
5559  *result_infomask2 = new_infomask2;
5560  *result_xmax = new_xmax;
5561 }

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7280 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7282 {
7283  return Do_MultiXactIdWait(multi, status, infomask, true,
7284  rel, NULL, XLTW_None, remaining);
7285 }

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 7180 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7184 {
7185  bool result = true;
7186  MultiXactMember *members;
7187  int nmembers;
7188  int remain = 0;
7189 
7190  /* for pre-pg_upgrade tuples, no need to sleep at all */
7191  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7192  GetMultiXactIdMembers(multi, &members, false,
7193  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7194 
7195  if (nmembers >= 0)
7196  {
7197  int i;
7198 
7199  for (i = 0; i < nmembers; i++)
7200  {
7201  TransactionId memxid = members[i].xid;
7202  MultiXactStatus memstatus = members[i].status;
7203 
7204  if (TransactionIdIsCurrentTransactionId(memxid))
7205  {
7206  remain++;
7207  continue;
7208  }
7209 
7210  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7211  LOCKMODE_from_mxstatus(status)))
7212  {
7213  if (remaining && TransactionIdIsInProgress(memxid))
7214  remain++;
7215  continue;
7216  }
7217 
7218  /*
7219  * This member conflicts with our multi, so we have to sleep (or
7220  * return failure, if asked to avoid waiting.)
7221  *
7222  * Note that we don't set up an error context callback ourselves,
7223  * but instead we pass the info down to XactLockTableWait. This
7224  * might seem a bit wasteful because the context is set up and
7225  * tore down for each member of the multixact, but in reality it
7226  * should be barely noticeable, and it avoids duplicate code.
7227  */
7228  if (nowait)
7229  {
7230  result = ConditionalXactLockTableWait(memxid);
7231  if (!result)
7232  break;
7233  }
7234  else
7235  XactLockTableWait(memxid, rel, ctid, oper);
7236  }
7237 
7238  pfree(members);
7239  }
7240 
7241  if (remaining)
7242  *remaining = remain;
7243 
7244  return result;
7245 }

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode 
)
static

Definition at line 7091 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

7093 {
7094  int nmembers;
7095  MultiXactMember *members;
7096  bool result = false;
7097  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7098 
7099  if (HEAP_LOCKED_UPGRADED(infomask))
7100  return false;
7101 
7102  nmembers = GetMultiXactIdMembers(multi, &members, false,
7103  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7104  if (nmembers >= 0)
7105  {
7106  int i;
7107 
7108  for (i = 0; i < nmembers; i++)
7109  {
7110  TransactionId memxid;
7111  LOCKMODE memlockmode;
7112 
7113  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7114 
7115  /* ignore members that don't conflict with the lock we want */
7116  if (!DoLockModesConflict(memlockmode, wanted))
7117  continue;
7118 
7119  /* ignore members from current xact */
7120  memxid = members[i].xid;
7121  if (TransactionIdIsCurrentTransactionId(memxid))
7122  continue;
7123 
7124  if (ISUPDATE_from_mxstatus(members[i].status))
7125  {
7126  /* ignore aborted updaters */
7127  if (TransactionIdDidAbort(memxid))
7128  continue;
7129  }
7130  else
7131  {
7132  /* ignore lockers-only that are no longer in progress */
7133  if (!TransactionIdIsInProgress(memxid))
7134  continue;
7135  }
7136 
7137  /*
7138  * Whatever remains are either live lockers that conflict with our
7139  * wanted lock, and updaters that are not aborted. Those conflict
7140  * with what we want, so return true.
7141  */
7142  result = true;
7143  break;
7144  }
7145  pfree(members);
7146  }
7147 
7148  return result;
7149 }

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_modified,
bool *  copy 
)
static

Definition at line 7918 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7919 {
7920  TupleDesc desc = RelationGetDescr(relation);
7921  Oid replidindex;
7922  Relation idx_rel;
7923  TupleDesc idx_desc;
7924  char replident = relation->rd_rel->relreplident;
7925  HeapTuple key_tuple = NULL;
7926  bool nulls[MaxHeapAttributeNumber];
7927  Datum values[MaxHeapAttributeNumber];
7928  int natt;
7929 
7930  *copy = false;
7931 
7932  if (!RelationIsLogicallyLogged(relation))
7933  return NULL;
7934 
7935  if (replident == REPLICA_IDENTITY_NOTHING)
7936  return NULL;
7937 
7938  if (replident == REPLICA_IDENTITY_FULL)
7939  {
7940  /*
7941  * When logging the entire old tuple, it very well could contain
7942  * toasted columns. If so, force them to be inlined.
7943  */
7944  if (HeapTupleHasExternal(tp))
7945  {
7946  *copy = true;
7947  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7948  }
7949  return tp;
7950  }
7951 
7952  /* if the key hasn't changed and we're only logging the key, we're done */
7953  if (!key_changed)
7954  return NULL;
7955 
7956  /* find the replica identity index */
7957  replidindex = RelationGetReplicaIndex(relation);
7958  if (!OidIsValid(replidindex))
7959  {
7960  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7961  RelationGetRelationName(relation));
7962  return NULL;
7963  }
7964 
7965  idx_rel = RelationIdGetRelation(replidindex);
7966  idx_desc = RelationGetDescr(idx_rel);
7967 
7968  /* deform tuple, so we have fast access to columns */
7969  heap_deform_tuple(tp, desc, values, nulls);
7970 
7971  /* set all columns to NULL, regardless of whether they actually are */
7972  memset(nulls, 1, sizeof(nulls));
7973 
7974  /*
7975  * Now set all columns contained in the index to NOT NULL, they cannot
7976  * currently be NULL.
7977  */
7978  for (natt = 0; natt < idx_desc->natts; natt++)
7979  {
7980  int attno = idx_rel->rd_index->indkey.values[natt];
7981 
7982  if (attno < 0)
7983  {
7984  /*
7985  * The OID column can appear in an index definition, but that's
7986  * OK, because we always copy the OID if present (see below).
7987  * Other system columns may not.
7988  */
7989  if (attno == ObjectIdAttributeNumber)
7990  continue;
7991  elog(ERROR, "system column in index");
7992  }
7993  nulls[attno - 1] = false;
7994  }
7995 
7996  key_tuple = heap_form_tuple(desc, values, nulls);
7997  *copy = true;
7998  RelationClose(idx_rel);
7999 
8000  /*
8001  * Always copy oids if the table has them, even if not included in the
8002  * index. The space in the logged tuple is used anyway, so there's little
8003  * point in not including the information.
8004  */
8005  if (relation->rd_rel->relhasoids)
8006  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
8007 
8008  /*
8009  * If the tuple, which by here only contains indexed columns, still has
8010  * toasted columns, force them to be inlined. This is somewhat unlikely
8011  * since there's limits on the size of indexed columns, so we don't
8012  * duplicate toast_flatten_tuple()s functionality in the above loop over
8013  * the indexed columns, even if it would be more efficient.
8014  */
8015  if (HeapTupleHasExternal(key_tuple))
8016  {
8017  HeapTuple oldtup = key_tuple;
8018 
8019  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
8020  heap_freetuple(oldtup);
8021  }
8022 
8023  return key_tuple;
8024 }

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 *  infomask,
uint16 *  infomask2 
)
static

Definition at line 8314 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8315 {
8316  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8317  HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
8318  *infomask2 &= ~HEAP_KEYS_UPDATED;
8319 
8320  if (infobits & XLHL_XMAX_IS_MULTI)
8321  *infomask |= HEAP_XMAX_IS_MULTI;
8322  if (infobits & XLHL_XMAX_LOCK_ONLY)
8323  *infomask |= HEAP_XMAX_LOCK_ONLY;
8324  if (infobits & XLHL_XMAX_EXCL_LOCK)
8325  *infomask |= HEAP_XMAX_EXCL_LOCK;
8326  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8327  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8328  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8329 
8330  if (infobits & XLHL_KEYS_UPDATED)
8331  *infomask2 |= HEAP_KEYS_UPDATED;
8332 }

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2373 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2374 {
2375  if (bistate->current_buf != InvalidBuffer)
2376  ReleaseBuffer(bistate->current_buf);
2377  FreeAccessStrategy(bistate->strategy);
2378  pfree(bistate);
2379 }

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 *  flags 
)
static

Definition at line 6382 of file heapam.c.

References Assert, ereport, errcode(), errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6386 {
6387  TransactionId xid = InvalidTransactionId;
6388  int i;
6389  MultiXactMember *members;
6390  int nmembers;
6391  bool need_replace;
6392  int nnewmembers;
6393  MultiXactMember *newmembers;
6394  bool has_lockers;
6395  TransactionId update_xid;
6396  bool update_committed;
6397 
6398  *flags = 0;
6399 
6400  /* We should only be called in Multis */
6401  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6402 
6403  if (!MultiXactIdIsValid(multi) ||
6404  HEAP_LOCKED_UPGRADED(t_infomask))
6405  {
6406  /* Ensure infomask bits are appropriately set/reset */
6407  *flags |= FRM_INVALIDATE_XMAX;
6408  return InvalidTransactionId;
6409  }
6410  else if (MultiXactIdPrecedes(multi, relminmxid))
6411  ereport(ERROR,
6412  (errcode(ERRCODE_DATA_CORRUPTED),
6413  errmsg_internal("found multixact %u from before relminmxid %u",
6414  multi, relminmxid)));
6415  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6416  {
6417  /*
6418  * This old multi cannot possibly have members still running, but
6419  * verify just in case. If it was a locker only, it can be removed
6420  * without any further consideration; but if it contained an update, we
6421  * might need to preserve it.
6422  */
6423  if (MultiXactIdIsRunning(multi,
6424  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6425  ereport(ERROR,
6426  (errcode(ERRCODE_DATA_CORRUPTED),
6427  errmsg_internal("multixact %u from before cutoff %u found to be still running",
6428  multi, cutoff_multi)));
6429 
6430  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6431  {
6432  *flags |= FRM_INVALIDATE_XMAX;
6433  xid = InvalidTransactionId; /* not strictly necessary */
6434  }
6435  else
6436  {
6437  /* replace multi by update xid */
6438  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6439 
6440  /* wasn't only a lock, xid needs to be valid */
6441  Assert(TransactionIdIsValid(xid));
6442 
6443  if (TransactionIdPrecedes(xid, relfrozenxid))
6444  ereport(ERROR,
6445  (errcode(ERRCODE_DATA_CORRUPTED),
6446  errmsg_internal("found update xid %u from before relfrozenxid %u",
6447  xid, relfrozenxid)));
6448 
6449  /*
6450  * If the xid is older than the cutoff, it has to have aborted,
6451  * otherwise the tuple would have gotten pruned away.
6452  */
6453  if (TransactionIdPrecedes(xid, cutoff_xid))
6454  {
6455  if (TransactionIdDidCommit(xid))
6456  ereport(ERROR,
6457  (errcode(ERRCODE_DATA_CORRUPTED),
6458  errmsg_internal("cannot freeze committed update xid %u", xid)));
6459  *flags |= FRM_INVALIDATE_XMAX;
6460  xid = InvalidTransactionId; /* not strictly necessary */
6461  }
6462  else
6463  {
6464  *flags |= FRM_RETURN_IS_XID;
6465  }
6466  }
6467 
6468  return xid;
6469  }
6470 
6471  /*
6472  * This multixact might have or might not have members still running, but
6473  * we know it's valid and is newer than the cutoff point for multis.
6474  * However, some member(s) of it may be below the cutoff for Xids, so we
6475  * need to walk the whole members array to figure out what to do, if
6476  * anything.
6477  */
6478 
6479  nmembers =
6480  GetMultiXactIdMembers(multi, &members, false,
6481  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6482  if (nmembers <= 0)
6483  {
6484  /* Nothing worth keeping */
6485  *flags |= FRM_INVALIDATE_XMAX;
6486  return InvalidTransactionId;
6487  }
6488 
6489  /* is there anything older than the cutoff? */
6490  need_replace = false;
6491  for (i = 0; i < nmembers; i++)
6492  {
6493  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6494  {
6495  need_replace = true;
6496  break;
6497  }
6498  }
6499 
6500  /*
6501  * In the simplest case, there is no member older than the cutoff; we can
6502  * keep the existing MultiXactId as is.
6503  */
6504  if (!need_replace)
6505  {
6506  *flags |= FRM_NOOP;
6507  pfree(members);
6508  return InvalidTransactionId;
6509  }
6510 
6511  /*
6512  * If the multi needs to be updated, figure out which members do we need
6513  * to keep.
6514  */
6515  nnewmembers = 0;
6516  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6517  has_lockers = false;
6518  update_xid = InvalidTransactionId;
6519  update_committed = false;
6520 
6521  for (i = 0; i < nmembers; i++)
6522  {
6523  /*
6524  * Determine whether to keep this member or ignore it.
6525  */
6526  if (ISUPDATE_from_mxstatus(members[i].status))
6527  {
6528  TransactionId xid = members[i].xid;
6529 
6530  Assert(TransactionIdIsValid(xid));
6531  if (TransactionIdPrecedes(xid, relfrozenxid))
6532  ereport(ERROR,
6533  (errcode(ERRCODE_DATA_CORRUPTED),
6534  errmsg_internal("found update xid %u from before relfrozenxid %u",
6535  xid, relfrozenxid)));
6536 
6537  /*
6538  * It's an update; should we keep it? If the transaction is known
6539  * aborted or crashed then it's okay to ignore it, otherwise not.
6540  * Note that an updater older than cutoff_xid cannot possibly be
6541  * committed, because HeapTupleSatisfiesVacuum would have returned
6542  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6543  *
6544  * As with all tuple visibility routines, it's critical to test
6545  * TransactionIdIsInProgress before TransactionIdDidCommit,
6546  * because of race conditions explained in detail in tqual.c.
6547  */
6548  if (TransactionIdIsCurrentTransactionId(xid) ||
6549  TransactionIdIsInProgress(xid))
6550  {
6551  Assert(!TransactionIdIsValid(update_xid));
6552  update_xid = xid;
6553  }
6554  else if (TransactionIdDidCommit(xid))
6555  {
6556  /*
6557  * The transaction committed, so we can tell caller to set
6558  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6559  * the transaction is not running.)
6560  */
6561  Assert(!TransactionIdIsValid(update_xid));
6562  update_committed = true;
6563  update_xid = xid;
6564  }
6565  else
6566  {
6567  /*
6568  * Not in progress, not committed -- must be aborted or crashed;
6569  * we can ignore it.
6570  */
6571  }
6572 
6573  /*
6574  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6575  * update Xid cannot possibly be older than the xid cutoff. The
6576  * presence of such a tuple would cause corruption, so be paranoid
6577  * and check.
6578  */
6579  if (TransactionIdIsValid(update_xid) &&
6580  TransactionIdPrecedes(update_xid, cutoff_xid))
6581  ereport(ERROR,
6582  (errcode(ERRCODE_DATA_CORRUPTED),
6583  errmsg_internal("found update xid %u from before xid cutoff %u",
6584  update_xid, cutoff_xid)));
6585 
6586  /*
6587  * If we determined that it's an Xid corresponding to an update
6588  * that must be retained, additionally add it to the list of
6589  * members of the new Multi, in case we end up using that. (We
6590  * might still decide to use only an update Xid and not a multi,
6591  * but it's easier to maintain the list as we walk the old members
6592  * list.)
6593  */
6594  if (TransactionIdIsValid(update_xid))
6595  newmembers[nnewmembers++] = members[i];
6596  }
6597  else
6598  {
6599  /* We only keep lockers if they are still running */
6600  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6601  TransactionIdIsInProgress(members[i].xid))
6602  {
6603  /* running locker cannot possibly be older than the cutoff */
6604  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6605  newmembers[nnewmembers++] = members[i];
6606  has_lockers = true;
6607  }
6608  }
6609  }
6610 
6611  pfree(members);
6612 
6613  if (nnewmembers == 0)
6614  {
6615  /* nothing worth keeping!? Tell caller to remove the whole thing */
6616  *flags |= FRM_INVALIDATE_XMAX;
6617  xid = InvalidTransactionId;
6618  }
6619  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6620  {
6621  /*
6622  * If there's a single member and it's an update, pass it back alone
6623  * without creating a new Multi. (XXX we could do this when there's a
6624  * single remaining locker, too, but that would complicate the API too
6625  * much; moreover, the case with the single updater is more
6626  * interesting, because those are longer-lived.)
6627  */
6628  Assert(nnewmembers == 1);
6629  *flags |= FRM_RETURN_IS_XID;
6630  if (update_committed)
6631  *flags |= FRM_MARK_COMMITTED;
6632  xid = update_xid;
6633  }
6634  else
6635  {
6636  /*
6637  * Create a new multixact with the surviving members of the previous
6638  * one, to set as new Xmax in the tuple.
6639  */
6640  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6641  *flags |= FRM_RETURN_IS_MULTI;
6642  }
6643 
6644  pfree(newmembers);
6645 
6646  return xid;
6647 }

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4530 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4531 {
4532  int retval;
4533 
4534  if (is_update)
4535  retval = tupleLockExtraInfo[mode].updstatus;
4536  else
4537  retval = tupleLockExtraInfo[mode].lockstatus;
4538 
4539  if (retval == -1)
4540  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4541  is_update ? "true" : "false");
4542 
4543  return (MultiXactStatus) retval;
4544 }

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 2359 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2360 {
2361  BulkInsertState bistate;
2362 
2363  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2364  bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2365  bistate->current_buf = InvalidBuffer;
2366  return bistate;
2367 }
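
Note: GetBulkInsertState(), heap_insert(), and FreeBulkInsertState() are paired by bulk-loading callers such as CopyFrom(). The sketch below is illustrative only (the helper name bulk_load_tuples is invented) and omits heap_insert() options, index maintenance, and triggers.

#include "postgres.h"

#include "access/heapam.h"
#include "access/xact.h"

/* Illustrative helper (not in heapam.c): insert pre-formed tuples through a
 * single BulkInsertState so buffers are reused via the bulk-write strategy. */
static void
bulk_load_tuples(Relation rel, HeapTuple *tuples, int ntuples)
{
    BulkInsertState bistate = GetBulkInsertState();
    CommandId   cid = GetCurrentCommandId(true);
    int         i;

    for (i = 0; i < ntuples; i++)
        heap_insert(rel, tuples[i], cid, 0, bistate);

    FreeBulkInsertState(bistate);
}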

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 *  new_infomask,
uint16 *  new_infomask2 
)
static

Definition at line 6945 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6947 {
6948  int nmembers;
6949  MultiXactMember *members;
6950  int i;
6951  uint16 bits = HEAP_XMAX_IS_MULTI;
6952  uint16 bits2 = 0;
6953  bool has_update = false;
6954  LockTupleMode strongest = LockTupleKeyShare;
6955 
6956  /*
6957  * We only use this in multis we just created, so they cannot be values
6958  * pre-pg_upgrade.
6959  */
6960  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6961 
6962  for (i = 0; i < nmembers; i++)
6963  {
6964  LockTupleMode mode;
6965 
6966  /*
6967  * Remember the strongest lock mode held by any member of the
6968  * multixact.
6969  */
6970  mode = TUPLOCK_from_mxstatus(members[i].status);
6971  if (mode > strongest)
6972  strongest = mode;
6973 
6974  /* See what other bits we need */
6975  switch (members[i].status)
6976  {
6977  case MultiXactStatusForKeyShare:
6978  case MultiXactStatusForShare:
6979  case MultiXactStatusForNoKeyUpdate:
6980  break;
6981 
6982  case MultiXactStatusForUpdate:
6983  bits2 |= HEAP_KEYS_UPDATED;
6984  break;
6985 
6986  case MultiXactStatusNoKeyUpdate:
6987  has_update = true;
6988  break;
6989 
6990  case MultiXactStatusUpdate:
6991  bits2 |= HEAP_KEYS_UPDATED;
6992  has_update = true;
6993  break;
6994  }
6995  }
6996 
6997  if (strongest == LockTupleExclusive ||
6998  strongest == LockTupleNoKeyExclusive)
6999  bits |= HEAP_XMAX_EXCL_LOCK;
7000  else if (strongest == LockTupleShare)
7001  bits |= HEAP_XMAX_SHR_LOCK;
7002  else if (strongest == LockTupleKeyShare)
7003  bits |= HEAP_XMAX_KEYSHR_LOCK;
7004 
7005  if (!has_update)
7006  bits |= HEAP_XMAX_LOCK_ONLY;
7007 
7008  if (nmembers > 0)
7009  pfree(members);
7010 
7011  *new_infomask = bits;
7012  *new_infomask2 = bits2;
7013 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:184
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:187
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:190
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:313
void pfree(void *pointer)
Definition: mcxt.c:936
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:186
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
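
Since this helper is static to heapam.c, it can only be exercised from within this file; the sketch below merely shows how the bits it hands back are meant to be read. show_hint_bits() is hypothetical.

    /* Sketch only; would have to live inside heapam.c. */
    static void
    show_hint_bits(MultiXactId multi)
    {
        uint16      infomask;
        uint16      infomask2;

        GetMultiXactIdHintBits(multi, &infomask, &infomask2);

        Assert(infomask & HEAP_XMAX_IS_MULTI);

        if (HEAP_XMAX_IS_LOCKED_ONLY(infomask))
            elog(DEBUG1, "all members are pure lockers");
        if (infomask2 & HEAP_KEYS_UPDATED)
            elog(DEBUG1, "a member took the FOR UPDATE / key-update form");
    }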

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 9178 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9179 {
9180  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9181 
9182  switch (info & XLOG_HEAP_OPMASK)
9183  {
9184  case XLOG_HEAP2_CLEAN:
9185  heap_xlog_clean(record);
9186  break;
9187  case XLOG_HEAP2_FREEZE_PAGE:
9188  heap_xlog_freeze_page(record);
9189  break;
9190  case XLOG_HEAP2_CLEANUP_INFO:
9191  heap_xlog_cleanup_info(record);
9192  break;
9193  case XLOG_HEAP2_VISIBLE:
9194  heap_xlog_visible(record);
9195  break;
9196  case XLOG_HEAP2_MULTI_INSERT:
9197  heap_xlog_multi_insert(record);
9198  break;
9199  case XLOG_HEAP2_LOCK_UPDATED:
9200  heap_xlog_lock_updated(record);
9201  break;
9202  case XLOG_HEAP2_NEW_CID:
9203 
9204  /*
9205  * Nothing to do on a real replay, only used during logical
9206  * decoding.
9207  */
9208  break;
9209  case XLOG_HEAP2_REWRITE:
9210  heap_xlog_logical_rewrite(record);
9211  break;
9212  default:
9213  elog(PANIC, "heap2_redo: unknown op code %u", info);
9214  }
9215 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1120
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:312
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:9039
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8256
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8521
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:222
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:8030
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8141
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:8051

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6130 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6131 {
6133  ItemPointer tid = &(tuple->t_self);
6134  ItemId lp;
6135  HeapTupleData tp;
6136  Page page;
6137  BlockNumber block;
6138  Buffer buffer;
6139 
6140  Assert(ItemPointerIsValid(tid));
6141 
6142  block = ItemPointerGetBlockNumber(tid);
6143  buffer = ReadBuffer(relation, block);
6144  page = BufferGetPage(buffer);
6145 
6147 
6148  /*
6149  * Page can't be all visible, we just inserted into it, and are still
6150  * running.
6151  */
6152  Assert(!PageIsAllVisible(page));
6153 
6154  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6155  Assert(ItemIdIsNormal(lp));
6156 
6157  tp.t_tableOid = RelationGetRelid(relation);
6158  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6159  tp.t_len = ItemIdGetLength(lp);
6160  tp.t_self = *tid;
6161 
6162  /*
6163  * Sanity check that the tuple really is a speculatively inserted tuple,
6164  * inserted by us.
6165  */
6166  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6167  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6168  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6169  elog(ERROR, "attempted to kill a non-speculative tuple");
6171 
6172  /*
6173  * No need to check for serializable conflicts here. There is never a
6174  * need for a combocid, either. No need to extract replica identity, or
6175  * do anything special with infomask bits.
6176  */
6177 
6179 
6180  /*
6181  * The tuple will become DEAD immediately. Flag that this page
6182  * immediately is a candidate for pruning by setting xmin to
6183  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6184  * inventing a nicer API for this.
6185  */
6188 
6189  /* store transaction information of xact deleting the tuple */
6192 
6193  /*
6194  * Set the tuple header xmin to InvalidTransactionId. This makes the
6195  * tuple immediately invisible to everyone. (In particular, to any
6196  * transactions waiting on the speculative token, woken up later.)
6197  */
6199 
6200  /* Clear the speculative insertion token too */
6201  tp.t_data->t_ctid = tp.t_self;
6202 
6203  MarkBufferDirty(buffer);
6204 
6205  /*
6206  * XLOG stuff
6207  *
6208  * The WAL records generated here match heap_delete(). The same recovery
6209  * routines are used.
6210  */
6211  if (RelationNeedsWAL(relation))
6212  {
6213  xl_heap_delete xlrec;
6214  XLogRecPtr recptr;
6215 
6216  xlrec.flags = XLH_DELETE_IS_SUPER;
6218  tp.t_data->t_infomask2);
6220  xlrec.xmax = xid;
6221 
6222  XLogBeginInsert();
6223  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6224  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6225 
6226  /* No replica identity & replication origin logged */
6227 
6228  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6229 
6230  PageSetLSN(page, recptr);
6231  }
6232 
6233  END_CRIT_SECTION();
6234 
6235  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6236 
6237  if (HeapTupleHasExternal(&tp))
6238  {
6239  Assert(!IsToastRelation(relation));
6240  toast_delete(relation, &tp, true);
6241  }
6242 
6243  /*
6244  * Never need to mark tuple for invalidation, since catalogs don't support
6245  * speculative insertion
6246  */
6247 
6248  /* Now we can release the buffer */
6249  ReleaseBuffer(buffer);
6250 
6251  /* count deletion, as we counted the insertion too */
6252  pgstat_count_heap_delete(relation);
6253 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
#define HEAP_XMAX_BITS
Definition: htup_details.h:261
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2994
HeapTupleFields t_heap
Definition: htup_details.h:151
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:463
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:428
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:105
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:507
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:118
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_MOVED
Definition: htup_details.h:207
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:688
uint8 infobits_set
Definition: heapam_xlog.h:106
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:679
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:318
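
heap_abort_speculative() is one half of the speculative-insertion protocol driven by ExecInsert() for INSERT ... ON CONFLICT. The wrapper below is a condensed, hypothetical sketch of that protocol; the speculative-token bookkeeping and the index insertions that actually detect the conflict are deliberately omitted.

    #include "access/heapam.h"
    #include "access/xact.h"
    #include "utils/rel.h"

    /* Hypothetical wrapper condensing the ExecInsert() flow. */
    static void
    speculative_insert(Relation rel, HeapTuple tuple, bool conflict_free)
    {
        /* insert the tuple, but mark it as speculative */
        heap_insert(rel, tuple, GetCurrentCommandId(true),
                    HEAP_INSERT_SPECULATIVE, NULL);

        if (conflict_free)
            heap_finish_speculative(rel, tuple);    /* promote to a normal tuple */
        else
            heap_abort_speculative(rel, tuple);     /* "super-delete": dead at once */
    }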

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5249 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5251 {
5252  if (*have_tuple_lock)
5253  return true;
5254 
5255  switch (wait_policy)
5256  {
5257  case LockWaitBlock:
5258  LockTupleTuplock(relation, tid, mode);
5259  break;
5260 
5261  case LockWaitSkip:
5262  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5263  return false;
5264  break;
5265 
5266  case LockWaitError:
5267  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5268  ereport(ERROR,
5269  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5270  errmsg("could not obtain lock on row in relation \"%s\"",
5271  RelationGetRelationName(relation))));
5272  break;
5273  }
5274  *have_tuple_lock = true;
5275 
5276  return true;
5277 }
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:181
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:185
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:445
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797
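
Because the function is static, the pattern below could only appear inside heapam.c; it mirrors how heap_delete() and heap_lock_tuple() track the have_tuple_lock flag and release the heavyweight lock with UnlockTupleTuplock() once they are done. tuplock_pattern() is a hypothetical fragment, not code from this file.

    /* Hypothetical fragment; only meaningful inside heapam.c, where both
     * the function and the UnlockTupleTuplock() macro are visible. */
    static void
    tuplock_pattern(Relation relation, HeapTuple tuple)
    {
        bool        have_tuple_lock = false;

        /* try to take the tuple lock without blocking */
        if (!heap_acquire_tuplock(relation, &tuple->t_self, LockTupleExclusive,
                                  LockWaitSkip, &have_tuple_lock))
            return;             /* someone else holds it; skip this row */

        /* ... wait for the conflicting xact, recheck xmax, do the work ... */

        if (have_tuple_lock)
            UnlockTupleTuplock(relation, &tuple->t_self, LockTupleExclusive);
    }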

◆ heap_beginscan()

HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1400 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), check_default_allows_bound(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1402 {
1403  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1404  true, true, true, false, false, false);
1405 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446
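
The usual sequential-scan loop built on heap_beginscan() looks like the sketch below, modelled loosely on pgstat_collect_oids(); scan_all() is a hypothetical helper.

    #include "access/heapam.h"
    #include "utils/rel.h"
    #include "utils/snapmgr.h"

    /* Hypothetical helper: visit every tuple of "rel" visible to a fresh snapshot. */
    static void
    scan_all(Relation rel)
    {
        Snapshot    snapshot = RegisterSnapshot(GetLatestSnapshot());
        HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
        HeapTuple   tuple;

        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
            /* process tuple; it is only valid while the scan pins its buffer */
        }

        heap_endscan(scan);
        UnregisterSnapshot(snapshot);
    }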

◆ heap_beginscan_bm()

HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1428 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

1430 {
1431  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1432  false, false, true, true, false, false);
1433 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446

◆ heap_beginscan_catalog()

HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1408 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1409 {
1410  Oid relid = RelationGetRelid(relation);
1411  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1412 
1413  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1414  true, true, true, false, false, true);
1415 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:440
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446
#define RelationGetRelid(relation)
Definition: rel.h:425
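
A minimal sketch of a catalog scan with one scan key, loosely following get_tables_to_cluster(); scan_plain_tables() and its relkind filter are hypothetical. Because heap_beginscan_catalog() registers its own catalog snapshot (temp_snap), heap_endscan() takes care of unregistering it.

    #include "access/heapam.h"
    #include "access/htup_details.h"
    #include "access/skey.h"
    #include "access/stratnum.h"
    #include "catalog/pg_class.h"
    #include "utils/fmgroids.h"

    /* Hypothetical helper: walk pg_class and look only at ordinary tables. */
    static void
    scan_plain_tables(Relation pg_class_rel)
    {
        ScanKeyData key;
        HeapScanDesc scan;
        HeapTuple   tuple;

        ScanKeyInit(&key,
                    Anum_pg_class_relkind,
                    BTEqualStrategyNumber, F_CHAREQ,
                    CharGetDatum(RELKIND_RELATION));

        scan = heap_beginscan_catalog(pg_class_rel, 1, &key);

        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        {
            Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tuple);

            /* ... inspect classForm ... */
            (void) classForm;
        }

        /* also unregisters the snapshot taken by heap_beginscan_catalog() */
        heap_endscan(scan);
    }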

◆ heap_beginscan_internal()

static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1446 of file heapam.c.

References initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

1455 {
1456  HeapScanDesc scan;
1457 
1458  /*
1459  * increment relation ref count while scanning relation
1460  *
1461  * This is just to make really sure the relcache entry won't go away while
1462  * the scan has a pointer to it. Caller should be holding the rel open
1463  * anyway, so this is redundant in all normal scenarios...
1464  */
1465  RelationIncrementReferenceCount(relation);
1466 
1467  /*
1468  * allocate and initialize scan descriptor
1469  */
1470  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1471 
1472  scan->rs_rd = relation;
1473  scan->rs_snapshot = snapshot;
1474  scan->rs_nkeys = nkeys;
1475  scan->rs_bitmapscan = is_bitmapscan;
1476  scan->rs_samplescan = is_samplescan;
1477  scan->rs_strategy = NULL; /* set in initscan */
1478  scan->rs_allow_strat = allow_strat;
1479  scan->rs_allow_sync = allow_sync;
1480  scan->rs_temp_snap = temp_snap;
1481  scan->rs_parallel = parallel_scan;
1482 
1483  /*
1484  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1485  */
1486  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1487 
1488  /*
1489  * For a seqscan in a serializable transaction, acquire a predicate lock
1490  * on the entire relation. This is required not only to lock all the
1491  * matching tuples, but also to conflict with new insertions into the
1492  * table. In an indexscan, we take page locks on the index pages covering
1493  * the range specified in the scan qual, but in a heap scan there is
1494  * nothing more fine-grained to lock. A bitmap scan is a different story,
1495  * there we have already scanned the index and locked the index pages
1496  * covering the predicate. But in that case we still have to lock any
1497  * matching heap tuples.
1498  */
1499  if (!is_bitmapscan)
1500  PredicateLockRelation(relation, snapshot);
1501 
1502  /* we only need to set this up once */
1503  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1504 
1505  /*
1506  * we do this here instead of in initscan() because heap_rescan also calls
1507  * initscan() and we don't want to allocate memory again
1508  */
1509  if (nkeys > 0)
1510  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1511  else
1512  scan->rs_key = NULL;
1513 
1514  initscan(scan, key, false);
1515 
1516  return scan;
1517 }
bool rs_allow_sync
Definition: relscan.h:57
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2498
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:70
bool rs_bitmapscan
Definition: relscan.h:53
bool rs_pageatatime
Definition: relscan.h:55
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:74
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:50
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:58
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2100
BufferAccessStrategy rs_strategy
Definition: relscan.h:65
Relation rs_rd
Definition: relscan.h:49
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:835
bool rs_allow_strat
Definition: relscan.h:56
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:216
bool rs_samplescan
Definition: relscan.h:54
#define RelationGetRelid(relation)
Definition: rel.h:425
ScanKey rs_key
Definition: relscan.h:52

◆ heap_beginscan_parallel()

HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1662 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_any, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, RestoreSnapshot(), and SnapshotAny.

Referenced by _bt_parallel_scan_and_sort(), ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

1663 {
1664  Snapshot snapshot;
1665 
1666  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1667 
1668  if (!parallel_scan->phs_snapshot_any)
1669  {
1670  /* Snapshot was serialized -- restore it */
1671  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1672  RegisterSnapshot(snapshot);
1673  }
1674  else
1675  {
1676  /* SnapshotAny passed by caller (not serialized) */
1677  snapshot = SnapshotAny;
1678  }
1679 
1680  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1681  true, true, true, false, false,
1682  !parallel_scan->phs_snapshot_any);
1683 }
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:43
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2127
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446
#define SnapshotAny
Definition: tqual.h:28
#define Assert(condition)
Definition: c.h:688
#define RelationGetRelid(relation)
Definition: rel.h:425
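
Setting up a parallel scan is split between the leader and the workers, as in ExecSeqScanInitializeDSM() and ExecSeqScanInitializeWorker(). The sketch below assumes the caller has already carved out a suitably sized piece of dynamic shared memory ("shm") and handles snapshot serialization elsewhere; both helper functions are hypothetical.

    #include "access/heapam.h"
    #include "access/relscan.h"
    #include "utils/rel.h"
    #include "utils/snapmgr.h"

    /*
     * Leader side (hypothetical): "shm" must point to at least
     * heap_parallelscan_estimate(snapshot) bytes of shared memory.
     */
    static HeapScanDesc
    leader_begin_parallel_scan(Relation rel, Snapshot snapshot, void *shm)
    {
        ParallelHeapScanDesc pscan = (ParallelHeapScanDesc) shm;

        heap_parallelscan_initialize(pscan, rel, snapshot);
        return heap_beginscan_parallel(rel, pscan);
    }

    /* Worker side (hypothetical): attach to the descriptor the leader built. */
    static HeapScanDesc
    worker_begin_parallel_scan(Relation rel, ParallelHeapScanDesc pscan)
    {
        return heap_beginscan_parallel(rel, pscan);
    }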

◆ heap_beginscan_sampling()

HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1436 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

1439 {
1440  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1441  allow_strat, allow_sync, allow_pagemode,
1442  false, true, false);
1443 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446

◆ heap_beginscan_strat()

HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1418 of file heapam.c.

References heap_beginscan_internal().

Referenced by IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

1421 {
1422  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1423  allow_strat, allow_sync, true,
1424  false, false, false);
1425 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1446

◆ heap_delete()

HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 3053 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3056 {
3057  HTSU_Result result;
3059  ItemId lp;
3060  HeapTupleData tp;
3061  Page page;
3062  BlockNumber block;
3063  Buffer buffer;
3064  Buffer vmbuffer = InvalidBuffer;
3065  TransactionId new_xmax;
3066  uint16 new_infomask,
3067  new_infomask2;
3068  bool have_tuple_lock = false;
3069  bool iscombo;
3070  bool all_visible_cleared = false;
3071  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3072  bool old_key_copied = false;
3073 
3074  Assert(ItemPointerIsValid(tid));
3075 
3076  /*
3077  * Forbid this during a parallel operation, lest it allocate a combocid.
3078  * Other workers might need that combocid for visibility checks, and we
3079  * have no provision for broadcasting it to them.
3080  */
3081  if (IsInParallelMode())
3082  ereport(ERROR,
3083  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3084  errmsg("cannot delete tuples during a parallel operation")));
3085 
3086  block = ItemPointerGetBlockNumber(tid);
3087  buffer = ReadBuffer(relation, block);
3088  page = BufferGetPage(buffer);
3089 
3090  /*
3091  * Before locking the buffer, pin the visibility map page if it appears to
3092  * be necessary. Since we haven't got the lock yet, someone else might be
3093  * in the middle of changing this, so we'll need to recheck after we have
3094  * the lock.
3095  */
3096  if (PageIsAllVisible(page))
3097  visibilitymap_pin(relation, block, &vmbuffer);
3098 
3100 
3101  /*
3102  * If we didn't pin the visibility map page and the page has become all
3103  * visible while we were busy locking the buffer, we'll have to unlock and
3104  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3105  * unfortunate, but hopefully shouldn't happen often.
3106  */
3107  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3108  {
3109  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3110  visibilitymap_pin(relation, block, &vmbuffer);
3112  }
3113 
3114  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3115  Assert(ItemIdIsNormal(lp));
3116 
3117  tp.t_tableOid = RelationGetRelid(relation);
3118  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3119  tp.t_len = ItemIdGetLength(lp);
3120  tp.t_self = *tid;
3121 
3122 l1:
3123  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3124 
3125  if (result == HeapTupleInvisible)
3126  {
3127  UnlockReleaseBuffer(buffer);
3128  ereport(ERROR,
3129  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3130  errmsg("attempted to delete invisible tuple")));
3131  }
3132  else if (result == HeapTupleBeingUpdated && wait)
3133  {
3134  TransactionId xwait;
3135  uint16 infomask;
3136 
3137  /* must copy state data before unlocking buffer */
3138  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3139  infomask = tp.t_data->t_infomask;
3140 
3141  /*
3142  * Sleep until concurrent transaction ends -- except when there's a
3143  * single locker and it's our own transaction. Note we don't care
3144  * which lock mode the locker has, because we need the strongest one.
3145  *
3146  * Before sleeping, we need to acquire tuple lock to establish our
3147  * priority for the tuple (see heap_lock_tuple). LockTuple will
3148  * release us when we are next-in-line for the tuple.
3149  *
3150  * If we are forced to "start over" below, we keep the tuple lock;
3151  * this arranges that we stay at the head of the line while rechecking
3152  * tuple state.
3153  */
3154  if (infomask & HEAP_XMAX_IS_MULTI)
3155  {
3156  /* wait for multixact */
3157  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3159  {
3160  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3161 
3162  /* acquire tuple lock, if necessary */
3164  LockWaitBlock, &have_tuple_lock);
3165 
3166  /* wait for multixact */
3168  relation, &(tp.t_self), XLTW_Delete,
3169  NULL);
3171 
3172  /*
3173  * If xwait had just locked the tuple then some other xact
3174  * could update this tuple before we get to this point. Check
3175  * for xmax change, and start over if so.
3176  */
3177  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3179  xwait))
3180  goto l1;
3181  }
3182 
3183  /*
3184  * You might think the multixact is necessarily done here, but not
3185  * so: it could have surviving members, namely our own xact or
3186  * other subxacts of this backend. It is legal for us to delete
3187  * the tuple in either case, however (the latter case is
3188  * essentially a situation of upgrading our former shared lock to
3189  * exclusive). We don't bother changing the on-disk hint bits
3190  * since we are about to overwrite the xmax altogether.
3191  */
3192  }
3193  else if (!TransactionIdIsCurrentTransactionId(xwait))
3194  {
3195  /*
3196  * Wait for regular transaction to end; but first, acquire tuple
3197  * lock.
3198  */
3199  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3201  LockWaitBlock, &have_tuple_lock);
3202  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3204 
3205  /*
3206  * xwait is done, but if xwait had just locked the tuple then some
3207  * other xact could update this tuple before we get to this point.
3208  * Check for xmax change, and start over if so.
3209  */
3210  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3212  xwait))
3213  goto l1;
3214 
3215  /* Otherwise check if it committed or aborted */
3216  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3217  }
3218 
3219  /*
3220  * We may overwrite if previous xmax aborted, or if it committed but
3221  * only locked the tuple without updating it.
3222  */
3223  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3226  result = HeapTupleMayBeUpdated;
3227  else
3228  result = HeapTupleUpdated;
3229  }
3230 
3231  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3232  {
3233  /* Perform additional check for transaction-snapshot mode RI updates */
3234  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3235  result = HeapTupleUpdated;
3236  }
3237 
3238  if (result != HeapTupleMayBeUpdated)
3239  {
3240  Assert(result == HeapTupleSelfUpdated ||
3241  result == HeapTupleUpdated ||
3242  result == HeapTupleBeingUpdated);
3244  hufd->ctid = tp.t_data->t_ctid;
3246  if (result == HeapTupleSelfUpdated)
3247  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3248  else
3249  hufd->cmax = InvalidCommandId;
3250  UnlockReleaseBuffer(buffer);
3251  if (have_tuple_lock)
3252  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3253  if (vmbuffer != InvalidBuffer)
3254  ReleaseBuffer(vmbuffer);
3255  return result;
3256  }
3257 
3258  /*
3259  * We're about to do the actual delete -- check for conflict first, to
3260  * avoid possibly having to roll back work we've just done.
3261  *
3262  * This is safe without a recheck as long as there is no possibility of
3263  * another process scanning the page between this check and the delete
3264  * being visible to the scan (i.e., an exclusive buffer content lock is
3265  * continuously held from this point until the tuple delete is visible).
3266  */
3267  CheckForSerializableConflictIn(relation, &tp, buffer);
3268 
3269  /* replace cid with a combo cid if necessary */
3270  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3271 
3272  /*
3273  * Compute replica identity tuple before entering the critical section so
3274  * we don't PANIC upon a memory allocation failure.
3275  */
3276  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3277 
3278  /*
3279  * If this is the first possibly-multixact-able operation in the current
3280  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3281  * certain that the transaction will never become a member of any older
3282  * MultiXactIds than that. (We have to do this even if we end up just
3283  * using our own TransactionId below, since some other backend could
3284  * incorporate our XID into a MultiXact immediately afterwards.)
3285  */
3287 
3290  xid, LockTupleExclusive, true,
3291  &new_xmax, &new_infomask, &new_infomask2);
3292 
3294 
3295  /*
3296  * If this transaction commits, the tuple will become DEAD sooner or
3297  * later. Set flag that this page is a candidate for pruning once our xid
3298  * falls below the OldestXmin horizon. If the transaction finally aborts,
3299  * the subsequent page pruning will be a no-op and the hint will be
3300  * cleared.
3301  */
3302  PageSetPrunable(page, xid);
3303 
3304  if (PageIsAllVisible(page))
3305  {
3306  all_visible_cleared = true;
3307  PageClearAllVisible(page);
3308  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3309  vmbuffer, VISIBILITYMAP_VALID_BITS);
3310  }
3311 
3312  /* store transaction information of xact deleting the tuple */
3315  tp.t_data->t_infomask |= new_infomask;
3316  tp.t_data->t_infomask2 |= new_infomask2;
3318  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3319  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3320  /* Make sure there is no forward chain link in t_ctid */
3321  tp.t_data->t_ctid = tp.t_self;
3322 
3323  MarkBufferDirty(buffer);
3324 
3325  /*
3326  * XLOG stuff
3327  *
3328  * NB: heap_abort_speculative() uses the same xlog record and replay
3329  * routines.
3330  */
3331  if (RelationNeedsWAL(relation))
3332  {
3333  xl_heap_delete xlrec;
3334  XLogRecPtr recptr;
3335 
3336  /* For logical decode we need combocids to properly decode the catalog */
3338  log_heap_new_cid(relation, &tp);
3339 
3340  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3342  tp.t_data->t_infomask2);
3344  xlrec.xmax = new_xmax;
3345 
3346  if (old_key_tuple != NULL)
3347  {
3348  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3350  else
3352  }
3353 
3354  XLogBeginInsert();
3355  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3356 
3357  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3358 
3359  /*
3360  * Log replica identity of the deleted tuple if there is one
3361  */
3362  if (old_key_tuple != NULL)
3363  {
3364  xl_heap_header xlhdr;
3365 
3366  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3367  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3368  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3369 
3370  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3371  XLogRegisterData((char *) old_key_tuple->t_data
3373  old_key_tuple->t_len
3375  }
3376 
3377  /* filtering by origin on a row level is much more efficient */
3379 
3380  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3381 
3382  PageSetLSN(page, recptr);
3383  }
3384 
3385  END_CRIT_SECTION();
3386 
3387  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3388 
3389  if (vmbuffer != InvalidBuffer)
3390  ReleaseBuffer(vmbuffer);
3391 
3392  /*
3393  * If the tuple has toasted out-of-line attributes, we need to delete
3394  * those items too. We have to do this before releasing the buffer
3395  * because we need to look at the contents of the tuple, but it's OK to
3396  * release the content lock on the buffer first.
3397  */
3398  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3399  relation->rd_rel->relkind != RELKIND_MATVIEW)
3400  {
3401  /* toast table entries should never be recursively toasted */
3403  }
3404  else if (HeapTupleHasExternal(&tp))
3405  toast_delete(relation, &tp, false);
3406 
3407  /*
3408  * Mark tuple for invalidation from system caches at next command
3409  * boundary. We have to do this before releasing the buffer because we
3410  * need to look at the contents of the tuple.
3411  */
3412  CacheInvalidateHeapTuple(relation, &tp, NULL);
3413 
3414  /* Now we can release the buffer */
3415  ReleaseBuffer(buffer);
3416 
3417  /*
3418  * Release the lmgr tuple lock, if we had it.
3419  */
3420  if (have_tuple_lock)
3421  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3422 
3423  pgstat_count_heap_delete(relation);
3424 
3425  if (old_key_tuple != NULL && old_key_copied)
3426  heap_freetuple(old_key_tuple);
3427 
3428  return HeapTupleMayBeUpdated;
3429 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:364
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1596
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:175
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7842
#define HEAP_XMAX_BITS
Definition: htup_details.h:261
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2994
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:463
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:766
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:3016
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:502
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define RELKIND_MATVIEW
Definition: pg_class.h:165
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:114
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4326
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:183
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:105
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:374
unsigned short uint16
Definition: c.h:313
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:906
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:180
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:198
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7258
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:379
HTSU_Result
Definition: snapshot.h:121
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:404
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5298
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define InvalidSnapshot
Definition: snapshot.h:25
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:568
#define InvalidCommandId
Definition: c.h:480
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:221
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:269
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:199
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2337
#define HEAP_MOVED
Definition: htup_details.h:207
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5249
TransactionId MultiXactId
Definition: c.h:473
#define PageClearAllVisible(page)
Definition: bufpage.h:385
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:688
uint8 infobits_set
Definition: heapam_xlog.h:106
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:7918
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:7091
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:679
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define RELKIND_RELATION
Definition: pg_class.h:160
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93
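
Most callers reach heap_delete() through ExecDelete() or simple_heap_delete(). The hypothetical helper below is modelled on simple_heap_delete(): it deletes in the current command, waits for concurrent lockers, and treats any concurrent update as an error.

    #include "access/heapam.h"
    #include "access/xact.h"
    #include "utils/rel.h"
    #include "utils/snapshot.h"

    /* Hypothetical helper, modelled on simple_heap_delete(). */
    static void
    delete_tuple(Relation rel, ItemPointer tid)
    {
        HTSU_Result result;
        HeapUpdateFailureData hufd;

        result = heap_delete(rel, tid,
                             GetCurrentCommandId(true),
                             InvalidSnapshot,   /* no cross-check snapshot */
                             true,              /* wait for committers */
                             &hufd);
        switch (result)
        {
            case HeapTupleSelfUpdated:
                elog(ERROR, "tuple already updated by self");
                break;

            case HeapTupleMayBeUpdated:
                /* done successfully */
                break;

            case HeapTupleUpdated:
                elog(ERROR, "tuple concurrently updated");
                break;

            default:
                elog(ERROR, "unrecognized heap_delete status: %u", result);
                break;
        }
    }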

◆ heap_endscan()

void heap_endscan ( HeapScanDesc  scan)

Definition at line 1568 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1569 {
1570  /* Note: no locking manipulations needed */
1571 
1572  /*
1573  * unpin scan buffers
1574  */
1575  if (BufferIsValid(scan->rs_cbuf))
1576  ReleaseBuffer(scan->rs_cbuf);
1577 
1578  /*
1579  * decrement relation reference count and free scan descriptor storage
1580  */
1582 
1583  if (scan->rs_key)
1584  pfree(scan->rs_key);
1585 
1586  if (scan->rs_strategy != NULL)
1588 
1589  if (scan->rs_temp_snap)
1591 
1592  pfree(scan);
1593 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:936
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2113
Snapshot rs_snapshot
Definition: relscan.h:50
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
bool rs_temp_snap
Definition: relscan.h:58
BufferAccessStrategy rs_strategy
Definition: relscan.h:65
Relation rs_rd
Definition: relscan.h:49
Buffer rs_cbuf
Definition: relscan.h:72
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:52

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6893 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6894 {
6895  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6896 
6897  if (frz->frzflags & XLH_FREEZE_XVAC)
6898  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6899 
6900  if (frz->frzflags & XLH_INVALID_XVAC)
6901  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6902 
6903  tuple->t_infomask = frz->t_infomask;
6904  tuple->t_infomask2 = frz->t_infomask2;
6905 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:422
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:379
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:298
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:294
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:293
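
This is the apply half of the freeze protocol used by lazy_scan_heap(): heap_prepare_freeze_tuple() first decides what should change and fills the xl_heap_freeze_tuple record (its signature varies between branches, so it is only referenced in a comment here), and only then is heap_execute_freeze_tuple() called inside a critical section. apply_freeze() is a hypothetical, simplified wrapper; real VACUUM code batches the records per page and WAL-logs them with log_heap_freeze().

    #include "access/heapam_xlog.h"
    #include "access/htup_details.h"
    #include "miscadmin.h"
    #include "storage/bufmgr.h"

    /*
     * Hypothetical wrapper: "frz" is assumed to have been filled in by
     * heap_prepare_freeze_tuple() for this tuple, and that call returned true.
     */
    static void
    apply_freeze(Buffer buf, HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
    {
        START_CRIT_SECTION();

        MarkBufferDirty(buf);
        heap_execute_freeze_tuple(tuple, frz);

        /* real callers now WAL-log the change, e.g. via log_heap_freeze() */

        END_CRIT_SECTION();
    }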

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1899 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1905 {
1906  ItemPointer tid = &(tuple->t_self);
1907  ItemId lp;
1908  Buffer buffer;
1909  Page page;
1910  OffsetNumber offnum;
1911  bool valid;
1912 
1913  /*
1914  * Fetch and pin the appropriate page of the relation.
1915  */
1916  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1917 
1918  /*
1919  * Need share lock on buffer to examine tuple commit status.
1920  */
1921  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1922  page = BufferGetPage(buffer);
1923  TestForOldSnapshot(snapshot, relation, page);
1924 
1925  /*
1926  * We'd better check for out-of-range offnum in case of VACUUM since the
1927  * TID was obtained.
1928  */
1929  offnum = ItemPointerGetOffsetNumber(tid);
1930  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1931  {
1932  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1933  if (keep_buf)
1934  *userbuf = buffer;
1935  else
1936  {
1937  ReleaseBuffer(buffer);
1938  *userbuf = InvalidBuffer;
1939  }
1940  tuple->t_data = NULL;
1941  return false;
1942  }
1943 
1944  /*
1945  * get the item line pointer corresponding to the requested tid
1946  */
1947  lp = PageGetItemId(page, offnum);
1948 
1949  /*
1950  * Must check for deleted tuple.
1951  */
1952  if (!ItemIdIsNormal(lp))
1953  {
1954  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1955  if (keep_buf)
1956  *userbuf = buffer;
1957  else
1958  {
1959  ReleaseBuffer(buffer);
1960  *userbuf = InvalidBuffer;
1961  }
1962  tuple->t_data = NULL;
1963  return false;
1964  }
1965 
1966  /*
1967  * fill in *tuple fields
1968  */
1969  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1970  tuple->t_len = ItemIdGetLength(lp);
1971  tuple->t_tableOid = RelationGetRelid(relation);
1972 
1973  /*
1974  * check time qualification of tuple, then release lock
1975  */
1976  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1977 
1978  if (valid)
1979  PredicateLockTuple(relation, tuple, snapshot);
1980 
1981  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1982 
1983  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1984 
1985  if (valid)
1986  {
1987  /*
1988  * All checks passed, so return the tuple as valid. Caller is now
1989  * responsible for releasing the buffer.
1990  */
1991  *userbuf = buffer;
1992 
1993  /* Count the successful fetch against appropriate rel, if any */
1994  if (stats_relation != NULL)
1995  pgstat_count_heap_fetch(stats_relation);
1996 
1997  return true;
1998  }
1999 
2000  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
2001  if (keep_buf)
2002  *userbuf = buffer;
2003  else
2004  {
2005  ReleaseBuffer(buffer);
2006  *userbuf = InvalidBuffer;
2007  }
2008 
2009  return false;
2010 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1286
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2543
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
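
The listing above is the body of heap_fetch(). A caller-side sketch, not taken from the tree: the target TID is read from tuple->t_self, so the caller fills that in first; when true is returned, *userbuf is pinned and tuple->t_data points into that buffer, so the caller must eventually ReleaseBuffer() it. The helper name below is hypothetical.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "storage/bufmgr.h"
    #include "utils/snapmgr.h"

    /* Fetch one tuple version by TID under the active snapshot. */
    static bool
    fetch_by_tid(Relation rel, ItemPointer tid, HeapTupleData *tuple, Buffer *buf)
    {
        tuple->t_self = *tid;       /* heap_fetch takes the target TID from t_self */
        return heap_fetch(rel, GetActiveSnapshot(), tuple, buf,
                          false,    /* keep_buf: drop the pin if not visible */
                          NULL);    /* stats_relation: nothing extra to credit */
    }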

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6039 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6040 {
6041  Buffer buffer;
6042  Page page;
6043  OffsetNumber offnum;
6044  ItemId lp = NULL;
6045  HeapTupleHeader htup;
6046 
6047  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6048  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6049  page = (Page) BufferGetPage(buffer);
6050 
6051  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6052  if (PageGetMaxOffsetNumber(page) >= offnum)
6053  lp = PageGetItemId(page, offnum);
6054 
6055  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6056  elog(ERROR, "invalid lp");
6057 
6058  htup = (HeapTupleHeader) PageGetItem(page, lp);
6059 
6060  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6061  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
6062  "invalid speculative token constant");
6063 
6064  /* NO EREPORT(ERROR) from here till changes are logged */
6065  START_CRIT_SECTION();
6066 
6067  Assert(HeapTupleHeaderIsSpeculative(tuple->t_data));
6068 
6069  MarkBufferDirty(buffer);
6070 
6071  /*
6072  * Replace the speculative insertion token with a real t_ctid, pointing to
6073  * itself like it does on regular tuples.
6074  */
6075  htup->t_ctid = tuple->t_self;
6076 
6077  /* XLOG stuff */
6078  if (RelationNeedsWAL(relation))
6079  {
6080  xl_heap_confirm xlrec;
6081  XLogRecPtr recptr;
6082 
6083  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6084 
6085  XLogBeginInsert();
6086 
6087  /* We want the same filtering on this as on a plain insert */
6088  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6089 
6090  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6091  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6092 
6093  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6094 
6095  PageSetLSN(page, recptr);
6096  }
6097 
6098  END_CRIT_SECTION();
6099 
6100  UnlockReleaseBuffer(buffer);
6101 }
OffsetNumber offnum
Definition: heapam_xlog.h:274
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:428
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:784
#define SpecTokenOffsetNumber
Definition: htup_details.h:290
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
#define REGBUF_STANDARD
Definition: xloginsert.h:34
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:277
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:688
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37
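
For context, a condensed sketch of the speculative-insertion protocol that ExecInsert() drives around this function; rel, tuple and the conflict-check outcome specConflict are assumed to be provided by the surrounding executor code, and index maintenance and error paths are omitted.

    uint32      specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

    HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
    heap_insert(rel, tuple, GetCurrentCommandId(true),
                HEAP_INSERT_SPECULATIVE, NULL);

    /* ... insert index entries, checking for conflicting rows ... */

    if (!specConflict)
        heap_finish_speculative(rel, tuple);    /* turn t_ctid into a normal self-pointer */
    else
        heap_abort_speculative(rel, tuple);     /* "super-delete" the inserted tuple */

    SpeculativeInsertionLockRelease(GetCurrentTransactionId());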

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6914 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6917 {
6918  xl_heap_freeze_tuple frz;
6919  bool do_freeze;
6920  bool tuple_totally_frozen;
6921 
6922  do_freeze = heap_prepare_freeze_tuple(tuple,
6923  relfrozenxid, relminmxid,
6924  cutoff_xid, cutoff_multi,
6925  &frz, &tuple_totally_frozen);
6926 
6927  /*
6928  * Note that because this is not a WAL-logged operation, we don't need to
6929  * fill in the offset in the freeze record.
6930  */
6931 
6932  if (do_freeze)
6933  heap_execute_freeze_tuple(tuple, &frz);
6934  return do_freeze;
6935 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6681
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6893
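
A sketch of the wrapper-style call as rewrite_heap_tuple() makes it; the cutoff variable names are illustrative. VACUUM does not use this wrapper: it calls heap_prepare_freeze_tuple() and heap_execute_freeze_tuple() itself so that all freezes on a page can be logged in one WAL record.

    /* Freeze a privately held tuple copy during a table rewrite; no WAL record
     * is needed here because the rewritten heap is made durable separately. */
    if (heap_freeze_tuple(newtup->t_data,
                          relfrozenxid,     /* old heap's pg_class.relfrozenxid */
                          relminmxid,       /* old heap's pg_class.relminmxid */
                          freeze_xid,       /* XIDs older than this get frozen */
                          cutoff_multi))    /* MultiXactIds older than this are removed */
    {
        /* newtup's header was rewritten in place */
    }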

◆ heap_get_latest_tid()

void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2207 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2210 {
2211  BlockNumber blk;
2212  ItemPointerData ctid;
2213  TransactionId priorXmax;
2214 
2215  /* this is to avoid Assert failures on bad input */
2216  if (!ItemPointerIsValid(tid))
2217  return;
2218 
2219  /*
2220  * Since this can be called with user-supplied TID, don't trust the input
2221  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2222  * don't check t_ctid links again this way. Note that it would not do to
2223  * call it just once and save the result, either.)
2224  */
2225  blk = ItemPointerGetBlockNumber(tid);
2226  if (blk >= RelationGetNumberOfBlocks(relation))
2227  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2228  blk, RelationGetRelationName(relation));
2229 
2230  /*
2231  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2232  * need to examine, and *tid is the TID we will return if ctid turns out
2233  * to be bogus.
2234  *
2235  * Note that we will loop until we reach the end of the t_ctid chain.
2236  * Depending on the snapshot passed, there might be at most one visible
2237  * version of the row, but we don't try to optimize for that.
2238  */
2239  ctid = *tid;
2240  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2241  for (;;)
2242  {
2243  Buffer buffer;
2244  Page page;
2245  OffsetNumber offnum;
2246  ItemId lp;
2247  HeapTupleData tp;
2248  bool valid;
2249 
2250  /*
2251  * Read, pin, and lock the page.
2252  */
2253  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2254  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2255  page = BufferGetPage(buffer);
2256  TestForOldSnapshot(snapshot, relation, page);
2257 
2258  /*
2259  * Check for bogus item number. This is not treated as an error
2260  * condition because it can happen while following a t_ctid link. We
2261  * just assume that the prior tid is OK and return it unchanged.
2262  */
2263  offnum = ItemPointerGetOffsetNumber(&ctid);
2264  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2265  {
2266  UnlockReleaseBuffer(buffer);
2267  break;
2268  }
2269  lp = PageGetItemId(page, offnum);
2270  if (!ItemIdIsNormal(lp))
2271  {
2272  UnlockReleaseBuffer(buffer);
2273  break;
2274  }
2275 
2276  /* OK to access the tuple */
2277  tp.t_self = ctid;
2278  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2279  tp.t_len = ItemIdGetLength(lp);
2280  tp.t_tableOid = RelationGetRelid(relation);
2281 
2282  /*
2283  * After following a t_ctid link, we might arrive at an unrelated
2284  * tuple. Check for XMIN match.
2285  */
2286  if (TransactionIdIsValid(priorXmax) &&
2287  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2288  {
2289  UnlockReleaseBuffer(buffer);
2290  break;
2291  }
2292 
2293  /*
2294  * Check time qualification of tuple; if visible, set it as the new
2295  * result candidate.
2296  */
2297  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2298  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2299  if (valid)
2300  *tid = ctid;
2301 
2302  /*
2303  * If there's a valid t_ctid link, follow it, else we're done.
2304  */
2305  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2306  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2307  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2308  {
2309  UnlockReleaseBuffer(buffer);
2310  break;
2311  }
2312 
2313  ctid = tp.t_data->t_ctid;
2314  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2315  UnlockReleaseBuffer(buffer);
2316  } /* end of loop */
2317 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:364
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1596
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:463
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:198
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:445
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:312
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
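
A sketch of a caller in the style of currtid_byreloid(): the TID is updated in place to the newest version visible under the given snapshot. Relation handling is simplified and the snapshot choice here is illustrative, not necessarily the one the currtid functions use.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/snapmgr.h"

    /* Resolve a possibly-stale TID to the latest visible row version. */
    static ItemPointerData
    latest_version(Oid reloid, ItemPointerData tid)
    {
        Relation    rel = heap_open(reloid, AccessShareLock);

        heap_get_latest_tid(rel, GetActiveSnapshot(), &tid);    /* updates tid in place */
        heap_close(rel, AccessShareLock);
        return tid;
    }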

◆ heap_getnext()

HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1831 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1832 {
1833  /* Note: no locking manipulations needed */
1834 
1835  HEAPDEBUG_1; /* heap_getnext( info ) */
1836 
1837  if (scan->rs_pageatatime)
1838  heapgettup_pagemode(scan, direction,
1839  scan->rs_nkeys, scan->rs_key);
1840  else
1841  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1842 
1843  if (scan->rs_ctup.t_data == NULL)
1844  {
1845  HEAPDEBUG_2; /* heap_getnext returning EOS */
1846  return NULL;
1847  }
1848 
1849  /*
1850  * if we get here it means we have a new current scan tuple, so point to
1851  * the proper return buffer and return the tuple.
1852  */
1853  HEAPDEBUG_3; /* heap_getnext returning tuple */
1854 
1855  pgstat_count_heap_getnext(scan->rs_rd);
1856 
1857  return &(scan->rs_ctup);
1858 }
#define HEAPDEBUG_2
Definition: heapam.c:1825
HeapTupleData rs_ctup
Definition: relscan.h:70
HeapTupleHeader t_data
Definition: htup.h:67
bool rs_pageatatime
Definition: relscan.h:55
#define HEAPDEBUG_1
Definition: heapam.c:1824
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:481
Relation rs_rd
Definition: relscan.h:49
#define HEAPDEBUG_3
Definition: heapam.c:1826
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1281
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:785
ScanKey rs_key
Definition: relscan.h:52
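
The canonical sequential-scan loop around heap_getnext(), condensed from the pattern the callers above share; a minimal sketch in which the per-tuple work is just a counter.

    #include "postgres.h"
    #include "access/heapam.h"
    #include "utils/snapmgr.h"

    /* Count the tuples visible to the active snapshot using a plain seqscan. */
    static uint64
    count_visible_tuples(Relation rel)
    {
        HeapScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
        HeapTuple    tuple;
        uint64       ntuples = 0;

        while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
            ntuples++;          /* tuple points into a pinned buffer; don't hold onto it */

        heap_endscan(scan);
        return ntuples;
    }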

◆ heap_hot_search()

bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 2179 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), and ReleaseBuffer().

Referenced by _bt_check_unique(), and unique_key_recheck().

2181 {
2182  bool result;
2183  Buffer buffer;
2184  HeapTupleData heapTuple;
2185 
2186  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2187  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2188  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2189  &heapTuple, all_dead, true);
2190  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2191  ReleaseBuffer(buffer);
2192  return result;
2193 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:2034
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
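
A sketch in the spirit of _bt_check_unique(): probe the heap with a dirty snapshot to see whether any member of the HOT chain at htid is live, and use all_dead to decide whether the index entry may be killed. htid and heapRel are assumed to be set up by the caller.

    bool         all_dead = false;
    SnapshotData SnapshotDirty;

    InitDirtySnapshot(SnapshotDirty);

    if (heap_hot_search(&htid, heapRel, &SnapshotDirty, &all_dead))
    {
        /* a live or in-doubt version exists: treat it as a possible duplicate */
    }
    else if (all_dead)
    {
        /* every chain member is dead to all transactions: the index entry can be killed */
    }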

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool *  all_dead,
bool  first_call 
)

Definition at line 2034 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

2037 {
2038  Page dp = (Page) BufferGetPage(buffer);
2039  TransactionId prev_xmax = InvalidTransactionId;
2040  OffsetNumber offnum;
2041  bool at_chain_start;
2042  bool valid;
2043  bool skip;
2044 
2045  /* If this is not the first call, previous call returned a (live!) tuple */
2046  if (all_dead)
2047  *all_dead = first_call;
2048 
2050 
2052  offnum = ItemPointerGetOffsetNumber(tid);
2053  at_chain_start = first_call;
2054  skip = !first_call;
2055 
2056  heapTuple->t_self = *tid;
2057 
2058  /* Scan through possible multiple members of HOT-chain */
2059  for (;;)
2060  {
2061  ItemId lp;
2062 
2063  /* check for bogus TID */
2064  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2065  break;
2066 
2067  lp = PageGetItemId(dp, offnum);
2068 
2069  /* check for unused, dead, or redirected items */
2070  if (!ItemIdIsNormal(lp))
2071  {
2072  /* We should only see a redirect at start of chain */
2073  if (ItemIdIsRedirected(lp) && at_chain_start)
2074  {
2075  /* Follow the redirect */
2076  offnum = ItemIdGetRedirect(lp);
2077  at_chain_start = false;
2078  continue;
2079  }
2080  /* else must be end of chain */
2081  break;
2082  }
2083 
2084  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2085  heapTuple->t_len = ItemIdGetLength(lp);
2086  heapTuple->t_tableOid = RelationGetRelid(relation);
2087  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2088 
2089  /*
2090  * Shouldn't see a HEAP_ONLY tuple at chain start.
2091  */
2092  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2093  break;
2094 
2095  /*
2096  * The xmin should match the previous xmax value, else chain is
2097  * broken.
2098  */
2099  if (TransactionIdIsValid(prev_xmax) &&
2100  !TransactionIdEquals(prev_xmax,
2101  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2102  break;
2103 
2104  /*
2105  * When first_call is true (and thus, skip is initially false) we'll
2106  * return the first tuple we find. But on later passes, heapTuple
2107  * will initially be pointing to the tuple we returned last time.
2108  * Returning it again would be incorrect (and would loop forever), so
2109  * we skip it and return the next match we find.
2110  */
2111  if (!skip)
2112  {
2113  /*
2114  * For the benefit of logical decoding, have t_self point at the
2115  * element of the HOT chain we're currently investigating instead
2116  * of the root tuple of the HOT chain. This is important because
2117  * the *Satisfies routine for historical mvcc snapshots needs the
2118  * correct tid to decide about the visibility in some cases.
2119  */
2120  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2121 
2122  /* If it's visible per the snapshot, we must return it */
2123  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2124  CheckForSerializableConflictOut(valid, relation, heapTuple,
2125  buffer, snapshot);
2126  /* reset to original, non-redirected, tid */
2127  heapTuple->t_self = *tid;
2128 
2129  if (valid)
2130  {
2131  ItemPointerSetOffsetNumber(tid, offnum);
2132  PredicateLockTuple(relation, heapTuple, snapshot);
2133  if (all_dead)
2134  *all_dead = false;
2135  return true;
2136  }
2137  }
2138  skip = false;
2139 
2140  /*
2141  * If we can't see it, maybe no one else can either. At caller
2142  * request, check whether all chain members are dead to all
2143  * transactions.
2144  *
2145  * Note: if you change the criterion here for what is "dead", fix the
2146  * planner's get_actual_variable_range() function to match.
2147  */
2148  if (all_dead && *all_dead &&
2149  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2150  *all_dead = false;
2151 
2152  /*
2153  * Check to see if HOT chain continues past this tuple; if so fetch
2154  * the next offnum and loop around.
2155  */
2156  if (HeapTupleIsHotUpdated(heapTuple))
2157  {
2158  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
2159  ItemPointerGetBlockNumber(tid));
2160  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2161  at_chain_start = false;
2162  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2163  }
2164  else
2165  break; /* end of chain */
2166  }
2167 
2168  return false;
2169 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:364
static void skip(struct vars *v)
Definition: regc_lex.c:1109
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:105
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:463
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:77
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3945
bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
Definition: tqual.c:1420
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:682
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_ctid
Definition: htup_details.h:155
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:691
#define Assert(condition)
Definition: c.h:688
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:312
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2543
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:126
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:425
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:105
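
Unlike heap_hot_search(), this variant expects the caller to already hold a pin on buffer (and normally a share lock on its content), which is how bitgetpage() and index_fetch_heap() drive it; first_call distinguishes the first probe of a chain from continuation calls. A sketch of the calling convention, with buf, rel, snapshot, heapTuple, all_dead and found assumed to be caller-provided locals:

    LockBuffer(buf, BUFFER_LOCK_SHARE);         /* the pin was taken earlier */
    found = heap_hot_search_buffer(&tid,        /* on success, set to the matching offset */
                                   rel, buf, snapshot,
                                   &heapTuple,  /* filled with the visible chain member */
                                   &all_dead,
                                   true);       /* first_call: start at the chain root */
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);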

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6271 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6272 {
6273  Buffer buffer;
6274  Page page;
6275  OffsetNumber offnum;
6276  ItemId lp = NULL;
6277  HeapTupleHeader htup;
6278  uint32 oldlen;
6279  uint32 newlen;
6280 
6281  /*
6282  * For now, parallel operations are required to be strictly read-only.
6283  * Unlike a regular update, this should never create a combo CID, so it
6284  * might be possible to relax this restriction, but not without more
6285  * thought and testing. It's not clear that it would be useful, anyway.
6286  */
6287  if (IsInParallelMode())
6288  ereport(ERROR,
6289  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6290  errmsg("cannot update tuples during a parallel operation")));
6291 
6292  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6294  page = (Page) BufferGetPage(buffer);
6295 
6296  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6297  if (PageGetMaxOffsetNumber(page) >= offnum)
6298  lp = PageGetItemId(page, offnum);
6299 
6300  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6301  elog(ERROR, "invalid lp");
6302 
6303  htup = (HeapTupleHeader) PageGetItem(page, lp);
6304 
6305  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6306  newlen = tuple->t_len - tuple->t_data->t_hoff;
6307  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6308  elog(ERROR, "wrong tuple length");
6309 
6310  /* NO EREPORT(ERROR) from here till changes are logged */
6311  START_CRIT_SECTION();
6312 
6313  memcpy((char *) htup + htup->t_hoff,
6314  (char *) tuple->t_data + tuple->t_data->t_hoff,
6315  newlen);
6316 
6317  MarkBufferDirty(buffer);
6318 
6319  /* XLOG stuff */
6320  if (RelationNeedsWAL(relation))
6321  {
6322  xl_heap_inplace xlrec;
6323  XLogRecPtr recptr;
6324 
6325  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6326 
6327  XLogBeginInsert();
6328  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6329 
6330  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6331  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6332 
6333  /* inplace updates aren't decoded atm, don't log the origin */
6334 
6335  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6336 
6337  PageSetLSN(page, recptr);
6338  }
6339 
6340  END_CRIT_SECTION();
6341 
6342  UnlockReleaseBuffer(buffer);
6343 
6344  /*
6345  * Send out shared cache inval if necessary. Note that because we only
6346  * pass the new version of the tuple, this mustn't be used for any
6347  * operations that could change catcache lookup keys. But we aren't
6348  * bothering with index updates either, so that's true a fortiori.
6349  */
6351  CacheInvalidateHeapTuple(relation, tuple, NULL);
6352 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define SizeOfHeapInplace
Definition: heapam_xlog.h:286
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:906
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:34
unsigned int uint32
Definition: c.h:314
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
OffsetNumber offnum
Definition: heapam_xlog.h:282
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:514
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:368
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
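
Because the tuple is overwritten without creating a new MVCC version, this is only safe for a handful of catalog fields that every snapshot may legitimately see change, which is how vac_update_relstats() uses it. A sketch of that pattern with hypothetical relpages/reltuples values (relid, num_pages and num_tuples are assumed inputs; this is not the actual vacuum code):

    Relation      rd = heap_open(RelationRelationId, RowExclusiveLock);
    HeapTuple     ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
    Form_pg_class pgcform;

    if (!HeapTupleIsValid(ctup))
        elog(ERROR, "pg_class entry for relation %u vanished", relid);
    pgcform = (Form_pg_class) GETSTRUCT(ctup);

    pgcform->relpages = num_pages;      /* only fixed-width fields may be changed */
    pgcform->reltuples = num_tuples;

    heap_inplace_update(rd, ctup);      /* non-transactional overwrite, WAL-logged */
    heap_close(rd, RowExclusiveLock);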

◆ heap_insert()

Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2436 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

2438 {
2439  TransactionId xid = GetCurrentTransactionId();
2440  HeapTuple heaptup;
2441  Buffer buffer;
2442  Buffer vmbuffer = InvalidBuffer;
2443  bool all_visible_cleared = false;
2444 
2445  /*
2446  * Fill in tuple header fields, assign an OID, and toast the tuple if
2447  * necessary.
2448  *
2449  * Note: below this point, heaptup is the data we actually intend to store
2450  * into the relation; tup is the caller's original untoasted data.
2451  */
2452  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2453 
2454  /*
2455  * Find buffer to insert this tuple into. If the page is all visible,
2456  * this will also pin the requisite visibility map page.
2457  */
2458  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2459  InvalidBuffer, options, bistate,
2460  &vmbuffer, NULL);
2461 
2462  /*
2463  * We're about to do the actual insert -- but check for conflict first, to
2464  * avoid possibly having to roll back work we've just done.
2465  *
2466  * This is safe without a recheck as long as there is no possibility of
2467  * another process scanning the page between this check and the insert
2468  * being visible to the scan (i.e., an exclusive buffer content lock is
2469  * continuously held from this point until the tuple insert is visible).
2470  *
2471  * For a heap insert, we only need to check for table-level SSI locks. Our
2472  * new tuple can't possibly conflict with existing tuple locks, and heap
2473  * page locks are only consolidated versions of tuple locks; they do not
2474  * lock "gaps" as index page locks do. So we don't need to specify a
2475  * buffer when making the call, which makes for a faster check.
2476  */
2477  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2478 
2479  /* NO EREPORT(ERROR) from here till changes are logged */
2480  START_CRIT_SECTION();
2481 
2482  RelationPutHeapTuple(relation, buffer, heaptup,
2483  (options & HEAP_INSERT_SPECULATIVE) != 0);
2484 
2485  if (PageIsAllVisible(BufferGetPage(buffer)))
2486  {
2487  all_visible_cleared = true;
2489  visibilitymap_clear(relation,
2490  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2491  vmbuffer, VISIBILITYMAP_VALID_BITS);
2492  }
2493 
2494  /*
2495  * XXX Should we set PageSetPrunable on this page ?
2496  *
2497  * The inserting transaction may eventually abort thus making this tuple
2498  * DEAD and hence available for pruning. Though we don't want to optimize
2499  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2500  * aborted tuple will never be pruned until next vacuum is triggered.
2501  *
2502  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2503  */
2504 
2505  MarkBufferDirty(buffer);
2506 
2507  /* XLOG stuff */
2508  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2509  {
2510  xl_heap_insert xlrec;
2511  xl_heap_header xlhdr;
2512  XLogRecPtr recptr;
2513  Page page = BufferGetPage(buffer);
2514  uint8 info = XLOG_HEAP_INSERT;
2515  int bufflags = 0;
2516 
2517  /*
2518  * If this is a catalog, we need to transmit combocids to properly
2519  * decode, so log that as well.
2520  */
2522  log_heap_new_cid(relation, heaptup);
2523 
2524  /*
2525  * If this is the single and first tuple on page, we can reinit the
2526  * page instead of restoring the whole thing. Set flag, and hide
2527  * buffer references from XLogInsert.
2528  */
2529  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2530  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2531  {
2532  info |= XLOG_HEAP_INIT_PAGE;
2533  bufflags |= REGBUF_WILL_INIT;
2534  }
2535 
2536  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2537  xlrec.flags = 0;
2538  if (all_visible_cleared)
2539  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2540  if (options & HEAP_INSERT_SPECULATIVE)
2541  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2542  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2543 
2544  /*
2545  * For logical decoding, we need the tuple even if we're doing a full
2546  * page write, so make sure it's included even if we take a full-page
2547  * image. (XXX We could alternatively store a pointer into the FPW).
2548  */
2549  if (RelationIsLogicallyLogged(relation))
2550  {
2551  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2552  bufflags |= REGBUF_KEEP_DATA;
2553  }
2554 
2555  XLogBeginInsert();
2556  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2557 
2558  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2559  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2560  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2561 
2562  /*
2563  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2564  * write the whole page to the xlog, we don't need to store
2565  * xl_heap_header in the xlog.
2566  */
2567  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2568  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2569  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2570  XLogRegisterBufData(0,
2571  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2572  heaptup->t_len - SizeofHeapTupleHeader);
2573 
2574  /* filtering by origin on a row level is much more efficient */
2575  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2576 
2577  recptr = XLogInsert(RM_HEAP_ID, info);
2578 
2579  PageSetLSN(page, recptr);
2580  }
2581 
2582  END_CRIT_SECTION();
2583 
2584  UnlockReleaseBuffer(buffer);
2585  if (vmbuffer != InvalidBuffer)
2586  ReleaseBuffer(vmbuffer);
2587 
2588  /*
2589  * If tuple is cachable, mark it for invalidation from the caches in case
2590  * we abort. Note it is OK to do this after releasing the buffer, because
2591  * the heaptup data structure is all in local memory, not in the shared
2592  * buffer.
2593  */
2594  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2595 
2596  /* Note: speculative insertions are counted too, even if aborted later */
2597  pgstat_count_heap_insert(relation, 1);
2598 
2599  /*
2600  * If heaptup is a private copy, release it. Don't forget to copy t_self
2601  * back to the caller's image, too.
2602  */
2603  if (heaptup != tup)
2604  {
2605  tup->t_self = heaptup->t_self;
2606  heap_freetuple(heaptup);
2607  }
2608 
2609  return HeapTupleGetOid(tup);
2610 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
#define SizeofHeapTupleHeader
Definition: htup_details.h:175
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7842
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2620
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:463
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
unsigned char uint8
Definition: c.h:312
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:32
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define HEAP_INSERT_SKIP_WAL
Definition: heapam.h:28
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:584
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4326
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:31
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:418
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:34
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:297
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:568
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:37
#define PageClearAllVisible(page)
Definition: bufpage.h:385
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:688
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:514
#define SizeOfHeapInsert
Definition: heapam_xlog.h:138
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:1907
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:700
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:132
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
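
A minimal direct call, assuming rel is an already-opened and locked relation with a single int4 column; most in-core callers go through simple_heap_insert(), which is just heap_insert() with the current command ID and no options. Note that heap_insert() does not touch indexes, so real callers add index entries separately (e.g. via ExecInsertIndexTuples()).

    Datum       values[1] = {Int32GetDatum(42)};
    bool        isnull[1] = {false};
    HeapTuple   tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
    Oid         newoid;

    newoid = heap_insert(rel, tup,
                         GetCurrentCommandId(true), /* command ID of this change */
                         0,                         /* options: plain WAL-logged insert */
                         NULL);                     /* no BulkInsertState */
    /* newoid is the assigned OID, or InvalidOid for a table WITHOUT OIDS */
    heap_freetuple(tup);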

◆ heap_lock_tuple()

HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer *  buffer,
HeapUpdateFailureData *  hufd 
)

Definition at line 4583 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().
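
A sketch of the calling pattern, close to what ExecLockRows() and GetTupleForTrigger() do: put the target TID into t_self, call heap_lock_tuple(), and act on the HTSU_Result; on HeapTupleUpdated, hufd.ctid and hufd.xmax describe the successor version to chase (EvalPlanQual territory). Variable names here, including rel and target_tid, are illustrative.

    HeapTupleData         locktup;
    Buffer                buf;
    HeapUpdateFailureData hufd;
    HTSU_Result           res;

    ItemPointerCopy(&target_tid, &locktup.t_self);

    res = heap_lock_tuple(rel, &locktup,
                          GetCurrentCommandId(false), /* cid used for visibility checks */
                          LockTupleExclusive,         /* strongest row-level lock mode */
                          LockWaitBlock,              /* sleep if another xact holds it */
                          false,                      /* don't follow the update chain */
                          &buf, &hufd);

    if (res == HeapTupleMayBeUpdated)
    {
        /* lock acquired; locktup and buf hold the locked version */
    }
    else if (res == HeapTupleUpdated)
    {
        /* concurrently updated or deleted by a committed xact; hufd has the details */
    }

    if (BufferIsValid(buf))
        ReleaseBuffer(buf);     /* heap_lock_tuple returns with the buffer pinned, not locked */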

4587 {
4588  HTSU_Result result;
4589  ItemPointer tid = &(tuple->t_self);
4590  ItemId lp;
4591  Page page;
4592  Buffer vmbuffer = InvalidBuffer;
4593  BlockNumber block;
4594  TransactionId xid,
4595  xmax;
4596  uint16 old_infomask,
4597  new_infomask,
4598  new_infomask2;
4599  bool first_time = true;
4600  bool have_tuple_lock = false;
4601  bool cleared_all_frozen = false;
4602 
4603  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4604  block = ItemPointerGetBlockNumber(tid);
4605 
4606  /*
4607  * Before locking the buffer, pin the visibility map page if it appears to
4608  * be necessary. Since we haven't got the lock yet, someone else might be
4609  * in the middle of changing this, so we'll need to recheck after we have
4610  * the lock.
4611  */
4613  visibilitymap_pin(relation, block, &vmbuffer);
4614 
4616 
4617  page = BufferGetPage(*buffer);
4618  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4619  Assert(ItemIdIsNormal(lp));
4620 
4621  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4622  tuple->t_len = ItemIdGetLength(lp);
4623  tuple->t_tableOid = RelationGetRelid(relation);
4624 
4625 l3:
4626  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4627 
4628  if (result == HeapTupleInvisible)
4629  {
4630  /*
4631  * This is possible, but only when locking a tuple for ON CONFLICT
4632  * UPDATE. We return this value here rather than throwing an error in
4633  * order to give that case the opportunity to throw a more specific
4634  * error.
4635  */
4636  result = HeapTupleInvisible;
4637  goto out_locked;
4638  }
4639  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4640  {
4641  TransactionId xwait;
4642  uint16 infomask;
4643  uint16 infomask2;
4644  bool require_sleep;
4645  ItemPointerData t_ctid;
4646 
4647  /* must copy state data before unlocking buffer */
4648  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4649  infomask = tuple->t_data->t_infomask;
4650  infomask2 = tuple->t_data->t_infomask2;
4651  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4652 
4654 
4655  /*
4656  * If any subtransaction of the current top transaction already holds
4657  * a lock as strong as or stronger than what we're requesting, we
4658  * effectively hold the desired lock already. We *must* succeed
4659  * without trying to take the tuple lock, else we will deadlock
4660  * against anyone wanting to acquire a stronger lock.
4661  *
4662  * Note we only do this the first time we loop on the HTSU result;
4663  * there is no point in testing in subsequent passes, because
4664  * evidently our own transaction cannot have acquired a new lock after
4665  * the first time we checked.
4666  */
4667  if (first_time)
4668  {
4669  first_time = false;
4670 
4671  if (infomask & HEAP_XMAX_IS_MULTI)
4672  {
4673  int i;
4674  int nmembers;
4675  MultiXactMember *members;
4676 
4677  /*
4678  * We don't need to allow old multixacts here; if that had
4679  * been the case, HeapTupleSatisfiesUpdate would have returned
4680  * MayBeUpdated and we wouldn't be here.
4681  */
4682  nmembers =
4683  GetMultiXactIdMembers(xwait, &members, false,
4684  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4685 
4686  for (i = 0; i < nmembers; i++)
4687  {
4688  /* only consider members of our own transaction */
4689  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4690  continue;
4691 
4692  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4693  {
4694  pfree(members);
4695  result = HeapTupleMayBeUpdated;
4696  goto out_unlocked;
4697  }
4698  }
4699 
4700  if (members)
4701  pfree(members);
4702  }
4703  else if (TransactionIdIsCurrentTransactionId(xwait))
4704  {
4705  switch (mode)
4706  {
4707  case LockTupleKeyShare:
4708  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4709  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4710  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4711  result = HeapTupleMayBeUpdated;
4712  goto out_unlocked;
4713  case LockTupleShare:
4714  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4715  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4716  {
4717  result = HeapTupleMayBeUpdated;
4718  goto out_unlocked;
4719  }
4720  break;
4722  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4723  {
4724  result = HeapTupleMayBeUpdated;
4725  goto out_unlocked;
4726  }
4727  break;
4728  case LockTupleExclusive:
4729  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4730  infomask2 & HEAP_KEYS_UPDATED)
4731  {
4732  result = HeapTupleMayBeUpdated;
4733  goto out_unlocked;
4734  }
4735  break;
4736  }
4737  }
4738  }
4739 
4740  /*
4741  * Initially assume that we will have to wait for the locking
4742  * transaction(s) to finish. We check various cases below in which
4743  * this can be turned off.
4744  */
4745  require_sleep = true;
4746  if (mode == LockTupleKeyShare)
4747  {
4748  /*
4749  * If we're requesting KeyShare, and there's no update present, we
4750  * don't need to wait. Even if there is an update, we can still
4751  * continue if the key hasn't been modified.
4752  *
4753  * However, if there are updates, we need to walk the update chain
4754  * to mark future versions of the row as locked, too. That way,
4755  * if somebody deletes that future version, we're protected
4756  * against the key going away. This locking of future versions
4757  * could block momentarily, if a concurrent transaction is
4758  * deleting a key; or it could return a value to the effect that
4759  * the transaction deleting the key has already committed. So we
4760  * do this before re-locking the buffer; otherwise this would be
4761  * prone to deadlocks.
4762  *
4763  * Note that the TID we're locking was grabbed before we unlocked
4764  * the buffer. For it to change while we're not looking, the
4765  * other properties we're testing for below after re-locking the
4766  * buffer would also change, in which case we would restart this
4767  * loop above.
4768  */
4769  if (!(infomask2 & HEAP_KEYS_UPDATED))
4770  {
4771  bool updated;
4772 
4773  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4774 
4775  /*
4776  * If there are updates, follow the update chain; bail out if
4777  * that cannot be done.
4778  */
4779  if (follow_updates && updated)
4780  {
4781  HTSU_Result res;
4782 
4783  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4785  mode);
4786  if (res != HeapTupleMayBeUpdated)
4787  {
4788  result = res;
4789  /* recovery code expects to have buffer lock held */
4791  goto failed;
4792  }
4793  }
4794 
4796 
4797  /*
4798  * Make sure it's still an appropriate lock, else start over.
4799  * Also, if it wasn't updated before we released the lock, but
4800  * is updated now, we start over too; the reason is that we
4801  * now need to follow the update chain to lock the new
4802  * versions.
4803  */
4804  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4805  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4806  !updated))
4807  goto l3;
4808 
4809  /* Things look okay, so we can skip sleeping */
4810  require_sleep = false;
4811 
4812  /*
4813  * Note we allow Xmax to change here; other updaters/lockers
4814  * could have modified it before we grabbed the buffer lock.
4815  * However, this is not a problem, because with the recheck we
4816  * just did we ensure that they still don't conflict with the
4817  * lock we want.
4818  */
4819  }
4820  }
4821  else if (mode == LockTupleShare)
4822  {
4823  /*
4824  * If we're requesting Share, we can similarly avoid sleeping if
4825  * there's no update and no exclusive lock present.
4826  */
4827  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4828  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4829  {
4831 
4832  /*
4833  * Make sure it's still an appropriate lock, else start over.
4834  * See above about allowing xmax to change.
4835  */
4836  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4838  goto l3;
4839  require_sleep = false;
4840  }
4841  }
4842  else if (mode == LockTupleNoKeyExclusive)
4843  {
4844  /*
4845  * If we're requesting NoKeyExclusive, we might also be able to
4846  * avoid sleeping; just ensure that there no conflicting lock
4847  * already acquired.
4848  */
4849  if (infomask & HEAP_XMAX_IS_MULTI)
4850  {
4851  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4852  mode))
4853  {
4854  /*
4855  * No conflict, but if the xmax changed under us in the
4856  * meantime, start over.
4857  */
4859  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4861  xwait))
4862  goto l3;
4863 
4864  /* otherwise, we're good */
4865  require_sleep = false;
4866  }
4867  }
4868  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4869  {
4871 
4872  /* if the xmax changed in the meantime, start over */
4873  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4876  xwait))
4877  goto l3;
4878  /* otherwise, we're good */
4879  require_sleep = false;
4880  }
4881  }
4882 
4883  /*
4884  * As a check independent from those above, we can also avoid sleeping
4885  * if the current transaction is the sole locker of the tuple. Note
4886  * that the strength of the lock already held is irrelevant; this is
4887  * not about recording the lock in Xmax (which will be done regardless
4888  * of this optimization, below). Also, note that the cases where we
4889  * hold a lock stronger than we are requesting are already handled
4890  * above by not doing anything.
4891  *
4892  * Note we only deal with the non-multixact case here; MultiXactIdWait
4893  * is well equipped to deal with this situation on its own.
4894  */
4895  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4897  {
4898  /* ... but if the xmax changed in the meantime, start over */
4900  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4902  xwait))
4903  goto l3;
4905  require_sleep = false;
4906  }
4907 
4908  /*
4909  * Time to sleep on the other transaction/multixact, if necessary.
4910  *
4911  * If the other transaction is an update that's already committed,
4912  * then sleeping cannot possibly do any good: if we're required to
4913  * sleep, get out to raise an error instead.
4914  *
4915  * By here, we either have already acquired the buffer exclusive lock,
4916  * or we must wait for the locking transaction or multixact; so below
4917  * we ensure that we grab buffer lock after the sleep.
4918  */
4919  if (require_sleep && result == HeapTupleUpdated)
4920  {
4922  goto failed;
4923  }
4924  else if (require_sleep)
4925  {
4926  /*
4927  * Acquire tuple lock to establish our priority for the tuple, or
4928  * die trying. LockTuple will release us when we are next-in-line
4929  * for the tuple. We must do this even if we are share-locking.
4930  *
4931  * If we are forced to "start over" below, we keep the tuple lock;
4932  * this arranges that we stay at the head of the line while
4933  * rechecking tuple state.
4934  */
4935  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4936  &have_tuple_lock))
4937  {
4938  /*
4939  * This can only happen if wait_policy is Skip and the lock
4940  * couldn't be obtained.
4941  */
4942  result = HeapTupleWouldBlock;
4943  /* recovery code expects to have buffer lock held */
4944  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4945  goto failed;
4946  }
4947 
4948  if (infomask & HEAP_XMAX_IS_MULTI)
4949  {
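/* Translate the requested tuple lock mode into the MultiXactStatus we must wait on. */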
4950  MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4951 
4952  /* We only ever lock tuples, never update them */
4953  if (status >= MultiXactStatusNoKeyUpdate)
4954  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4955 
4956  /* wait for multixact to end, or die trying */
4957  switch (wait_policy)
4958  {
4959  case LockWaitBlock:
4960  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4961  relation, &tuple->t_self, XLTW_Lock, NULL);
4962  break;
4963  case LockWaitSkip:
4964  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4965  status, infomask, relation,
4966  NULL))
4967  {
4968  result = HeapTupleWouldBlock;
4969  /* recovery code expects to have buffer lock held */
4970  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4971  goto failed;
4972  }
4973  break;
4974  case LockWaitError:
4975  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4976  status, infomask, relation,
4977  NULL))
4978  ereport(ERROR,
4979  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4980  errmsg("could not obtain lock on row in relation \"%s\"",
4981  RelationGetRelationName(relation))));
4982 
4983  break;
4984  }
4985 
4986  /*
4987  * Of course, the multixact might not be done here: if we're
4988  * requesting a light lock mode, other transactions with light
4989  * locks could still be alive, as well as locks owned by our
4990  * own xact or other subxacts of this backend. We need to
4991  * preserve the surviving MultiXact members. Note that it
4992  * isn't absolutely necessary in the latter case, but doing so
4993  * is simpler.
4994  */
4995  }
4996  else
4997  {
4998  /* wait for regular transaction to end, or die trying */
4999  switch (wait_policy)
5000  {
5001  case LockWaitBlock:
5002  XactLockTableWait(xwait, relation, &tuple->t_self,
5003  XLTW_Lock);
5004  break;
5005  case LockWaitSkip:
5006  if (!ConditionalXactLockTableWait(xwait))
5007  {
5008  result = HeapTupleWouldBlock;
5009  /* recovery code expects to have buffer lock held */
5010  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5011  goto failed;
5012  }
5013  break;
5014  case LockWaitError:
5015  if (!ConditionalXactLockTableWait(xwait))
5016  ereport(ERROR,
5017  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5018  errmsg("could not obtain lock on row in relation \"%s\"",
5019  RelationGetRelationName(relation))));
5020  break;
5021  }
5022  }
5023 
5024  /* if there are updates, follow the update chain */
5025  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5026  {
5027  HTSU_Result res;
5028 
5029  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5030  GetCurrentTransactionId(),
5031  mode);
5032  if (res != HeapTupleMayBeUpdated)
5033  {
5034  result = res;
5035  /* recovery code expects to have buffer lock held */
5036  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5037  goto failed;
5038  }
5039  }
5040 
5041  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5042 
5043  /*
5044  * xwait is done, but if xwait had just locked the tuple then some
5045  * other xact could update this tuple before we get to this point.
5046  * Check for xmax change, and start over if so.
5047  */
5048  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5049  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5050  xwait))
5051  goto l3;
5052 
5053  if (!(infomask & HEAP_XMAX_IS_MULTI))
5054  {
5055  /*
5056  * Otherwise check if it committed or aborted. Note we cannot
5057  * be here if the tuple was only locked by somebody who didn't
5058  * conflict with us; that would have been handled above. So
5059  * that transaction must necessarily be gone by now. But
5060  * don't check for this in the multixact case, because some
5061  * locker transactions might still be running.
5062  */
5063  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5064  }
5065  }
5066 
5067  /* By here, we're certain that we hold buffer exclusive lock again */
5068 
5069  /*
5070  * We may lock if previous xmax aborted, or if it committed but only
5071  * locked the tuple without updating it; or if we didn't have to wait
5072  * at all for whatever reason.
5073  */
5074  if (!require_sleep ||
5075  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5076  HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5077  HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5078  result = HeapTupleMayBeUpdated;
5079  else
5080  result = HeapTupleUpdated;
5081  }
5082 
5083 failed:
5084  if (result != HeapTupleMayBeUpdated)
5085  {
5086  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5087  result == HeapTupleWouldBlock);
5088  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
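/* Pass details of the conflicting tuple version back to the caller via hufd. */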
5089  hufd->ctid = tuple->t_data->t_ctid;
5090  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5091  if (result == HeapTupleSelfUpdated)
5092  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5093  else
5094  hufd->cmax = InvalidCommandId;
5095  goto out_locked;
5096  }
5097 
5098  /*
5099  * If we didn't pin the visibility map page and the page has become all
5100  * visible while we were busy locking the buffer, or during some
5101  * subsequent window during which we had it unlocked, we'll have to unlock
5102  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5103  * unfortunate, especially since we'll now have to recheck whether the
5104  * tuple has been locked or updated under us, but hopefully it won't
5105  * happen very often.
5106  */