PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/subtrans.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define HEAPAMSLOTDEBUG_1
 
#define HEAPAMSLOTDEBUG_2
 
#define HEAPAMSLOTDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static TM_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_changed, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (TableScanDesc sscan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
TableScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
 
void heap_rescan (TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (TableScanDesc sscan)
 
HeapTuple heap_getnext (TableScanDesc sscan, ScanDirection direction)
 
bool heap_getnextslot (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
void heap_get_latest_tid (TableScanDesc sscan, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
void heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
void simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
TM_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
TM_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
TM_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
 
static TM_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
 
static TM_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, ItemPointer tid)
 
void heap_abort_speculative (Relation relation, ItemPointer tid)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
TransactionId heap_compute_xid_horizon_for_tuples (Relation rel, ItemPointerData *tids, int nitems)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 
void HeapCheckForSerializableConflictOut (bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
 

Variables

struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 163 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 5809 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 5812 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 5808 of file heapam.c.

Referenced by FreezeMultiXactId().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 5811 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 5810 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ HEAPAMSLOTDEBUG_1

#define HEAPAMSLOTDEBUG_1

Definition at line 1343 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPAMSLOTDEBUG_2

#define HEAPAMSLOTDEBUG_2

Definition at line 1344 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPAMSLOTDEBUG_3

#define HEAPAMSLOTDEBUG_3

Definition at line 1345 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPDEBUG_1

#define HEAPDEBUG_1

Definition at line 1283 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_2

#define HEAPDEBUG_2

Definition at line 1284 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_3

#define HEAPDEBUG_3

Definition at line 1285 of file heapam.c.

Referenced by heap_getnext().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 159 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 195 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 161 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2407 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2408 {
2409  return
2410  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2411  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2412  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2413  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2414  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2415  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2416  XLHL_KEYS_UPDATED : 0);
2417 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 4722 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

4727 {
4728  TransactionId new_xmax;
4729  uint16 new_infomask,
4730  new_infomask2;
4731 
4732  Assert(TransactionIdIsValid(add_to_xmax));
4733 
4734 l5:
4735  new_infomask = 0;
4736  new_infomask2 = 0;
4737  if (old_infomask & HEAP_XMAX_INVALID)
4738  {
4739  /*
4740  * No previous locker; we just insert our own TransactionId.
4741  *
4742  * Note that it's critical that this case be the first one checked,
4743  * because there are several blocks below that come back to this one
4744  * to implement certain optimizations; old_infomask might contain
4745  * other dirty bits in those cases, but we don't really care.
4746  */
4747  if (is_update)
4748  {
4749  new_xmax = add_to_xmax;
4750  if (mode == LockTupleExclusive)
4751  new_infomask2 |= HEAP_KEYS_UPDATED;
4752  }
4753  else
4754  {
4755  new_infomask |= HEAP_XMAX_LOCK_ONLY;
4756  switch (mode)
4757  {
4758  case LockTupleKeyShare:
4759  new_xmax = add_to_xmax;
4760  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4761  break;
4762  case LockTupleShare:
4763  new_xmax = add_to_xmax;
4764  new_infomask |= HEAP_XMAX_SHR_LOCK;
4765  break;
4766  case LockTupleNoKeyExclusive:
4767  new_xmax = add_to_xmax;
4768  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4769  break;
4770  case LockTupleExclusive:
4771  new_xmax = add_to_xmax;
4772  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4773  new_infomask2 |= HEAP_KEYS_UPDATED;
4774  break;
4775  default:
4776  new_xmax = InvalidTransactionId; /* silence compiler */
4777  elog(ERROR, "invalid lock mode");
4778  }
4779  }
4780  }
4781  else if (old_infomask & HEAP_XMAX_IS_MULTI)
4782  {
4783  MultiXactStatus new_status;
4784 
4785  /*
4786  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
4787  * cross-check.
4788  */
4789  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4790 
4791  /*
4792  * A multixact together with LOCK_ONLY set but neither lock bit set
4793  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4794  * anymore. This check is critical for databases upgraded by
4795  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4796  * that such multis are never passed.
4797  */
4798  if (HEAP_LOCKED_UPGRADED(old_infomask))
4799  {
4800  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4801  old_infomask |= HEAP_XMAX_INVALID;
4802  goto l5;
4803  }
4804 
4805  /*
4806  * If the XMAX is already a MultiXactId, then we need to expand it to
4807  * include add_to_xmax; but if all the members were lockers and are
4808  * all gone, we can do away with the IS_MULTI bit and just set
4809  * add_to_xmax as the only locker/updater. If all lockers are gone
4810  * and we have an updater that aborted, we can also do without a
4811  * multi.
4812  *
4813  * The cost of doing GetMultiXactIdMembers would be paid by
4814  * MultiXactIdExpand if we weren't to do this, so this check is not
4815  * incurring extra work anyhow.
4816  */
4817  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4818  {
4819  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4820  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
4821  old_infomask)))
4822  {
4823  /*
4824  * Reset these bits and restart; otherwise fall through to
4825  * create a new multi below.
4826  */
4827  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4828  old_infomask |= HEAP_XMAX_INVALID;
4829  goto l5;
4830  }
4831  }
4832 
4833  new_status = get_mxact_status_for_lock(mode, is_update);
4834 
4835  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4836  new_status);
4837  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4838  }
4839  else if (old_infomask & HEAP_XMAX_COMMITTED)
4840  {
4841  /*
4842  * It's a committed update, so we need to preserve him as updater of
4843  * the tuple.
4844  */
4845  MultiXactStatus status;
4846  MultiXactStatus new_status;
4847 
4848  if (old_infomask2 & HEAP_KEYS_UPDATED)
4849  status = MultiXactStatusUpdate;
4850  else
4851  status = MultiXactStatusNoKeyUpdate;
4852 
4853  new_status = get_mxact_status_for_lock(mode, is_update);
4854 
4855  /*
4856  * since it's not running, it's obviously impossible for the old
4857  * updater to be identical to the current one, so we need not check
4858  * for that case as we do in the block above.
4859  */
4860  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4861  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4862  }
4863  else if (TransactionIdIsInProgress(xmax))
4864  {
4865  /*
4866  * If the XMAX is a valid, in-progress TransactionId, then we need to
4867  * create a new MultiXactId that includes both the old locker or
4868  * updater and our own TransactionId.
4869  */
4870  MultiXactStatus new_status;
4871  MultiXactStatus old_status;
4872  LockTupleMode old_mode;
4873 
4874  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4875  {
4876  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4877  old_status = MultiXactStatusForKeyShare;
4878  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4879  old_status = MultiXactStatusForShare;
4880  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4881  {
4882  if (old_infomask2 & HEAP_KEYS_UPDATED)
4883  old_status = MultiXactStatusForUpdate;
4884  else
4885  old_status = MultiXactStatusForNoKeyUpdate;
4886  }
4887  else
4888  {
4889  /*
4890  * LOCK_ONLY can be present alone only when a page has been
4891  * upgraded by pg_upgrade. But in that case,
4892  * TransactionIdIsInProgress() should have returned false. We
4893  * assume it's no longer locked in this case.
4894  */
4895  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4896  old_infomask |= HEAP_XMAX_INVALID;
4897  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4898  goto l5;
4899  }
4900  }
4901  else
4902  {
4903  /* it's an update, but which kind? */
4904  if (old_infomask2 & HEAP_KEYS_UPDATED)
4905  old_status = MultiXactStatusUpdate;
4906  else
4907  old_status = MultiXactStatusNoKeyUpdate;
4908  }
4909 
4910  old_mode = TUPLOCK_from_mxstatus(old_status);
4911 
4912  /*
4913  * If the lock to be acquired is for the same TransactionId as the
4914  * existing lock, there's an optimization possible: consider only the
4915  * strongest of both locks as the only one present, and restart.
4916  */
4917  if (xmax == add_to_xmax)
4918  {
4919  /*
4920  * Note that it's not possible for the original tuple to be
4921  * updated: we wouldn't be here because the tuple would have been
4922  * invisible and we wouldn't try to update it. As a subtlety,
4923  * this code can also run when traversing an update chain to lock
4924  * future versions of a tuple. But we wouldn't be here either,
4925  * because the add_to_xmax would be different from the original
4926  * updater.
4927  */
4928  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4929 
4930  /* acquire the strongest of both */
4931  if (mode < old_mode)
4932  mode = old_mode;
4933  /* mustn't touch is_update */
4934 
4935  old_infomask |= HEAP_XMAX_INVALID;
4936  goto l5;
4937  }
4938 
4939  /* otherwise, just fall back to creating a new multixact */
4940  new_status = get_mxact_status_for_lock(mode, is_update);
4941  new_xmax = MultiXactIdCreate(xmax, old_status,
4942  add_to_xmax, new_status);
4943  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4944  }
4945  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4946  TransactionIdDidCommit(xmax))
4947  {
4948  /*
4949  * It's a committed update, so we gotta preserve him as updater of the
4950  * tuple.
4951  */
4952  MultiXactStatus status;
4953  MultiXactStatus new_status;
4954 
4955  if (old_infomask2 & HEAP_KEYS_UPDATED)
4956  status = MultiXactStatusUpdate;
4957  else
4958  status = MultiXactStatusNoKeyUpdate;
4959 
4960  new_status = get_mxact_status_for_lock(mode, is_update);
4961 
4962  /*
4963  * since it's not running, it's obviously impossible for the old
4964  * updater to be identical to the current one, so we need not check
4965  * for that case as we do in the block above.
4966  */
4967  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4968  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4969  }
4970  else
4971  {
4972  /*
4973  * Can get here iff the locking/updating transaction was running when
4974  * the infomask was extracted from the tuple, but finished before
4975  * TransactionIdIsInProgress got to run. Deal with it as if there was
4976  * no locker at all in the first place.
4977  */
4978  old_infomask |= HEAP_XMAX_INVALID;
4979  goto l5;
4980  }
4981 
4982  *result_infomask = new_infomask;
4983  *result_infomask2 = new_infomask2;
4984  *result_xmax = new_xmax;
4985 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6417
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
uint32 TransactionId
Definition: c.h:513
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:988
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:206
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:262
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6498
unsigned short uint16
Definition: c.h:366
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:386
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId MultiXactId
Definition: c.h:523
#define Assert(condition)
Definition: c.h:738
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:195
#define elog(elevel,...)
Definition: elog.h:214
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:3935
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:264
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:266
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:439

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 6765 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

6767 {
6768  return Do_MultiXactIdWait(multi, status, infomask, true,
6769  rel, NULL, XLTW_None, remaining);
6770 }
int remaining
Definition: informix.c:667
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6665
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 6665 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

6669 {
6670  bool result = true;
6671  MultiXactMember *members;
6672  int nmembers;
6673  int remain = 0;
6674 
6675  /* for pre-pg_upgrade tuples, no need to sleep at all */
6676  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
6677  GetMultiXactIdMembers(multi, &members, false,
6678  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6679 
6680  if (nmembers >= 0)
6681  {
6682  int i;
6683 
6684  for (i = 0; i < nmembers; i++)
6685  {
6686  TransactionId memxid = members[i].xid;
6687  MultiXactStatus memstatus = members[i].status;
6688 
6689  if (TransactionIdIsCurrentTransactionId(memxid))
6690  {
6691  remain++;
6692  continue;
6693  }
6694 
6695  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6696  LOCKMODE_from_mxstatus(status)))
6697  {
6698  if (remaining && TransactionIdIsInProgress(memxid))
6699  remain++;
6700  continue;
6701  }
6702 
6703  /*
6704  * This member conflicts with our multi, so we have to sleep (or
6705  * return failure, if asked to avoid waiting.)
6706  *
6707  * Note that we don't set up an error context callback ourselves,
6708  * but instead we pass the info down to XactLockTableWait. This
6709  * might seem a bit wasteful because the context is set up and
6710  * tore down for each member of the multixact, but in reality it
6711  * should be barely noticeable, and it avoids duplicate code.
6712  */
6713  if (nowait)
6714  {
6715  result = ConditionalXactLockTableWait(memxid);
6716  if (!result)
6717  break;
6718  }
6719  else
6720  XactLockTableWait(memxid, rel, ctid, oper);
6721  }
6722 
6723  pfree(members);
6724  }
6725 
6726  if (remaining)
6727  *remaining = remain;
6728 
6729  return result;
6730 }
int remaining
Definition: informix.c:667
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:513
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:988
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:151
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:697
void pfree(void *pointer)
Definition: mcxt.c:1056
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:583
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode,
bool current_is_member 
)
static

Definition at line 6566 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6568 {
6569  int nmembers;
6570  MultiXactMember *members;
6571  bool result = false;
6572  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6573 
6574  if (HEAP_LOCKED_UPGRADED(infomask))
6575  return false;
6576 
6577  nmembers = GetMultiXactIdMembers(multi, &members, false,
6578  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6579  if (nmembers >= 0)
6580  {
6581  int i;
6582 
6583  for (i = 0; i < nmembers; i++)
6584  {
6585  TransactionId memxid;
6586  LOCKMODE memlockmode;
6587 
6588  if (result && (current_is_member == NULL || *current_is_member))
6589  break;
6590 
6591  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6592 
6593  /* ignore members from current xact (but track their presence) */
6594  memxid = members[i].xid;
6596  {
6597  if (current_is_member != NULL)
6598  *current_is_member = true;
6599  continue;
6600  }
6601  else if (result)
6602  continue;
6603 
6604  /* ignore members that don't conflict with the lock we want */
6605  if (!DoLockModesConflict(memlockmode, wanted))
6606  continue;
6607 
6608  if (ISUPDATE_from_mxstatus(members[i].status))
6609  {
6610  /* ignore aborted updaters */
6611  if (TransactionIdDidAbort(memxid))
6612  continue;
6613  }
6614  else
6615  {
6616  /* ignore lockers-only that are no longer in progress */
6617  if (!TransactionIdIsInProgress(memxid))
6618  continue;
6619  }
6620 
6621  /*
6622  * Whatever remains are either live lockers that conflict with our
6623  * wanted lock, and updaters that are not aborted. Those conflict
6624  * with what we want. Set up to return true, but keep going to
6625  * look for the current transaction among the multixact members,
6626  * if needed.
6627  */
6628  result = true;
6629  }
6630  pfree(members);
6631  }
6632 
6633  return result;
6634 }
uint32 TransactionId
Definition: c.h:513
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:988
static const struct @19 tupleLockExtraInfo[MaxLockTupleMode+1]
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:151
void pfree(void *pointer)
Definition: mcxt.c:1056
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:583
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_changed,
bool copy 
)
static

Definition at line 7613 of file heapam.c.

References Assert, bms_free(), bms_is_empty(), bms_is_member(), FirstLowInvalidHeapAttributeNumber, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleHasExternal, i, INDEX_ATTR_BITMAP_IDENTITY_KEY, MaxHeapAttributeNumber, TupleDescData::natts, RelationData::rd_rel, RelationGetDescr, RelationGetIndexAttrBitmap(), RelationIsLogicallyLogged, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7615 {
7616  TupleDesc desc = RelationGetDescr(relation);
7617  char replident = relation->rd_rel->relreplident;
7618  Bitmapset *idattrs;
7619  HeapTuple key_tuple;
7620  bool nulls[MaxHeapAttributeNumber];
7622 
7623  *copy = false;
7624 
7625  if (!RelationIsLogicallyLogged(relation))
7626  return NULL;
7627 
7628  if (replident == REPLICA_IDENTITY_NOTHING)
7629  return NULL;
7630 
7631  if (replident == REPLICA_IDENTITY_FULL)
7632  {
7633  /*
7634  * When logging the entire old tuple, it very well could contain
7635  * toasted columns. If so, force them to be inlined.
7636  */
7637  if (HeapTupleHasExternal(tp))
7638  {
7639  *copy = true;
7640  tp = toast_flatten_tuple(tp, desc);
7641  }
7642  return tp;
7643  }
7644 
7645  /* if the key hasn't changed and we're only logging the key, we're done */
7646  if (!key_changed)
7647  return NULL;
7648 
7649  /* find out the replica identity columns */
7650  idattrs = RelationGetIndexAttrBitmap(relation,
7652 
7653  /*
7654  * If there's no defined replica identity columns, treat as !key_changed.
7655  * (This case should not be reachable from heap_update, since that should
7656  * calculate key_changed accurately. But heap_delete just passes constant
7657  * true for key_changed, so we can hit this case in deletes.)
7658  */
7659  if (bms_is_empty(idattrs))
7660  return NULL;
7661 
7662  /*
7663  * Construct a new tuple containing only the replica identity columns,
7664  * with nulls elsewhere. While we're at it, assert that the replica
7665  * identity columns aren't null.
7666  */
7667  heap_deform_tuple(tp, desc, values, nulls);
7668 
7669  for (int i = 0; i < desc->natts; i++)
7670  {
7672  idattrs))
7673  Assert(!nulls[i]);
7674  else
7675  nulls[i] = true;
7676  }
7677 
7678  key_tuple = heap_form_tuple(desc, values, nulls);
7679  *copy = true;
7680 
7681  bms_free(idattrs);
7682 
7683  /*
7684  * If the tuple, which by here only contains indexed columns, still has
7685  * toasted columns, force them to be inlined. This is somewhat unlikely
7686  * since there's limits on the size of indexed columns, so we don't
7687  * duplicate toast_flatten_tuple()s functionality in the above loop over
7688  * the indexed columns, even if it would be more efficient.
7689  */
7690  if (HeapTupleHasExternal(key_tuple))
7691  {
7692  HeapTuple oldtup = key_tuple;
7693 
7694  key_tuple = toast_flatten_tuple(oldtup, desc);
7695  heap_freetuple(oldtup);
7696  }
7697 
7698  return key_tuple;
7699 }
#define RelationGetDescr(relation)
Definition: rel.h:482
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:635
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
uintptr_t Datum
Definition: postgres.h:367
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define Assert(condition)
Definition: c.h:738
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:167
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:4936
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8017 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8018 {
8019  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8021  *infomask2 &= ~HEAP_KEYS_UPDATED;
8022 
8023  if (infobits & XLHL_XMAX_IS_MULTI)
8024  *infomask |= HEAP_XMAX_IS_MULTI;
8025  if (infobits & XLHL_XMAX_LOCK_ONLY)
8026  *infomask |= HEAP_XMAX_LOCK_ONLY;
8027  if (infobits & XLHL_XMAX_EXCL_LOCK)
8028  *infomask |= HEAP_XMAX_EXCL_LOCK;
8029  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8030  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8031  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8032 
8033  if (infobits & XLHL_KEYS_UPDATED)
8034  *infomask2 |= HEAP_KEYS_UPDATED;
8035 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 1831 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferCleanup(), intorel_shutdown(), and transientrel_shutdown().

1832 {
1833  if (bistate->current_buf != InvalidBuffer)
1834  ReleaseBuffer(bistate->current_buf);
1835  FreeAccessStrategy(bistate->strategy);
1836  pfree(bistate);
1837 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
void pfree(void *pointer)
Definition: mcxt.c:1056
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:31
Buffer current_buf
Definition: hio.h:32

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 5836 of file heapam.c.

References Assert, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

5840 {
5842  int i;
5843  MultiXactMember *members;
5844  int nmembers;
5845  bool need_replace;
5846  int nnewmembers;
5847  MultiXactMember *newmembers;
5848  bool has_lockers;
5849  TransactionId update_xid;
5850  bool update_committed;
5851 
5852  *flags = 0;
5853 
5854  /* We should only be called in Multis */
5855  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
5856 
5857  if (!MultiXactIdIsValid(multi) ||
5858  HEAP_LOCKED_UPGRADED(t_infomask))
5859  {
5860  /* Ensure infomask bits are appropriately set/reset */
5861  *flags |= FRM_INVALIDATE_XMAX;
5862  return InvalidTransactionId;
5863  }
5864  else if (MultiXactIdPrecedes(multi, relminmxid))
5865  ereport(ERROR,
5867  errmsg_internal("found multixact %u from before relminmxid %u",
5868  multi, relminmxid)));
5869  else if (MultiXactIdPrecedes(multi, cutoff_multi))
5870  {
5871  /*
5872  * This old multi cannot possibly have members still running, but
5873  * verify just in case. If it was a locker only, it can be removed
5874  * without any further consideration; but if it contained an update,
5875  * we might need to preserve it.
5876  */
5877  if (MultiXactIdIsRunning(multi,
5878  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
5879  ereport(ERROR,
5881  errmsg_internal("multixact %u from before cutoff %u found to be still running",
5882  multi, cutoff_multi)));
5883 
5884  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
5885  {
5886  *flags |= FRM_INVALIDATE_XMAX;
5887  xid = InvalidTransactionId; /* not strictly necessary */
5888  }
5889  else
5890  {
5891  /* replace multi by update xid */
5892  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
5893 
5894  /* wasn't only a lock, xid needs to be valid */
5896 
5897  if (TransactionIdPrecedes(xid, relfrozenxid))
5898  ereport(ERROR,
5900  errmsg_internal("found update xid %u from before relfrozenxid %u",
5901  xid, relfrozenxid)));
5902 
5903  /*
5904  * If the xid is older than the cutoff, it has to have aborted,
5905  * otherwise the tuple would have gotten pruned away.
5906  */
5907  if (TransactionIdPrecedes(xid, cutoff_xid))
5908  {
5909  if (TransactionIdDidCommit(xid))
5910  ereport(ERROR,
5912  errmsg_internal("cannot freeze committed update xid %u", xid)));
5913  *flags |= FRM_INVALIDATE_XMAX;
5914  xid = InvalidTransactionId; /* not strictly necessary */
5915  }
5916  else
5917  {
5918  *flags |= FRM_RETURN_IS_XID;
5919  }
5920  }
5921 
5922  return xid;
5923  }
5924 
5925  /*
5926  * This multixact might have or might not have members still running, but
5927  * we know it's valid and is newer than the cutoff point for multis.
5928  * However, some member(s) of it may be below the cutoff for Xids, so we
5929  * need to walk the whole members array to figure out what to do, if
5930  * anything.
5931  */
5932 
5933  nmembers =
5934  GetMultiXactIdMembers(multi, &members, false,
5935  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
5936  if (nmembers <= 0)
5937  {
5938  /* Nothing worth keeping */
5939  *flags |= FRM_INVALIDATE_XMAX;
5940  return InvalidTransactionId;
5941  }
5942 
5943  /* is there anything older than the cutoff? */
5944  need_replace = false;
5945  for (i = 0; i < nmembers; i++)
5946  {
5947  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
5948  {
5949  need_replace = true;
5950  break;
5951  }
5952  }
5953 
5954  /*
5955  * In the simplest case, there is no member older than the cutoff; we can
5956  * keep the existing MultiXactId as is.
5957  */
5958  if (!need_replace)
5959  {
5960  *flags |= FRM_NOOP;
5961  pfree(members);
5962  return InvalidTransactionId;
5963  }
5964 
5965  /*
5966  * If the multi needs to be updated, figure out which members do we need
5967  * to keep.
5968  */
5969  nnewmembers = 0;
5970  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
5971  has_lockers = false;
5972  update_xid = InvalidTransactionId;
5973  update_committed = false;
5974 
5975  for (i = 0; i < nmembers; i++)
5976  {
5977  /*
5978  * Determine whether to keep this member or ignore it.
5979  */
5980  if (ISUPDATE_from_mxstatus(members[i].status))
5981  {
5982  TransactionId xid = members[i].xid;
5983 
5985  if (TransactionIdPrecedes(xid, relfrozenxid))
5986  ereport(ERROR,
5988  errmsg_internal("found update xid %u from before relfrozenxid %u",
5989  xid, relfrozenxid)));
5990 
5991  /*
5992  * It's an update; should we keep it? If the transaction is known
5993  * aborted or crashed then it's okay to ignore it, otherwise not.
5994  * Note that an updater older than cutoff_xid cannot possibly be
5995  * committed, because HeapTupleSatisfiesVacuum would have returned
5996  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
5997  *
5998  * As with all tuple visibility routines, it's critical to test
5999  * TransactionIdIsInProgress before TransactionIdDidCommit,
6000  * because of race conditions explained in detail in
6001  * heapam_visibility.c.
6002  */
6005  {
6006  Assert(!TransactionIdIsValid(update_xid));
6007  update_xid = xid;
6008  }
6009  else if (TransactionIdDidCommit(xid))
6010  {
6011  /*
6012  * The transaction committed, so we can tell caller to set
6013  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6014  * the transaction is not running.)
6015  */
6016  Assert(!TransactionIdIsValid(update_xid));
6017  update_committed = true;
6018  update_xid = xid;
6019  }
6020  else
6021  {
6022  /*
6023  * Not in progress, not committed -- must be aborted or
6024  * crashed; we can ignore it.
6025  */
6026  }
6027 
6028  /*
6029  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6030  * update Xid cannot possibly be older than the xid cutoff. The
6031  * presence of such a tuple would cause corruption, so be paranoid
6032  * and check.
6033  */
6034  if (TransactionIdIsValid(update_xid) &&
6035  TransactionIdPrecedes(update_xid, cutoff_xid))
6036  ereport(ERROR,
6038  errmsg_internal("found update xid %u from before xid cutoff %u",
6039  update_xid, cutoff_xid)));
6040 
6041  /*
6042  * If we determined that it's an Xid corresponding to an update
6043  * that must be retained, additionally add it to the list of
6044  * members of the new Multi, in case we end up using that. (We
6045  * might still decide to use only an update Xid and not a multi,
6046  * but it's easier to maintain the list as we walk the old members
6047  * list.)
6048  */
6049  if (TransactionIdIsValid(update_xid))
6050  newmembers[nnewmembers++] = members[i];
6051  }
6052  else
6053  {
6054  /* We only keep lockers if they are still running */
6055  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6056  TransactionIdIsInProgress(members[i].xid))
6057  {
6058  /* running locker cannot possibly be older than the cutoff */
6059  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6060  newmembers[nnewmembers++] = members[i];
6061  has_lockers = true;
6062  }
6063  }
6064  }
6065 
6066  pfree(members);
6067 
6068  if (nnewmembers == 0)
6069  {
6070  /* nothing worth keeping!? Tell caller to remove the whole thing */
6071  *flags |= FRM_INVALIDATE_XMAX;
6072  xid = InvalidTransactionId;
6073  }
6074  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6075  {
6076  /*
6077  * If there's a single member and it's an update, pass it back alone
6078  * without creating a new Multi. (XXX we could do this when there's a
6079  * single remaining locker, too, but that would complicate the API too
6080  * much; moreover, the case with the single updater is more
6081  * interesting, because those are longer-lived.)
6082  */
6083  Assert(nnewmembers == 1);
6084  *flags |= FRM_RETURN_IS_XID;
6085  if (update_committed)
6086  *flags |= FRM_MARK_COMMITTED;
6087  xid = update_xid;
6088  }
6089  else
6090  {
6091  /*
6092  * Create a new multixact with the surviving members of the previous
6093  * one, to set as new Xmax in the tuple.
6094  */
6095  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6096  *flags |= FRM_RETURN_IS_MULTI;
6097  }
6098 
6099  pfree(newmembers);
6100 
6101  return xid;
6102 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:5810
#define FRM_MARK_COMMITTED
Definition: heapam.c:5812
uint32 TransactionId
Definition: c.h:513
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:988
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:748
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
int errcode(int sqlerrcode)
Definition: elog.c:610
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6498
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:5809
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:5811
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
#define Assert(condition)
Definition: c.h:738
#define FRM_NOOP
Definition: heapam.c:5808
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3142
void * palloc(Size size)
Definition: mcxt.c:949
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 3935 of file heapam.c.

References elog, ERROR, mode, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

3936 {
3937  int retval;
3938 
3939  if (is_update)
3940  retval = tupleLockExtraInfo[mode].updstatus;
3941  else
3942  retval = tupleLockExtraInfo[mode].lockstatus;
3943 
3944  if (retval == -1)
3945  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
3946  is_update ? "true" : "false");
3947 
3948  return (MultiXactStatus) retval;
3949 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:40
static const struct @19 tupleLockExtraInfo[MaxLockTupleMode+1]
#define ERROR
Definition: elog.h:43
#define elog(elevel,...)
Definition: elog.h:214

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 1817 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferInit(), intorel_startup(), and transientrel_startup().

1818 {
1819  BulkInsertState bistate;
1820 
1821  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1823  bistate->current_buf = InvalidBuffer;
1824  return bistate;
1825 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:39
BufferAccessStrategy strategy
Definition: hio.h:31
void * palloc(Size size)
Definition: mcxt.c:949
Buffer current_buf
Definition: hio.h:32

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 6417 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, mode, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6419 {
6420  int nmembers;
6421  MultiXactMember *members;
6422  int i;
6423  uint16 bits = HEAP_XMAX_IS_MULTI;
6424  uint16 bits2 = 0;
6425  bool has_update = false;
6426  LockTupleMode strongest = LockTupleKeyShare;
6427 
6428  /*
6429  * We only use this in multis we just created, so they cannot be values
6430  * pre-pg_upgrade.
6431  */
6432  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6433 
6434  for (i = 0; i < nmembers; i++)
6435  {
6437 
6438  /*
6439  * Remember the strongest lock mode held by any member of the
6440  * multixact.
6441  */
6442  mode = TUPLOCK_from_mxstatus(members[i].status);
6443  if (mode > strongest)
6444  strongest = mode;
6445 
6446  /* See what other bits we need */
6447  switch (members[i].status)
6448  {
6452  break;
6453 
6455  bits2 |= HEAP_KEYS_UPDATED;
6456  break;
6457 
6459  has_update = true;
6460  break;
6461 
6462  case MultiXactStatusUpdate:
6463  bits2 |= HEAP_KEYS_UPDATED;
6464  has_update = true;
6465  break;
6466  }
6467  }
6468 
6469  if (strongest == LockTupleExclusive ||
6470  strongest == LockTupleNoKeyExclusive)
6471  bits |= HEAP_XMAX_EXCL_LOCK;
6472  else if (strongest == LockTupleShare)
6473  bits |= HEAP_XMAX_SHR_LOCK;
6474  else if (strongest == LockTupleKeyShare)
6475  bits |= HEAP_XMAX_KEYSHR_LOCK;
6476 
6477  if (!has_update)
6478  bits |= HEAP_XMAX_LOCK_ONLY;
6479 
6480  if (nmembers > 0)
6481  pfree(members);
6482 
6483  *new_infomask = bits;
6484  *new_infomask2 = bits2;
6485 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
unsigned short uint16
Definition: c.h:366
void pfree(void *pointer)
Definition: mcxt.c:1056
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:195
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 8892 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

8893 {
8894  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8895 
8896  switch (info & XLOG_HEAP_OPMASK)
8897  {
8898  case XLOG_HEAP2_CLEAN:
8899  heap_xlog_clean(record);
8900  break;
8902  heap_xlog_freeze_page(record);
8903  break;
8905  heap_xlog_cleanup_info(record);
8906  break;
8907  case XLOG_HEAP2_VISIBLE:
8908  heap_xlog_visible(record);
8909  break;
8911  heap_xlog_multi_insert(record);
8912  break;
8914  heap_xlog_lock_updated(record);
8915  break;
8916  case XLOG_HEAP2_NEW_CID:
8917 
8918  /*
8919  * Nothing to do on a real replay, only used during logical
8920  * decoding.
8921  */
8922  break;
8923  case XLOG_HEAP2_REWRITE:
8924  heap_xlog_logical_rewrite(record);
8925  break;
8926  default:
8927  elog(PANIC, "heap2_redo: unknown op code %u", info);
8928  }
8929 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1112
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:365
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8745
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:7959
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8227
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:284
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7705
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
#define elog(elevel,...)
Definition: elog.h:214
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:7819
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7726

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5576 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, TransactionIdIsValid, TransactionIdPrecedes(), TransactionXmin, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by heapam_tuple_complete_speculative(), and toast_delete_datum().

5577 {
5579  ItemId lp;
5580  HeapTupleData tp;
5581  Page page;
5582  BlockNumber block;
5583  Buffer buffer;
5584  TransactionId prune_xid;
5585 
5586  Assert(ItemPointerIsValid(tid));
5587 
5588  block = ItemPointerGetBlockNumber(tid);
5589  buffer = ReadBuffer(relation, block);
5590  page = BufferGetPage(buffer);
5591 
5593 
5594  /*
5595  * Page can't be all visible, we just inserted into it, and are still
5596  * running.
5597  */
5598  Assert(!PageIsAllVisible(page));
5599 
5600  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5601  Assert(ItemIdIsNormal(lp));
5602 
5603  tp.t_tableOid = RelationGetRelid(relation);
5604  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5605  tp.t_len = ItemIdGetLength(lp);
5606  tp.t_self = *tid;
5607 
5608  /*
5609  * Sanity check that the tuple really is a speculatively inserted tuple,
5610  * inserted by us.
5611  */
5612  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5613  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5614  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
5615  elog(ERROR, "attempted to kill a non-speculative tuple");
5617 
5618  /*
5619  * No need to check for serializable conflicts here. There is never a
5620  * need for a combocid, either. No need to extract replica identity, or
5621  * do anything special with infomask bits.
5622  */
5623 
5625 
5626  /*
5627  * The tuple will become DEAD immediately. Flag that this page is a
5628  * candidate for pruning by setting xmin to TransactionXmin. While not
5629  * immediately prunable, it is the oldest xid we can cheaply determine
5630  * that's safe against wraparound / being older than the table's
5631  * relfrozenxid. To defend against the unlikely case of a new relation
5632  * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
5633  * if so (vacuum can't subsequently move relfrozenxid to beyond
5634  * TransactionXmin, so there's no race here).
5635  */
5637  if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
5638  prune_xid = relation->rd_rel->relfrozenxid;
5639  else
5640  prune_xid = TransactionXmin;
5641  PageSetPrunable(page, prune_xid);
5642 
5643  /* store transaction information of xact deleting the tuple */
5646 
5647  /*
5648  * Set the tuple header xmin to InvalidTransactionId. This makes the
5649  * tuple immediately invisible to everyone. (In particular, to any
5650  * transactions waiting on the speculative token, woken up later.)
5651  */
5653 
5654  /* Clear the speculative insertion token too */
5655  tp.t_data->t_ctid = tp.t_self;
5656 
5657  MarkBufferDirty(buffer);
5658 
5659  /*
5660  * XLOG stuff
5661  *
5662  * The WAL records generated here match heap_delete(). The same recovery
5663  * routines are used.
5664  */
5665  if (RelationNeedsWAL(relation))
5666  {
5667  xl_heap_delete xlrec;
5668  XLogRecPtr recptr;
5669 
5670  xlrec.flags = XLH_DELETE_IS_SUPER;
5672  tp.t_data->t_infomask2);
5674  xlrec.xmax = xid;
5675 
5676  XLogBeginInsert();
5677  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
5678  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5679 
5680  /* No replica identity & replication origin logged */
5681 
5682  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
5683 
5684  PageSetLSN(page, recptr);
5685  }
5686 
5687  END_CRIT_SECTION();
5688 
5689  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5690 
5691  if (HeapTupleHasExternal(&tp))
5692  {
5693  Assert(!IsToastRelation(relation));
5694  heap_toast_delete(relation, &tp, true);
5695  }
5696 
5697  /*
5698  * Never need to mark tuple for invalidation, since catalogs don't support
5699  * speculative insertion
5700  */
5701 
5702  /* Now we can release the buffer */
5703  ReleaseBuffer(buffer);
5704 
5705  /* count deletion, as we counted the insertion too */
5706  pgstat_count_heap_delete(relation);
5707 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsToastRelation(Relation relation)
Definition: catalog.c:140
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2407
HeapTupleFields t_heap
Definition: htup_details.h:156
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:513
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define PageSetPrunable(page, xid)
Definition: bufpage.h:398
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
OffsetNumber offnum
Definition: heapam_xlog.h:106
TransactionId TransactionXmin
Definition: snapmgr.c:166
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:501
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId t_xmin
Definition: htup_details.h:123
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_MOVED
Definition: htup_details.h:216
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
uint8 infobits_set
Definition: heapam_xlog.h:107
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
union HeapTupleHeaderData::@44 t_choice
#define RelationNeedsWAL(relation)
Definition: rel.h:562
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2041
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
#define elog(elevel,...)
Definition: elog.h:214
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:319

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 4673 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

4675 {
4676  if (*have_tuple_lock)
4677  return true;
4678 
4679  switch (wait_policy)
4680  {
4681  case LockWaitBlock:
4682  LockTupleTuplock(relation, tid, mode);
4683  break;
4684 
4685  case LockWaitSkip:
4686  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4687  return false;
4688  break;
4689 
4690  case LockWaitError:
4691  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4692  ereport(ERROR,
4693  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4694  errmsg("could not obtain lock on row in relation \"%s\"",
4695  RelationGetRelationName(relation))));
4696  break;
4697  }
4698  *have_tuple_lock = true;
4699 
4700  return true;
4701 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:159
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:163
int errcode(int sqlerrcode)
Definition: elog.c:610
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:490
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg(const char *fmt,...)
Definition: elog.c:824

◆ heap_beginscan()

TableScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelTableScanDesc  parallel_scan,
uint32  flags 
)

Definition at line 1132 of file heapam.c.

References Assert, initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_parallel, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_ALLOW_PAGEMODE, SO_TYPE_SAMPLESCAN, SO_TYPE_SEQSCAN, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

1136 {
1137  HeapScanDesc scan;
1138 
1139  /*
1140  * increment relation ref count while scanning relation
1141  *
1142  * This is just to make really sure the relcache entry won't go away while
1143  * the scan has a pointer to it. Caller should be holding the rel open
1144  * anyway, so this is redundant in all normal scenarios...
1145  */
1147 
1148  /*
1149  * allocate and initialize scan descriptor
1150  */
1151  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1152 
1153  scan->rs_base.rs_rd = relation;
1154  scan->rs_base.rs_snapshot = snapshot;
1155  scan->rs_base.rs_nkeys = nkeys;
1156  scan->rs_base.rs_flags = flags;
1157  scan->rs_base.rs_parallel = parallel_scan;
1158  scan->rs_strategy = NULL; /* set in initscan */
1159 
1160  /*
1161  * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
1162  */
1163  if (!(snapshot && IsMVCCSnapshot(snapshot)))
1165 
1166  /*
1167  * For seqscan and sample scans in a serializable transaction, acquire a
1168  * predicate lock on the entire relation. This is required not only to
1169  * lock all the matching tuples, but also to conflict with new insertions
1170  * into the table. In an indexscan, we take page locks on the index pages
1171  * covering the range specified in the scan qual, but in a heap scan there
1172  * is nothing more fine-grained to lock. A bitmap scan is a different
1173  * story, there we have already scanned the index and locked the index
1174  * pages covering the predicate. But in that case we still have to lock
1175  * any matching heap tuples. For sample scan we could optimize the locking
1176  * to be at least page-level granularity, but we'd need to add per-tuple
1177  * locking for that.
1178  */
1180  {
1181  /*
1182  * Ensure a missing snapshot is noticed reliably, even if the
1183  * isolation mode means predicate locking isn't performed (and
1184  * therefore the snapshot isn't used here).
1185  */
1186  Assert(snapshot);
1187  PredicateLockRelation(relation, snapshot);
1188  }
1189 
1190  /* we only need to set this up once */
1191  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1192 
1193  /*
1194  * we do this here instead of in initscan() because heap_rescan also calls
1195  * initscan() and we don't want to allocate memory again
1196  */
1197  if (nkeys > 0)
1198  scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1199  else
1200  scan->rs_base.rs_key = NULL;
1201 
1202  initscan(scan, key, false);
1203 
1204  return (TableScanDesc) scan;
1205 }
TableScanDescData rs_base
Definition: heapam.h:49
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2500
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
ScanKeyData * ScanKey
Definition: skey.h:75
Oid t_tableOid
Definition: htup.h:66
struct ScanKeyData * rs_key
Definition: relscan.h:37
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2069
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:738
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
void * palloc(Size size)
Definition: mcxt.c:949
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:45
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:208
#define RelationGetRelid(relation)
Definition: rel.h:456

◆ heap_compute_xid_horizon_for_tuples()

TransactionId heap_compute_xid_horizon_for_tuples ( Relation  rel,
ItemPointerData tids,
int  nitems 
)

Definition at line 7004 of file heapam.c.

References Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, CHECK_FOR_INTERRUPTS, get_tablespace_maintenance_io_concurrency(), HeapTupleHeaderAdvanceLatestRemovedXid(), i, InvalidBlockNumber, InvalidBuffer, InvalidTransactionId, IsCatalogRelation(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerCompare(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), maintenance_io_concurrency, PageGetItem, PageGetItemId, qsort, RelationData::rd_rel, ReadBuffer(), and ReleaseBuffer().

Referenced by SampleHeapTupleVisible().

7007 {
7008  TransactionId latestRemovedXid = InvalidTransactionId;
7009  BlockNumber hblkno;
7011  Page hpage;
7012 #ifdef USE_PREFETCH
7013  XidHorizonPrefetchState prefetch_state;
7014  int prefetch_distance;
7015 #endif
7016 
7017  /*
7018  * Sort to avoid repeated lookups for the same page, and to make it more
7019  * likely to access items in an efficient order. In particular, this
7020  * ensures that if there are multiple pointers to the same page, they all
7021  * get processed while looking up and locking the page just once.
7022  */
7023  qsort((void *) tids, nitems, sizeof(ItemPointerData),
7024  (int (*) (const void *, const void *)) ItemPointerCompare);
7025 
7026 #ifdef USE_PREFETCH
7027  /* Initialize prefetch state. */
7028  prefetch_state.cur_hblkno = InvalidBlockNumber;
7029  prefetch_state.next_item = 0;
7030  prefetch_state.nitems = nitems;
7031  prefetch_state.tids = tids;
7032 
7033  /*
7034  * Compute the prefetch distance that we will attempt to maintain.
7035  *
7036  * Since the caller holds a buffer lock somewhere in rel, we'd better make
7037  * sure that isn't a catalog relation before we call code that does
7038  * syscache lookups, to avoid risk of deadlock.
7039  */
7040  if (IsCatalogRelation(rel))
7041  prefetch_distance = maintenance_io_concurrency;
7042  else
7043  prefetch_distance =
7045 
7046  /* Start prefetching. */
7047  xid_horizon_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7048 #endif
7049 
7050  /* Iterate over all tids, and check their horizon */
7051  hblkno = InvalidBlockNumber;
7052  hpage = NULL;
7053  for (int i = 0; i < nitems; i++)
7054  {
7055  ItemPointer htid = &tids[i];
7056  ItemId hitemid;
7057  OffsetNumber hoffnum;
7058 
7059  /*
7060  * Read heap buffer, but avoid refetching if it's the same block as
7061  * required for the last tid.
7062  */
7063  if (hblkno == InvalidBlockNumber ||
7064  ItemPointerGetBlockNumber(htid) != hblkno)
7065  {
7066  /* release old buffer */
7067  if (BufferIsValid(buf))
7068  {
7070  ReleaseBuffer(buf);
7071  }
7072 
7073  hblkno = ItemPointerGetBlockNumber(htid);
7074 
7075  buf = ReadBuffer(rel, hblkno);
7076 
7077 #ifdef USE_PREFETCH
7078 
7079  /*
7080  * To maintain the prefetch distance, prefetch one more page for
7081  * each page we read.
7082  */
7083  xid_horizon_prefetch_buffer(rel, &prefetch_state, 1);
7084 #endif
7085 
7086  hpage = BufferGetPage(buf);
7087 
7089  }
7090 
7091  hoffnum = ItemPointerGetOffsetNumber(htid);
7092  hitemid = PageGetItemId(hpage, hoffnum);
7093 
7094  /*
7095  * Follow any redirections until we find something useful.
7096  */
7097  while (ItemIdIsRedirected(hitemid))
7098  {
7099  hoffnum = ItemIdGetRedirect(hitemid);
7100  hitemid = PageGetItemId(hpage, hoffnum);
7102  }
7103 
7104  /*
7105  * If the heap item has storage, then read the header and use that to
7106  * set latestRemovedXid.
7107  *
7108  * Some LP_DEAD items may not be accessible, so we ignore them.
7109  */
7110  if (ItemIdHasStorage(hitemid))
7111  {
7112  HeapTupleHeader htuphdr;
7113 
7114  htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
7115 
7116  HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
7117  }
7118  else if (ItemIdIsDead(hitemid))
7119  {
7120  /*
7121  * Conjecture: if hitemid is dead then it had xids before the xids
7122  * marked on LP_NORMAL items. So we just ignore this item and move
7123  * onto the next, for the purposes of calculating
7124  * latestRemovedXid.
7125  */
7126  }
7127  else
7128  Assert(!ItemIdIsUsed(hitemid));
7129 
7130  }
7131 
7132  if (BufferIsValid(buf))
7133  {
7135  ReleaseBuffer(buf);
7136  }
7137 
7138  /*
7139  * If all heap tuples were LP_DEAD then we will be returning
7140  * InvalidTransactionId here, which avoids conflicts. This matches
7141  * existing logic which assumes that LP_DEAD tuples must already be older
7142  * than the latestRemovedXid on the cleanup record that set them as
7143  * LP_DEAD, hence must already have generated a conflict.
7144  */
7145 
7146  return latestRemovedXid;
7147 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:6913
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:98
int maintenance_io_concurrency
Definition: bufmgr.c:141
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
uint32 TransactionId
Definition: c.h:513
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
Form_pg_class rd_rel
Definition: rel.h:109
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
uint16 OffsetNumber
Definition: off.h:24
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition: spccache.c:229
static char * buf
Definition: pg_test_fsync.c:67
#define InvalidTransactionId
Definition: transam.h:31
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
#define Assert(condition)
Definition: c.h:738
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
int i
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define qsort(a, b, c, d)
Definition: port.h:479
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_delete()

TM_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)

Definition at line 2452 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), TM_FailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, 
HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, xl_heap_delete::xmax, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_delete(), and simple_heap_delete().

2455 {
2456  TM_Result result;
2458  ItemId lp;
2459  HeapTupleData tp;
2460  Page page;
2461  BlockNumber block;
2462  Buffer buffer;
2463  Buffer vmbuffer = InvalidBuffer;
2464  TransactionId new_xmax;
2465  uint16 new_infomask,
2466  new_infomask2;
2467  bool have_tuple_lock = false;
2468  bool iscombo;
2469  bool all_visible_cleared = false;
2470  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2471  bool old_key_copied = false;
2472 
2473  Assert(ItemPointerIsValid(tid));
2474 
2475  /*
2476  * Forbid this during a parallel operation, lest it allocate a combocid.
2477  * Other workers might need that combocid for visibility checks, and we
2478  * have no provision for broadcasting it to them.
2479  */
2480  if (IsInParallelMode())
2481  ereport(ERROR,
2482  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2483  errmsg("cannot delete tuples during a parallel operation")));
2484 
2485  block = ItemPointerGetBlockNumber(tid);
2486  buffer = ReadBuffer(relation, block);
2487  page = BufferGetPage(buffer);
2488 
2489  /*
2490  * Before locking the buffer, pin the visibility map page if it appears to
2491  * be necessary. Since we haven't got the lock yet, someone else might be
2492  * in the middle of changing this, so we'll need to recheck after we have
2493  * the lock.
2494  */
2495  if (PageIsAllVisible(page))
2496  visibilitymap_pin(relation, block, &vmbuffer);
2497 
2499 
2500  /*
2501  * If we didn't pin the visibility map page and the page has become all
2502  * visible while we were busy locking the buffer, we'll have to unlock and
2503  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2504  * unfortunate, but hopefully shouldn't happen often.
2505  */
2506  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2507  {
2508  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2509  visibilitymap_pin(relation, block, &vmbuffer);
2511  }
2512 
2513  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2514  Assert(ItemIdIsNormal(lp));
2515 
2516  tp.t_tableOid = RelationGetRelid(relation);
2517  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2518  tp.t_len = ItemIdGetLength(lp);
2519  tp.t_self = *tid;
2520 
2521 l1:
2522  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2523 
2524  if (result == TM_Invisible)
2525  {
2526  UnlockReleaseBuffer(buffer);
2527  ereport(ERROR,
2528  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2529  errmsg("attempted to delete invisible tuple")));
2530  }
2531  else if (result == TM_BeingModified && wait)
2532  {
2533  TransactionId xwait;
2534  uint16 infomask;
2535 
2536  /* must copy state data before unlocking buffer */
2537  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2538  infomask = tp.t_data->t_infomask;
2539 
2540  /*
2541  * Sleep until concurrent transaction ends -- except when there's a
2542  * single locker and it's our own transaction. Note we don't care
2543  * which lock mode the locker has, because we need the strongest one.
2544  *
2545  * Before sleeping, we need to acquire tuple lock to establish our
2546  * priority for the tuple (see heap_lock_tuple). LockTuple will
2547  * release us when we are next-in-line for the tuple.
2548  *
2549  * If we are forced to "start over" below, we keep the tuple lock;
2550  * this arranges that we stay at the head of the line while rechecking
2551  * tuple state.
2552  */
2553  if (infomask & HEAP_XMAX_IS_MULTI)
2554  {
2555  bool current_is_member = false;
2556 
2557  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2558  LockTupleExclusive, &current_is_member))
2559  {
2560  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2561 
2562  /*
2563  * Acquire the lock, if necessary (but skip it when we're
2564  * requesting a lock and already have one; avoids deadlock).
2565  */
2566  if (!current_is_member)
2568  LockWaitBlock, &have_tuple_lock);
2569 
2570  /* wait for multixact */
2572  relation, &(tp.t_self), XLTW_Delete,
2573  NULL);
2575 
2576  /*
2577  * If xwait had just locked the tuple then some other xact
2578  * could update this tuple before we get to this point. Check
2579  * for xmax change, and start over if so.
2580  */
2581  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2583  xwait))
2584  goto l1;
2585  }
2586 
2587  /*
2588  * You might think the multixact is necessarily done here, but not
2589  * so: it could have surviving members, namely our own xact or
2590  * other subxacts of this backend. It is legal for us to delete
2591  * the tuple in either case, however (the latter case is
2592  * essentially a situation of upgrading our former shared lock to
2593  * exclusive). We don't bother changing the on-disk hint bits
2594  * since we are about to overwrite the xmax altogether.
2595  */
2596  }
2597  else if (!TransactionIdIsCurrentTransactionId(xwait))
2598  {
2599  /*
2600  * Wait for regular transaction to end; but first, acquire tuple
2601  * lock.
2602  */
2603  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2605  LockWaitBlock, &have_tuple_lock);
2606  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2608 
2609  /*
2610  * xwait is done, but if xwait had just locked the tuple then some
2611  * other xact could update this tuple before we get to this point.
2612  * Check for xmax change, and start over if so.
2613  */
2614  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2616  xwait))
2617  goto l1;
2618 
2619  /* Otherwise check if it committed or aborted */
2620  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2621  }
2622 
2623  /*
2624  * We may overwrite if previous xmax aborted, or if it committed but
2625  * only locked the tuple without updating it.
2626  */
2627  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2630  result = TM_Ok;
2631  else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
2633  result = TM_Updated;
2634  else
2635  result = TM_Deleted;
2636  }
2637 
2638  if (crosscheck != InvalidSnapshot && result == TM_Ok)
2639  {
2640  /* Perform additional check for transaction-snapshot mode RI updates */
2641  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2642  result = TM_Updated;
2643  }
2644 
2645  if (result != TM_Ok)
2646  {
2647  Assert(result == TM_SelfModified ||
2648  result == TM_Updated ||
2649  result == TM_Deleted ||
2650  result == TM_BeingModified);
2652  Assert(result != TM_Updated ||
2653  !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2654  tmfd->ctid = tp.t_data->t_ctid;
2656  if (result == TM_SelfModified)
2657  tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2658  else
2659  tmfd->cmax = InvalidCommandId;
2660  UnlockReleaseBuffer(buffer);
2661  if (have_tuple_lock)
2662  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2663  if (vmbuffer != InvalidBuffer)
2664  ReleaseBuffer(vmbuffer);
2665  return result;
2666  }
2667 
2668  /*
2669  * We're about to do the actual delete -- check for conflict first, to
2670  * avoid possibly having to roll back work we've just done.
2671  *
2672  * This is safe without a recheck as long as there is no possibility of
2673  * another process scanning the page between this check and the delete
2674  * being visible to the scan (i.e., an exclusive buffer content lock is
2675  * continuously held from this point until the tuple delete is visible).
2676  */
2677  CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
2678 
2679  /* replace cid with a combo cid if necessary */
2680  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2681 
2682  /*
2683  * Compute replica identity tuple before entering the critical section so
2684  * we don't PANIC upon a memory allocation failure.
2685  */
2686  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2687 
2688  /*
2689  * If this is the first possibly-multixact-able operation in the current
2690  * transaction, set my per-backend OldestMemberMXactId setting. We can be
2691  * certain that the transaction will never become a member of any older
2692  * MultiXactIds than that. (We have to do this even if we end up just
2693  * using our own TransactionId below, since some other backend could
2694  * incorporate our XID into a MultiXact immediately afterwards.)
2695  */
2697 
2700  xid, LockTupleExclusive, true,
2701  &new_xmax, &new_infomask, &new_infomask2);
2702 
2704 
2705  /*
2706  * If this transaction commits, the tuple will become DEAD sooner or
2707  * later. Set flag that this page is a candidate for pruning once our xid
2708  * falls below the OldestXmin horizon. If the transaction finally aborts,
2709  * the subsequent page pruning will be a no-op and the hint will be
2710  * cleared.
2711  */
2712  PageSetPrunable(page, xid);
2713 
2714  if (PageIsAllVisible(page))
2715  {
2716  all_visible_cleared = true;
2717  PageClearAllVisible(page);
2718  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2719  vmbuffer, VISIBILITYMAP_VALID_BITS);
2720  }
2721 
2722  /* store transaction information of xact deleting the tuple */
2725  tp.t_data->t_infomask |= new_infomask;
2726  tp.t_data->t_infomask2 |= new_infomask2;
2728  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2729  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2730  /* Make sure there is no forward chain link in t_ctid */
2731  tp.t_data->t_ctid = tp.t_self;
2732 
2733  /* Signal that this is actually a move into another partition */
2734  if (changingPart)
2736 
2737  MarkBufferDirty(buffer);
2738 
2739  /*
2740  * XLOG stuff
2741  *
2742  * NB: heap_abort_speculative() uses the same xlog record and replay
2743  * routines.
2744  */
2745  if (RelationNeedsWAL(relation))
2746  {
2747  xl_heap_delete xlrec;
2748  xl_heap_header xlhdr;
2749  XLogRecPtr recptr;
2750 
2751  /* For logical decode we need combocids to properly decode the catalog */
2753  log_heap_new_cid(relation, &tp);
2754 
2755  xlrec.flags = 0;
2756  if (all_visible_cleared)
2758  if (changingPart)
2761  tp.t_data->t_infomask2);
2763  xlrec.xmax = new_xmax;
2764 
2765  if (old_key_tuple != NULL)
2766  {
2767  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2769  else
2771  }
2772 
2773  XLogBeginInsert();
2774  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
2775 
2776  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2777 
2778  /*
2779  * Log replica identity of the deleted tuple if there is one
2780  */
2781  if (old_key_tuple != NULL)
2782  {
2783  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2784  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2785  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2786 
2787  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
2788  XLogRegisterData((char *) old_key_tuple->t_data
2790  old_key_tuple->t_len
2792  }
2793 
2794  /* filtering by origin on a row level is much more efficient */
2796 
2797  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
2798 
2799  PageSetLSN(page, recptr);
2800  }
2801 
2802  END_CRIT_SECTION();
2803 
2804  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2805 
2806  if (vmbuffer != InvalidBuffer)
2807  ReleaseBuffer(vmbuffer);
2808 
2809  /*
2810  * If the tuple has toasted out-of-line attributes, we need to delete
2811  * those items too. We have to do this before releasing the buffer
2812  * because we need to look at the contents of the tuple, but it's OK to
2813  * release the content lock on the buffer first.
2814  */
2815  if (relation->rd_rel->relkind != RELKIND_RELATION &&
2816  relation->rd_rel->relkind != RELKIND_MATVIEW)
2817  {
2818  /* toast table entries should never be recursively toasted */
2820  }
2821  else if (HeapTupleHasExternal(&tp))
2822  heap_toast_delete(relation, &tp, false);
2823 
2824  /*
2825  * Mark tuple for invalidation from system caches at next command
2826  * boundary. We have to do this before releasing the buffer because we
2827  * need to look at the contents of the tuple.
2828  */
2829  CacheInvalidateHeapTuple(relation, &tp, NULL);
2830 
2831  /* Now we can release the buffer */
2832  ReleaseBuffer(buffer);
2833 
2834  /*
2835  * Release the lmgr tuple lock, if we had it.
2836  */
2837  if (have_tuple_lock)
2838  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2839 
2840  pgstat_count_heap_delete(relation);
2841 
2842  if (old_key_tuple != NULL && old_key_copied)
2843  heap_freetuple(old_key_tuple);
2844 
2845  return TM_Ok;
2846 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
ItemPointerData ctid
Definition: tableam.h:124
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7531
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2407
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:513
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed, bool *copy)
Definition: heapam.c:7613
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2429
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:496
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
CommandId cmax
Definition: tableam.h:126
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define PageSetPrunable(page, xid)
Definition: bufpage.h:398
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:610
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:230
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:109
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:161
OffsetNumber offnum
Definition: heapam_xlog.h:106
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:625
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:375
unsigned short uint16
Definition: c.h:366
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:996
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
TransactionId xmax
Definition: tableam.h:125
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:449
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6743
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:398
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:405
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:4722
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define InvalidSnapshot
Definition: snapshot.h:123
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
TM_Result
Definition: tableam.h:69
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:619
#define InvalidCommandId
Definition: c.h:530
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4374
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:1795
#define HEAP_MOVED
Definition: htup_details.h:216
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:4673
#define ereport(elevel,...)
Definition: elog.h:144
TransactionId MultiXactId
Definition: c.h:523
#define PageClearAllVisible(page)
Definition: bufpage.h:389
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
uint8 infobits_set
Definition: heapam_xlog.h:107
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:118
Definition: tableam.h:75
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
uint16 t_infomask
Definition: heapam_xlog.h:145
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition: heapam.c:6566
#define RelationNeedsWAL(relation)
Definition: rel.h:562
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2041
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:153
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2633
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:96
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93

◆ heap_endscan()

void heap_endscan ( TableScanDesc  sscan)

Definition at line 1245 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_TEMP_SNAPSHOT, and UnregisterSnapshot().

Referenced by SampleHeapTupleVisible().

1246 {
1247  HeapScanDesc scan = (HeapScanDesc) sscan;
1248 
1249  /* Note: no locking manipulations needed */
1250 
1251  /*
1252  * unpin scan buffers
1253  */
1254  if (BufferIsValid(scan->rs_cbuf))
1255  ReleaseBuffer(scan->rs_cbuf);
1256 
1257  /*
1258  * decrement relation reference count and free scan descriptor storage
1259  */
1261 
1262  if (scan->rs_base.rs_key)
1263  pfree(scan->rs_base.rs_key);
1264 
1265  if (scan->rs_strategy != NULL)
1267 
1268  if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1270 
1271  pfree(scan);
1272 }
TableScanDescData rs_base
Definition: heapam.h:49
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
void pfree(void *pointer)
Definition: mcxt.c:1056
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2082
struct ScanKeyData * rs_key
Definition: relscan.h:37
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:907
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
Buffer rs_cbuf
Definition: heapam.h:60
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6365 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6366 {
6367  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6368 
6369  if (frz->frzflags & XLH_FREEZE_XVAC)
6371 
6372  if (frz->frzflags & XLH_INVALID_XVAC)
6374 
6375  tuple->t_infomask = frz->t_infomask;
6376  tuple->t_infomask2 = frz->t_infomask2;
6377 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:423
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:320
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:316
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:315

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf 
)

Definition at line 1412 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by heap_lock_updated_tuple_rec(), heapam_fetch_row_version(), and heapam_tuple_lock().

1416 {
1417  ItemPointer tid = &(tuple->t_self);
1418  ItemId lp;
1419  Buffer buffer;
1420  Page page;
1421  OffsetNumber offnum;
1422  bool valid;
1423 
1424  /*
1425  * Fetch and pin the appropriate page of the relation.
1426  */
1427  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1428 
1429  /*
1430  * Need share lock on buffer to examine tuple commit status.
1431  */
1432  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1433  page = BufferGetPage(buffer);
1434  TestForOldSnapshot(snapshot, relation, page);
1435 
1436  /*
1437  * We'd better check for out-of-range offnum in case of VACUUM since the
1438  * TID was obtained.
1439  */
1440  offnum = ItemPointerGetOffsetNumber(tid);
1441  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1442  {
1443  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1444  ReleaseBuffer(buffer);
1445  *userbuf = InvalidBuffer;
1446  tuple->t_data = NULL;
1447  return false;
1448  }
1449 
1450  /*
1451  * get the item line pointer corresponding to the requested tid
1452  */
1453  lp = PageGetItemId(page, offnum);
1454 
1455  /*
1456  * Must check for deleted tuple.
1457  */
1458  if (!ItemIdIsNormal(lp))
1459  {
1460  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1461  ReleaseBuffer(buffer);
1462  *userbuf = InvalidBuffer;
1463  tuple->t_data = NULL;
1464  return false;
1465  }
1466 
1467  /*
1468  * fill in *tuple fields
1469  */
1470  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1471  tuple->t_len = ItemIdGetLength(lp);
1472  tuple->t_tableOid = RelationGetRelid(relation);
1473 
1474  /*
1475  * check tuple visibility, then release lock
1476  */
1477  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1478 
1479  if (valid)
1480  PredicateLockTID(relation, &(tuple->t_self), snapshot,
1481  HeapTupleHeaderGetXmin(tuple->t_data));
1482 
1483  HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1484 
1485  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1486 
1487  if (valid)
1488  {
1489  /*
1490  * All checks passed, so return the tuple as valid. Caller is now
1491  * responsible for releasing the buffer.
1492  */
1493  *userbuf = buffer;
1494 
1495  return true;
1496  }
1497 
1498  /* Tuple failed time qual */
1499  ReleaseBuffer(buffer);
1500  *userbuf = InvalidBuffer;
1501 
1502  return false;
1503 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:277
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2545
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9033
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5485 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_complete_speculative().

5486 {
5487  Buffer buffer;
5488  Page page;
5489  OffsetNumber offnum;
5490  ItemId lp = NULL;
5491  HeapTupleHeader htup;
5492 
5493  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5495  page = (Page) BufferGetPage(buffer);
5496 
5497  offnum = ItemPointerGetOffsetNumber(tid);
5498  if (PageGetMaxOffsetNumber(page) >= offnum)
5499  lp = PageGetItemId(page, offnum);
5500 
5501  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5502  elog(ERROR, "invalid lp");
5503 
5504  htup = (HeapTupleHeader) PageGetItem(page, lp);
5505 
5506  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5508  "invalid speculative token constant");
5509 
5510  /* NO EREPORT(ERROR) from here till changes are logged */
5512 
5514 
5515  MarkBufferDirty(buffer);
5516 
5517  /*
5518  * Replace the speculative insertion token with a real t_ctid, pointing to
5519  * itself like it does on regular tuples.
5520  */
5521  htup->t_ctid = *tid;
5522 
5523  /* XLOG stuff */
5524  if (RelationNeedsWAL(relation))
5525  {
5526  xl_heap_confirm xlrec;
5527  XLogRecPtr recptr;
5528 
5529  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5530 
5531  XLogBeginInsert();
5532 
5533  /* We want the same filtering on this as on a plain insert */
5535 
5536  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5537  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5538 
5539  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5540 
5541  PageSetLSN(page, recptr);
5542  }
5543 
5544  END_CRIT_SECTION();
5545 
5546  UnlockReleaseBuffer(buffer);
5547 }
OffsetNumber offnum
Definition: heapam_xlog.h:296
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:230
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define SpecTokenOffsetNumber
Definition: itemptr.h:63
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:852
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
#define REGBUF_STANDARD
Definition: xloginsert.h:35
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:398
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:299
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:562
#define elog(elevel,...)
Definition: elog.h:214
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6386 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6389 {
6391  bool do_freeze;
6392  bool tuple_totally_frozen;
6393 
6394  do_freeze = heap_prepare_freeze_tuple(tuple,
6395  relfrozenxid, relminmxid,
6396  cutoff_xid, cutoff_multi,
6397  &frz, &tuple_totally_frozen);
6398 
6399  /*
6400  * Note that because this is not a WAL-logged operation, we don't need to
6401  * fill in the offset in the freeze record.
6402  */
6403 
6404  if (do_freeze)
6405  heap_execute_freeze_tuple(tuple, &frz);
6406  return do_freeze;
6407 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6136
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6365

◆ heap_get_latest_tid()

void heap_get_latest_tid ( TableScanDesc  sscan,
ItemPointer  tid 
)

Definition at line 1672 of file heapam.c.

References Assert, BUFFER_LOCK_SHARE, BufferGetPage, HEAP_XMAX_INVALID, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetRelid, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

1674 {
1675  Relation relation = sscan->rs_rd;
1676  Snapshot snapshot = sscan->rs_snapshot;
1677  ItemPointerData ctid;
1678  TransactionId priorXmax;
1679 
1680  /*
1681  * table_get_latest_tid verified that the passed in tid is valid. Assume
1682  * that t_ctid links are valid however - there shouldn't be invalid ones
1683  * in the table.
1684  */
1685  Assert(ItemPointerIsValid(tid));
1686 
1687  /*
1688  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1689  * need to examine, and *tid is the TID we will return if ctid turns out
1690  * to be bogus.
1691  *
1692  * Note that we will loop until we reach the end of the t_ctid chain.
1693  * Depending on the snapshot passed, there might be at most one visible
1694  * version of the row, but we don't try to optimize for that.
1695  */
1696  ctid = *tid;
1697  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1698  for (;;)
1699  {
1700  Buffer buffer;
1701  Page page;
1702  OffsetNumber offnum;
1703  ItemId lp;
1704  HeapTupleData tp;
1705  bool valid;
1706 
1707  /*
1708  * Read, pin, and lock the page.
1709  */
1710  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1711  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1712  page = BufferGetPage(buffer);
1713  TestForOldSnapshot(snapshot, relation, page);
1714 
1715  /*
1716  * Check for bogus item number. This is not treated as an error
1717  * condition because it can happen while following a t_ctid link. We
1718  * just assume that the prior tid is OK and return it unchanged.
1719  */
1720  offnum = ItemPointerGetOffsetNumber(&ctid);
1721  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1722  {
1723  UnlockReleaseBuffer(buffer);
1724  break;
1725  }
1726  lp = PageGetItemId(page, offnum);
1727  if (!ItemIdIsNormal(lp))
1728  {
1729  UnlockReleaseBuffer(buffer);
1730  break;
1731  }
1732 
1733  /* OK to access the tuple */
1734  tp.t_self = ctid;
1735  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1736  tp.t_len = ItemIdGetLength(lp);
1737  tp.t_tableOid = RelationGetRelid(relation);
1738 
1739  /*
1740  * After following a t_ctid link, we might arrive at an unrelated
1741  * tuple. Check for XMIN match.
1742  */
1743  if (TransactionIdIsValid(priorXmax) &&
1745  {
1746  UnlockReleaseBuffer(buffer);
1747  break;
1748  }
1749 
1750  /*
1751  * Check tuple visibility; if visible, set it as the new result
1752  * candidate.
1753  */
1754  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1755  HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1756  if (valid)
1757  *tid = ctid;
1758 
1759  /*
1760  * If there's a valid t_ctid link, follow it, else we're done.
1761  */
1762  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1766  {
1767  UnlockReleaseBuffer(buffer);
1768  break;
1769  }
1770 
1771  ctid = tp.t_data->t_ctid;
1772  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1773  UnlockReleaseBuffer(buffer);
1774  } /* end of loop */
1775 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:277
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:513
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9033
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_getnext()

HeapTuple heap_getnext ( TableScanDesc  sscan,
ScanDirection  direction 
)

Definition at line 1290 of file heapam.c.

References ereport, errcode(), errmsg_internal(), ERROR, GetHeapamTableAmRoutine(), HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, RelationData::rd_tableam, HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, and unlikely.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), heapam_index_build_range_scan(), heapam_index_validate_scan(), index_update_stats(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1291 {
1292  HeapScanDesc scan = (HeapScanDesc) sscan;
1293 
1294  /*
1295  * This is still widely used directly, without going through table AM, so
1296  * add a safety check. It's possible we should, at a later point,
1297  * downgrade this to an assert. The reason for checking the AM routine,
1298  * rather than the AM oid, is that this allows to write regression tests
1299  * that create another AM reusing the heap handler.
1300  */
1301  if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1302  ereport(ERROR,
1303  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1304  errmsg_internal("only heap AM is supported")));
1305 
1306  /* Note: no locking manipulations needed */
1307 
1308  HEAPDEBUG_1; /* heap_getnext( info ) */
1309 
1310  if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1311  heapgettup_pagemode(scan, direction,
1312  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1313  else
1314  heapgettup(scan, direction,
1315  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1316 
1317  if (scan->rs_ctup.t_data == NULL)
1318  {
1319  HEAPDEBUG_2; /* heap_getnext returning EOS */
1320  return NULL;
1321  }
1322 
1323  /*
1324  * if we get here it means we have a new current scan tuple, so point to
1325  * the proper return buffer and return the tuple.
1326  */
1327  HEAPDEBUG_3; /* heap_getnext returning tuple */
1328 
1329  pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1330 
1331  return &scan->rs_ctup;
1332 }
TableScanDescData rs_base
Definition: heapam.h:49
int errcode(int sqlerrcode)
Definition: elog.c:610
uint32 rs_flags
Definition: relscan.h:43
#define HEAPDEBUG_2
Definition: heapam.c:1284
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ERROR
Definition: elog.h:43
struct ScanKeyData * rs_key
Definition: relscan.h:37
#define HEAPDEBUG_1
Definition: heapam.c:1283
const struct TableAmRoutine * rd_tableam
Definition: rel.h:171
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:487
#define ereport(elevel,...)
Definition: elog.h:144
int errmsg_internal(const char *fmt,...)
Definition: elog.c:911
#define HEAPDEBUG_3
Definition: heapam.c:1285
Relation rs_rd
Definition: relscan.h:34
#define unlikely(x)
Definition: c.h:206
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1407
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:801
const TableAmRoutine * GetHeapamTableAmRoutine(void)

◆ heap_getnextslot()

bool heap_getnextslot ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1349 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), HEAPAMSLOTDEBUG_1, HEAPAMSLOTDEBUG_2, HEAPAMSLOTDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, and HeapTupleData::t_data.

Referenced by SampleHeapTupleVisible().

1350 {
1351  HeapScanDesc scan = (HeapScanDesc) sscan;
1352 
1353  /* Note: no locking manipulations needed */
1354 
1355  HEAPAMSLOTDEBUG_1; /* heap_getnextslot( info ) */
1356 
1357  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1358  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1359  else
1360  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1361 
1362  if (scan->rs_ctup.t_data == NULL)
1363  {
1364  HEAPAMSLOTDEBUG_2; /* heap_getnextslot returning EOS */
1365  ExecClearTuple(slot);
1366  return false;
1367  }
1368 
1369  /*
1370  * if we get here it means we have a new current scan tuple, so point to
1371  * the proper return buffer and return the tuple.
1372  */
1373  HEAPAMSLOTDEBUG_3; /* heap_getnextslot returning tuple */
1374 
1375  pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1376 
1377  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1378  scan->rs_cbuf);
1379  return true;
1380 }
#define HEAPAMSLOTDEBUG_2
Definition: heapam.c:1344
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define HEAPAMSLOTDEBUG_1
Definition: heapam.c:1343
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
struct ScanKeyData * rs_key
Definition: relscan.h:37
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:487
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define HEAPAMSLOTDEBUG_3
Definition: heapam.c:1345
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1407
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:801

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 1527 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RecentGlobalXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1530 {
1531  Page dp = (Page) BufferGetPage(buffer);
1532  TransactionId prev_xmax = InvalidTransactionId;
1533  BlockNumber blkno;
1534  OffsetNumber offnum;
1535  bool at_chain_start;
1536  bool valid;
1537  bool skip;
1538 
1539  /* If this is not the first call, previous call returned a (live!) tuple */
1540  if (all_dead)
1541  *all_dead = first_call;
1542 
1543  blkno = ItemPointerGetBlockNumber(tid);
1544  offnum = ItemPointerGetOffsetNumber(tid);
1545  at_chain_start = first_call;
1546  skip = !first_call;
1547 
1548  Assert(TransactionIdIsValid(RecentGlobalXmin));
1549  Assert(BufferGetBlockNumber(buffer) == blkno);
1550 
1551  /* Scan through possible multiple members of HOT-chain */
1552  for (;;)
1553  {
1554  ItemId lp;
1555 
1556  /* check for bogus TID */
1557  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1558  break;
1559 
1560  lp = PageGetItemId(dp, offnum);
1561 
1562  /* check for unused, dead, or redirected items */
1563  if (!ItemIdIsNormal(lp))
1564  {
1565  /* We should only see a redirect at start of chain */
1566  if (ItemIdIsRedirected(lp) && at_chain_start)
1567  {
1568  /* Follow the redirect */
1569  offnum = ItemIdGetRedirect(lp);
1570  at_chain_start = false;
1571  continue;
1572  }
1573  /* else must be end of chain */
1574  break;
1575  }
1576 
1577  /*
1578  * Update heapTuple to point to the element of the HOT chain we're
1579  * currently investigating. Having t_self set correctly is important
1580  * because the SSI checks and the *Satisfies routine for historical
1581  * MVCC snapshots need the correct tid to decide about the visibility.
1582  */
1583  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1584  heapTuple->t_len = ItemIdGetLength(lp);
1585  heapTuple->t_tableOid = RelationGetRelid(relation);
1586  ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1587 
1588  /*
1589  * Shouldn't see a HEAP_ONLY tuple at chain start.
1590  */
1591  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1592  break;
1593 
1594  /*
1595  * The xmin should match the previous xmax value, else chain is
1596  * broken.
1597  */
1598  if (TransactionIdIsValid(prev_xmax) &&
1599  !TransactionIdEquals(prev_xmax,
1600  HeapTupleHeaderGetXmin(heapTuple->t_data)))
1601  break;
1602 
1603  /*
1604  * When first_call is true (and thus, skip is initially false) we'll
1605  * return the first tuple we find. But on later passes, heapTuple
1606  * will initially be pointing to the tuple we returned last time.
1607  * Returning it again would be incorrect (and would loop forever), so
1608  * we skip it and return the next match we find.
1609  */
1610  if (!skip)
1611  {
1612  /* If it's visible per the snapshot, we must return it */
1613  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1614  HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1615  buffer, snapshot);
1616 
1617  if (valid)
1618  {
1619  ItemPointerSetOffsetNumber(tid, offnum);
1620  PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1621  HeapTupleHeaderGetXmin(heapTuple->t_data));
1622  if (all_dead)
1623  *all_dead = false;
1624  return true;
1625  }
1626  }
1627  skip = false;
1628 
1629  /*
1630  * If we can't see it, maybe no one else can either. At caller
1631  * request, check whether all chain members are dead to all
1632  * transactions.
1633  *
1634  * Note: if you change the criterion here for what is "dead", fix the
1635  * planner's get_actual_variable_range() function to match.
1636  */
1637  if (all_dead && *all_dead &&
1638  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
1639  *all_dead = false;
1640 
1641  /*
1642  * Check to see if HOT chain continues past this tuple; if so fetch
1643  * the next offnum and loop around.
1644  */
1645  if (HeapTupleIsHotUpdated(heapTuple))
1646  {
1647  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1648  blkno);
1649  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1650  at_chain_start = false;
1651  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1652  }
1653  else
1654  break; /* end of chain */
1655  }
1656 
1657  return false;
1658 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:513
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
static const struct exclude_list_item skip[]
Definition: pg_checksums.c:112
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
TransactionId RecentGlobalXmin
Definition: snapmgr.c:168
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2545
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9033
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:738
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2633
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:456
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 5725 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

5726 {
5727  Buffer buffer;
5728  Page page;
5729  OffsetNumber offnum;
5730  ItemId lp = NULL;
5731  HeapTupleHeader htup;
5732  uint32 oldlen;
5733  uint32 newlen;
5734 
5735  /*
5736  * For now, parallel operations are required to be strictly read-only.
5737  * Unlike a regular update, this should never create a combo CID, so it
5738  * might be possible to relax this restriction, but not without more
5739  * thought and testing. It's not clear that it would be useful, anyway.
5740  */
5741  if (IsInParallelMode())
5742  ereport(ERROR,
5743  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
5744  errmsg("cannot update tuples during a parallel operation")));
5745 
5746  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5747  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5748  page = (Page) BufferGetPage(buffer);
5749 
5750  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5751  if (PageGetMaxOffsetNumber(page) >= offnum)
5752  lp = PageGetItemId(page, offnum);
5753 
5754  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5755  elog(ERROR, "invalid lp");
5756 
5757  htup = (HeapTupleHeader) PageGetItem(page, lp);
5758 
5759  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5760  newlen = tuple->t_len - tuple->t_data->t_hoff;
5761  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5762  elog(ERROR, "wrong tuple length");
5763 
5764  /* NO EREPORT(ERROR) from here till changes are logged */
5765  START_CRIT_SECTION();
5766 
5767  memcpy((char *) htup + htup->t_hoff,
5768  (char *) tuple->t_data + tuple->t_data->t_hoff,
5769  newlen);
5770 
5771  MarkBufferDirty(buffer);
5772 
5773  /* XLOG stuff */
5774  if (RelationNeedsWAL(relation))
5775  {
5776  xl_heap_inplace xlrec;
5777  XLogRecPtr recptr;
5778 
5779  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5780 
5781  XLogBeginInsert();
5782  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
5783 
5784  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5785  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
5786 
5787  /* inplace updates aren't decoded atm, don't log the origin */
5788 
5789  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
5790 
5791  PageSetLSN(page, recptr);
5792  }
5793 
5794  END_CRIT_SECTION();
5795 
5796  UnlockReleaseBuffer(buffer);
5797 
5798  /*
5799  * Send out shared cache inval if necessary. Note that because we only
5800  * pass the new version of the tuple, this mustn't be used for any
5801  * operations that could change catcache lookup keys. But we aren't
5802  * bothering with index updates either, so that's true a fortiori.
5803  */
5804  if (!IsBootstrapProcessingMode())
5805  CacheInvalidateHeapTuple(relation, tuple, NULL);
5806 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:362
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define SizeOfHeapInplace
Definition: heapam_xlog.h:308
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:610
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:996
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:35
unsigned int uint32
Definition: c.h:367
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
OffsetNumber offnum
Definition: heapam_xlog.h:304
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3722
#define ereport(elevel,...)
Definition: elog.h:144
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:606
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:562
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:392
int errmsg(const char *fmt,...)
Definition: elog.c:824
#define elog(elevel,...)
Definition: elog.h:214
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_insert()

void heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 1870 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_NO_LOGICAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), InvalidBlockNumber, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_insert(), heapam_tuple_insert_speculative(), simple_heap_insert(), and toast_save_datum().

1872 {
1873  TransactionId xid = GetCurrentTransactionId();
1874  HeapTuple heaptup;
1875  Buffer buffer;
1876  Buffer vmbuffer = InvalidBuffer;
1877  bool all_visible_cleared = false;
1878 
1879  /*
1880  * Fill in tuple header fields and toast the tuple if necessary.
1881  *
1882  * Note: below this point, heaptup is the data we actually intend to store
1883  * into the relation; tup is the caller's original untoasted data.
1884  */
1885  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
1886 
1887  /*
1888  * Find buffer to insert this tuple into. If the page is all visible,
1889  * this will also pin the requisite visibility map page.
1890  */
1891  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1892  InvalidBuffer, options, bistate,
1893  &vmbuffer, NULL);
1894 
1895  /*
1896  * We're about to do the actual insert -- but check for conflict first, to
1897  * avoid possibly having to roll back work we've just done.
1898  *
1899  * This is safe without a recheck as long as there is no possibility of
1900  * another process scanning the page between this check and the insert
1901  * being visible to the scan (i.e., an exclusive buffer content lock is
1902  * continuously held from this point until the tuple insert is visible).
1903  *
1904  * For a heap insert, we only need to check for table-level SSI locks. Our
1905  * new tuple can't possibly conflict with existing tuple locks, and heap
1906  * page locks are only consolidated versions of tuple locks; they do not
1907  * lock "gaps" as index page locks do. So we don't need to specify a
1908  * buffer when making the call, which makes for a faster check.
1909  */
1910  CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
1911 
1912  /* NO EREPORT(ERROR) from here till changes are logged */
1913  START_CRIT_SECTION();
1914 
1915  RelationPutHeapTuple(relation, buffer, heaptup,
1916  (options & HEAP_INSERT_SPECULATIVE) != 0);
1917 
1918  if (PageIsAllVisible(BufferGetPage(buffer)))
1919  {
1920  all_visible_cleared = true;
1921  PageClearAllVisible(BufferGetPage(buffer));
1922  visibilitymap_clear(relation,
1923  ItemPointerGetBlockNumber(&(heaptup->t_self)),
1924  vmbuffer, VISIBILITYMAP_VALID_BITS);
1925  }
1926 
1927  /*
1928  * XXX Should we set PageSetPrunable on this page ?
1929  *
1930  * The inserting transaction may eventually abort thus making this tuple
1931  * DEAD and hence available for pruning. Though we don't want to optimize
1932  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1933  * aborted tuple will never be pruned until next vacuum is triggered.
1934  *
1935  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1936  */
1937 
1938  MarkBufferDirty(buffer);
1939 
1940  /* XLOG stuff */
1941  if (RelationNeedsWAL(relation))
1942  {
1943  xl_heap_insert xlrec;
1944  xl_heap_header xlhdr;
1945  XLogRecPtr recptr;
1946  Page page = BufferGetPage(buffer);
1947  uint8 info = XLOG_HEAP_INSERT;
1948  int bufflags = 0;
1949 
1950  /*
1951  * If this is a catalog, we need to transmit combocids to properly
1952  * decode, so log that as well.
1953  */
1954  if (RelationIsAccessibleInLogicalDecoding(relation))
1955  log_heap_new_cid(relation, heaptup);
1956 
1957  /*
1958  * If this is the single and first tuple on page, we can reinit the
1959  * page instead of restoring the whole thing. Set flag, and hide
1960  * buffer references from XLogInsert.
1961  */
1962  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
1963  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
1964  {
1965  info |= XLOG_HEAP_INIT_PAGE;
1966  bufflags |= REGBUF_WILL_INIT;
1967  }
1968 
1969  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
1970  xlrec.flags = 0;
1971  if (all_visible_cleared)
1972  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
1973  if (options & HEAP_INSERT_SPECULATIVE)
1974  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
1975  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
1976 
1977  /*
1978  * For logical decoding, we need the tuple even if we're doing a full
1979  * page write, so make sure it's included even if we take a full-page
1980  * image. (XXX We could alternatively store a pointer into the FPW).
1981  */
1982  if (RelationIsLogicallyLogged(relation) &&
1983  !(options & HEAP_INSERT_NO_LOGICAL))
1984  {
1985  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
1986  bufflags |= REGBUF_KEEP_DATA;
1987  }
1988 
1989  XLogBeginInsert();
1990  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
1991 
1992  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1993  xlhdr.t_infomask = heaptup->t_data->t_infomask;
1994  xlhdr.t_hoff = heaptup->t_data->t_hoff;
1995 
1996  /*
1997  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
1998  * write the whole page to the xlog, we don't need to store
1999  * xl_heap_header in the xlog.
2000  */
2001  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2002  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2003  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2004  XLogRegisterBufData(0,
2005  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2006  heaptup->t_len - SizeofHeapTupleHeader);
2007 
2008  /* filtering by origin on a row level is much more efficient */
2009  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2010 
2011  recptr = XLogInsert(RM_HEAP_ID, info);
2012 
2013  PageSetLSN(page, recptr);
2014  }
2015 
2016  END_CRIT_SECTION();
2017 
2018  UnlockReleaseBuffer(buffer);
2019  if (vmbuffer != InvalidBuffer)
2020  ReleaseBuffer(vmbuffer);
2021 
2022  /*
2023  * If tuple is cachable, mark it for invalidation from the caches in case
2024  * we abort. Note it is OK to do this after releasing the buffer, because
2025  * the heaptup data structure is all in local memory, not in the shared
2026  * buffer.
2027  */
2028  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2029 
2030  /* Note: speculative insertions are counted too, even if aborted later */
2031  pgstat_count_heap_insert(relation, 1);
2032 
2033  /*
2034  * If heaptup is a private copy, release it. Don't forget to copy t_self
2035  * back to the caller's image, too.
2036  */
2037  if (heaptup != tup)
2038  {
2039  tup->t_self = heaptup->t_self;
2040  heap_freetuple(heaptup);
2041  }
2042 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:362
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7531
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2051
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:513
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1468
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:214
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
unsigned char uint8
Definition: c.h:365
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:230
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3483
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:635
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:37
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3506
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:35
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:320
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:398
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:324
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:416
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:619
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:38
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4374
#define PageClearAllVisible(page)
Definition: bufpage.h:389
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:738
uint16 t_infomask
Definition: heapam_xlog.h:145
#define InvalidBlockNumber
Definition: block.h:33
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:562
#define SizeOfHeapInsert
Definition: heapam_xlog.h:160
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2633
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:1995
#define HEAP_INSERT_NO_LOGICAL
Definition: heapam.h:36
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:121
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:154
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:78

◆ heap_lock_tuple()

TM_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
TM_FailureData tmfd 
)

Definition at line 3983 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), TM_FailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, 
UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_lock().

3987 {
3988  TM_Result result;
3989  ItemPointer tid = &(tuple->t_self);
3990  ItemId lp;
3991  Page page;
3992  Buffer vmbuffer = InvalidBuffer;
3993  BlockNumber block;
3994  TransactionId xid,
3995  xmax;
3996  uint16 old_infomask,
3997  new_infomask,
3998  new_infomask2;
3999  bool first_time = true;
4000  bool skip_tuple_lock = false;
4001  bool have_tuple_lock = false;
4002  bool cleared_all_frozen = false;
4003 
4004  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4005  block = ItemPointerGetBlockNumber(tid);
4006 
4007  /*
4008  * Before locking the buffer, pin the visibility map page if it appears to
4009  * be necessary. Since we haven't got the lock yet, someone else might be
4010  * in the middle of changing this, so we'll need to recheck after we have
4011  * the lock.
4012  */
4013  if (PageIsAllVisible(BufferGetPage(*buffer)))
4014  visibilitymap_pin(relation, block, &vmbuffer);
4015 
4017 
4018  page = BufferGetPage(*buffer);
4019  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4020  Assert(ItemIdIsNormal(lp));
4021 
4022  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4023  tuple->t_len = ItemIdGetLength(lp);
4024  tuple->t_tableOid = RelationGetRelid(relation);
4025 
4026 l3:
4027  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4028 
4029  if (result == TM_Invisible)
4030  {
4031  /*
4032  * This is possible, but only when locking a tuple for ON CONFLICT
4033  * UPDATE. We return this value here rather than throwing an error in
4034  * order to give that case the opportunity to throw a more specific
4035  * error.
4036  */
4037  result = TM_Invisible;
4038  goto out_locked;
4039  }
4040  else if (result == TM_BeingModified ||
4041  result == TM_Updated ||
4042  result == TM_Deleted)
4043  {
4044  TransactionId xwait;
4045  uint16 infomask;
4046  uint16 infomask2;
4047  bool require_sleep;
4048  ItemPointerData t_ctid;
4049 
4050  /* must copy state data before unlocking buffer */
4051  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4052  infomask = tuple->t_data->t_infomask;
4053  infomask2 = tuple->t_data->t_infomask2;
4054  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4055 
4056  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4057 
4058  /*
4059  * If any subtransaction of the current top transaction already holds
4060  * a lock as strong as or stronger than what we're requesting, we
4061  * effectively hold the desired lock already. We *must* succeed
4062  * without trying to take the tuple lock, else we will deadlock
4063  * against anyone wanting to acquire a stronger lock.
4064  *
4065  * Note we only do this the first time we loop on the HTSU result;
4066  * there is no point in testing in subsequent passes, because
4067  * evidently our own transaction cannot have acquired a new lock after
4068  * the first time we checked.
4069  */
4070  if (first_time)
4071  {
4072  first_time = false;
4073 
4074  if (infomask & HEAP_XMAX_IS_MULTI)
4075  {
4076  int i;
4077  int nmembers;
4078  MultiXactMember *members;
4079 
4080  /*
4081  * We don't need to allow old multixacts here; if that had
4082  * been the case, HeapTupleSatisfiesUpdate would have returned
4083  * MayBeUpdated and we wouldn't be here.
4084  */
4085  nmembers =
4086  GetMultiXactIdMembers(xwait, &members, false,
4087  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4088 
4089  for (i = 0; i < nmembers; i++)
4090  {
4091  /* only consider members of our own transaction */
4092  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4093  continue;
4094 
4095  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4096  {
4097  pfree(members);
4098  result = TM_Ok;
4099  goto out_unlocked;
4100  }
4101  else
4102  {
4103  /*
4104  * Disable acquisition of the heavyweight tuple lock.
4105  * Otherwise, when promoting a weaker lock, we might
4106  * deadlock with another locker that has acquired the
4107  * heavyweight tuple lock and is waiting for our
4108  * transaction to finish.
4109  *
4110  * Note that in this case we still need to wait for
4111  * the multixact if required, to avoid acquiring
4112  * conflicting locks.
4113  */
4114  skip_tuple_lock = true;
4115  }
4116  }
4117 
4118  if (members)
4119  pfree(members);
4120  }
4121  else if (TransactionIdIsCurrentTransactionId(xwait))
4122  {
4123  switch (mode)
4124  {
4125  case LockTupleKeyShare:
4126  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4127  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4128  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4129  result = TM_Ok;
4130  goto out_unlocked;
4131  case LockTupleShare:
4132  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4133  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4134  {
4135  result = TM_Ok;
4136  goto out_unlocked;
4137  }
4138  break;
4140  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4141  {
4142  result = TM_Ok;
4143  goto out_unlocked;
4144  }
4145  break;
4146  case LockTupleExclusive:
4147  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4148  infomask2 & HEAP_KEYS_UPDATED)
4149  {
4150  result = TM_Ok;
4151  goto out_unlocked;
4152  }
4153  break;
4154  }
4155  }
4156  }
4157 
4158  /*
4159  * Initially assume that we will have to wait for the locking
4160  * transaction(s) to finish. We check various cases below in which
4161  * this can be turned off.
4162  */
4163  require_sleep = true;
4164  if (mode == LockTupleKeyShare)
4165  {
4166  /*
4167  * If we're requesting KeyShare, and there's no update present, we
4168  * don't need to wait. Even if there is an update, we can still
4169  * continue if the key hasn't been modified.
4170  *
4171  * However, if there are updates, we need to walk the update chain
4172  * to mark future versions of the row as locked, too. That way,
4173  * if somebody deletes that future version, we're protected
4174  * against the key going away. This locking of future versions
4175  * could block momentarily, if a concurrent transaction is
4176  * deleting a key; or it could return a value to the effect that
4177  * the transaction deleting the key has already committed. So we
4178  * do this before re-locking the buffer; otherwise this would be
4179  * prone to deadlocks.
4180  *
4181  * Note that the TID we're locking was grabbed before we unlocked
4182  * the buffer. For it to change while we're not looking, the
4183  * other properties we're testing for below after re-locking the
4184  * buffer would also change, in which case we would restart this
4185  * loop above.
4186  */
4187  if (!(infomask2 & HEAP_KEYS_UPDATED))
4188  {
4189  bool updated;
4190 
4191  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4192 
4193  /*
4194  * If there are updates, follow the update chain; bail out if
4195  * that cannot be done.
4196  */
4197  if (follow_updates && updated)
4198  {
4199  TM_Result res;
4200 
4201  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4203  mode);
4204  if (res != TM_Ok)
4205  {
4206  result = res;
4207  /* recovery code expects to have buffer lock held */
4209  goto failed;
4210  }
4211  }
4212 
4214 
4215  /*
4216  * Make sure it's still an appropriate lock, else start over.
4217  * Also, if it wasn't updated before we released the lock, but
4218  * is updated now, we start over too; the reason is that we
4219  * now need to follow the update chain to lock the new
4220  * versions.
4221  */
4222  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4223  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4224  !updated))
4225  goto l3;
4226 
4227  /* Things look okay, so we can skip sleeping */
4228  require_sleep = false;
4229 
4230  /*
4231  * Note we allow Xmax to change here; other updaters/lockers
4232  * could have modified it before we grabbed the buffer lock.
4233  * However, this is not a problem, because with the recheck we
4234  * just did we ensure that they still don't conflict with the
4235  * lock we want.
4236  */
4237  }
4238  }
4239  else if (mode == LockTupleShare)
4240  {
4241  /*
4242  * If we're requesting Share, we can similarly avoid sleeping if
4243  * there's no update and no exclusive lock present.
4244  */
4245  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4246  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4247  {
4249 
4250  /*
4251  * Make sure it's still an appropriate lock, else start over.
4252  * See above about allowing xmax to change.
4253  */
4254  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4256  goto l3;
4257  require_sleep = false;
4258  }
4259  }
4260  else if (mode == LockTupleNoKeyExclusive)
4261  {
4262  /*
4263  * If we're requesting NoKeyExclusive, we might also be able to
4264  * avoid sleeping; just ensure that there no conflicting lock
4265  * already acquired.
4266  */
4267  if (infomask & HEAP_XMAX_IS_MULTI)
4268  {
4269  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4270  mode, NULL))
4271  {
4272  /*
4273  * No conflict, but if the xmax changed under us in the
4274  * meantime, start over.
4275  */
4277  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4279  xwait))
4280  goto l3;
4281 
4282  /* otherwise, we're good */
4283  require_sleep = false;
4284  }
4285  }
4286  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4287  {
4289 
4290  /* if the xmax changed in the meantime, start over */
4291  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4293  xwait))
4294  goto l3;
4295  /* otherwise, we're good */
4296  require_sleep = false;
4297  }
4298  }
4299 
4300  /*
4301  * As a check independent from those above, we can also avoid sleeping
4302  * if the current transaction is the sole locker of the tuple. Note
4303  * that the strength of the lock already held is irrelevant; this is
4304  * not about recording the lock in Xmax (which will be done regardless
4305  * of this optimization, below). Also, note that the cases where we
4306  * hold a lock stronger than we are requesting are already handled
4307  * above by not doing anything.
4308  *
4309  * Note we only deal with the non-multixact case here; MultiXactIdWait
4310  * is well equipped to deal with this situation on its own.
4311  */
4312  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4314  {
4315  /* ... but if the xmax changed in the meantime, start over */
4317  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4319  xwait))
4320  goto l3;
4322  require_sleep = false;
4323  }
4324 
4325  /*
4326  * Time to sleep on the other transaction/multixact, if necessary.
4327  *
4328  * If the other transaction is an update/delete that's already
4329  * committed, then sleeping cannot possibly do any good: if we're
4330  * required to sleep, get out to raise an error instead.
4331  *
4332  * By here, we either have already acquired the buffer exclusive lock,
4333  * or we must wait for the locking transaction or multixact; so below
4334  * we ensure that we grab buffer lock after the sleep.
4335  */
4336  if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4337  {
4339  goto failed;
4340  }
4341  else if (require_sleep)
4342  {
4343  /*
4344  * Acquire tuple lock to establish our priority for the tuple, or
4345  * die trying. LockTuple will release us when we are next-in-line
4346  * for the tuple. We must do this even if we are share-locking,
4347  * but not if we already have a weaker lock on the tuple.
4348  *
4349  * If we are forced to "start over" below, we keep the tuple lock;
4350  * this arranges that we stay at the head of the line while
4351  * rechecking tuple state.
4352  */
4353  if (!skip_tuple_lock &&
4354  !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4355  &have_tuple_lock))
4356  {
4357  /*
4358  * This can only happen if wait_policy is Skip and the lock
4359  * couldn't be obtained.
4360  */
4361  result = TM_WouldBlock;
4362  /* recovery code expects to have buffer lock held */
4364  goto failed;
4365  }
4366 
4367  if (infomask & HEAP_XMAX_IS_MULTI)
4368  {
4370 
4371  /* We only ever lock tuples, never update them */
4372  if (status >= MultiXactStatusNoKeyUpdate)
4373  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4374 
4375  /* wait for multixact to end, or die trying */
4376  switch (wait_policy)
4377  {
4378  case LockWaitBlock:
4379  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4380  relation, &tuple->t_self, XLTW_Lock, NULL);
4381  break;
4382  case LockWaitSkip:
4384  status, infomask, relation,
4385  NULL))
4386  {
4387  result = TM_WouldBlock;
4388  /* recovery code expects to have buffer lock held */
4390  goto failed;
4391  }
4392  break;
4393  case LockWaitError:
4395  status, infomask, relation,
4396  NULL))
4397  ereport(ERROR,
4398  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4399  errmsg("could not obtain lock on row in relation \"%s\"",
4400  RelationGetRelationName(relation))));
4401 
4402  break;
4403  }
4404 
4405  /*
4406  * Of course, the multixact might not be done here: if we're
4407  * requesting a light lock mode, other transactions with light
4408  * locks could still be alive, as well as locks owned by our
4409  * own xact or other subxacts of this backend. We need to
4410  * preserve the surviving MultiXact members. Note that it
4411  * isn't absolutely necessary in the latter case, but doing so
4412  * is simpler.
4413  */
4414  }
4415  else
4416  {
4417  /* wait for regular transaction to end, or die trying */
4418  switch (wait_policy)
4419  {
4420  case LockWaitBlock:
4421  XactLockTableWait(xwait, relation, &tuple->t_self,
4422  XLTW_Lock);
4423  break;
4424  case LockWaitSkip:
4425  if (!ConditionalXactLockTableWait(xwait))
4426  {
4427  result = TM_WouldBlock;
4428  /* recovery code expects to have buffer lock held */
4430  goto failed;
4431  }
4432  break;
4433  case LockWaitError:
4434  if (!ConditionalXactLockTableWait(xwait))
4435  ereport(ERROR,
4436  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4437  errmsg("could not obtain lock on row in relation \"%s\"",
4438  RelationGetRelationName(relation))));
4439  break;
4440  }
4441  }
4442 
4443  /* if there are updates, follow the update chain */
4444  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4445  {
4446  TM_Result res;
4447 
4448  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4450  mode);
4451  if (res != TM_Ok)
4452  {
4453  result = res;
4454  /* recovery code expects to have buffer lock held */
4456  goto failed;
4457  }
4458  }
4459 
4461 
4462  /*
4463  * xwait is done, but if xwait had just locked the tuple then some
4464  * other xact could update this tuple before we get to this point.
4465  * Check for xmax change, and start over if so.
4466  */
4467  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4469  xwait))
4470  goto l3;
4471 
4472  if (!(infomask & HEAP_XMAX_IS_MULTI))
4473  {
4474  /*
4475  * Otherwise check if it committed or aborted. Note we cannot
4476  * be here if the tuple was only locked by somebody who didn't
4477  * conflict with us; that would have been handled above. So
4478  * that transaction must necessarily be gone by now. But
4479  * don't check for this in the multixact case, because some
4480  * locker transactions might still be running.
4481  */
4482  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4483  }
4484  }
4485 
4486