PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/subtrans.h"
#include "access/syncscan.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static TM_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_changed, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (TableScanDesc sscan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
TableScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
 
void heap_rescan (TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (TableScanDesc sscan)
 
HeapTuple heap_getnext (TableScanDesc sscan, ScanDirection direction)
 
bool heap_getnextslot (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
void heap_get_latest_tid (TableScanDesc sscan, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
void heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
void simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
TM_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
TM_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
TM_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
 
static TM_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
 
static TM_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, ItemPointer tid)
 
void heap_abort_speculative (Relation relation, ItemPointer tid)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
TransactionId heap_compute_xid_horizon_for_tuples (Relation rel, ItemPointerData *tids, int nitems)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 
void HeapCheckForSerializableConflictOut (bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
 

Variables

struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 164 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 5799 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 5802 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 5798 of file heapam.c.

Referenced by FreezeMultiXactId().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 5801 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 5800 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 160 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 196 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 162 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2397 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2398 {
2399  return
2400  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2401  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2402  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2403  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2404  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2405  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2406  XLHL_KEYS_UPDATED : 0);
2407 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:262
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:261
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:263
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:265
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:264

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 4712 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

4717 {
4718  TransactionId new_xmax;
4719  uint16 new_infomask,
4720  new_infomask2;
4721 
4722  Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
4723 
4724 l5:
4725  new_infomask = 0;
4726  new_infomask2 = 0;
4727  if (old_infomask & HEAP_XMAX_INVALID)
4728  {
4729  /*
4730  * No previous locker; we just insert our own TransactionId.
4731  *
4732  * Note that it's critical that this case be the first one checked,
4733  * because there are several blocks below that come back to this one
4734  * to implement certain optimizations; old_infomask might contain
4735  * other dirty bits in those cases, but we don't really care.
4736  */
4737  if (is_update)
4738  {
4739  new_xmax = add_to_xmax;
4740  if (mode == LockTupleExclusive)
4741  new_infomask2 |= HEAP_KEYS_UPDATED;
4742  }
4743  else
4744  {
4745  new_infomask |= HEAP_XMAX_LOCK_ONLY;
4746  switch (mode)
4747  {
4748  case LockTupleKeyShare:
4749  new_xmax = add_to_xmax;
4750  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4751  break;
4752  case LockTupleShare:
4753  new_xmax = add_to_xmax;
4754  new_infomask |= HEAP_XMAX_SHR_LOCK;
4755  break;
4756  case LockTupleNoKeyExclusive:
4757  new_xmax = add_to_xmax;
4758  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4759  break;
4760  case LockTupleExclusive:
4761  new_xmax = add_to_xmax;
4762  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4763  new_infomask2 |= HEAP_KEYS_UPDATED;
4764  break;
4765  default:
4766  new_xmax = InvalidTransactionId; /* silence compiler */
4767  elog(ERROR, "invalid lock mode");
4768  }
4769  }
4770  }
4771  else if (old_infomask & HEAP_XMAX_IS_MULTI)
4772  {
4773  MultiXactStatus new_status;
4774 
4775  /*
4776  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
4777  * cross-check.
4778  */
4779  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4780 
4781  /*
4782  * A multixact together with LOCK_ONLY set but neither lock bit set
4783  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4784  * anymore. This check is critical for databases upgraded by
4785  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4786  * that such multis are never passed.
4787  */
4788  if (HEAP_LOCKED_UPGRADED(old_infomask))
4789  {
4790  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4791  old_infomask |= HEAP_XMAX_INVALID;
4792  goto l5;
4793  }
4794 
4795  /*
4796  * If the XMAX is already a MultiXactId, then we need to expand it to
4797  * include add_to_xmax; but if all the members were lockers and are
4798  * all gone, we can do away with the IS_MULTI bit and just set
4799  * add_to_xmax as the only locker/updater. If all lockers are gone
4800  * and we have an updater that aborted, we can also do without a
4801  * multi.
4802  *
4803  * The cost of doing GetMultiXactIdMembers would be paid by
4804  * MultiXactIdExpand if we weren't to do this, so this check is not
4805  * incurring extra work anyhow.
4806  */
4807  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4808  {
4809  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4810  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
4811  old_infomask)))
4812  {
4813  /*
4814  * Reset these bits and restart; otherwise fall through to
4815  * create a new multi below.
4816  */
4817  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4818  old_infomask |= HEAP_XMAX_INVALID;
4819  goto l5;
4820  }
4821  }
4822 
4823  new_status = get_mxact_status_for_lock(mode, is_update);
4824 
4825  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4826  new_status);
4827  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4828  }
4829  else if (old_infomask & HEAP_XMAX_COMMITTED)
4830  {
4831  /*
4832  * It's a committed update, so we need to preserve him as updater of
4833  * the tuple.
4834  */
4835  MultiXactStatus status;
4836  MultiXactStatus new_status;
4837 
4838  if (old_infomask2 & HEAP_KEYS_UPDATED)
4839  status = MultiXactStatusUpdate;
4840  else
4841  status = MultiXactStatusNoKeyUpdate;
4842 
4843  new_status = get_mxact_status_for_lock(mode, is_update);
4844 
4845  /*
4846  * since it's not running, it's obviously impossible for the old
4847  * updater to be identical to the current one, so we need not check
4848  * for that case as we do in the block above.
4849  */
4850  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4851  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4852  }
4853  else if (TransactionIdIsInProgress(xmax))
4854  {
4855  /*
4856  * If the XMAX is a valid, in-progress TransactionId, then we need to
4857  * create a new MultiXactId that includes both the old locker or
4858  * updater and our own TransactionId.
4859  */
4860  MultiXactStatus new_status;
4861  MultiXactStatus old_status;
4862  LockTupleMode old_mode;
4863 
4864  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4865  {
4866  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4867  old_status = MultiXactStatusForKeyShare;
4868  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4869  old_status = MultiXactStatusForShare;
4870  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4871  {
4872  if (old_infomask2 & HEAP_KEYS_UPDATED)
4873  old_status = MultiXactStatusForUpdate;
4874  else
4875  old_status = MultiXactStatusForNoKeyUpdate;
4876  }
4877  else
4878  {
4879  /*
4880  * LOCK_ONLY can be present alone only when a page has been
4881  * upgraded by pg_upgrade. But in that case,
4882  * TransactionIdIsInProgress() should have returned false. We
4883  * assume it's no longer locked in this case.
4884  */
4885  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4886  old_infomask |= HEAP_XMAX_INVALID;
4887  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4888  goto l5;
4889  }
4890  }
4891  else
4892  {
4893  /* it's an update, but which kind? */
4894  if (old_infomask2 & HEAP_KEYS_UPDATED)
4895  old_status = MultiXactStatusUpdate;
4896  else
4897  old_status = MultiXactStatusNoKeyUpdate;
4898  }
4899 
4900  old_mode = TUPLOCK_from_mxstatus(old_status);
4901 
4902  /*
4903  * If the lock to be acquired is for the same TransactionId as the
4904  * existing lock, there's an optimization possible: consider only the
4905  * strongest of both locks as the only one present, and restart.
4906  */
4907  if (xmax == add_to_xmax)
4908  {
4909  /*
4910  * Note that it's not possible for the original tuple to be
4911  * updated: we wouldn't be here because the tuple would have been
4912  * invisible and we wouldn't try to update it. As a subtlety,
4913  * this code can also run when traversing an update chain to lock
4914  * future versions of a tuple. But we wouldn't be here either,
4915  * because the add_to_xmax would be different from the original
4916  * updater.
4917  */
4918  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4919 
4920  /* acquire the strongest of both */
4921  if (mode < old_mode)
4922  mode = old_mode;
4923  /* mustn't touch is_update */
4924 
4925  old_infomask |= HEAP_XMAX_INVALID;
4926  goto l5;
4927  }
4928 
4929  /* otherwise, just fall back to creating a new multixact */
4930  new_status = get_mxact_status_for_lock(mode, is_update);
4931  new_xmax = MultiXactIdCreate(xmax, old_status,
4932  add_to_xmax, new_status);
4933  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4934  }
4935  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4936  TransactionIdDidCommit(xmax))
4937  {
4938  /*
4939  * It's a committed update, so we gotta preserve him as updater of the
4940  * tuple.
4941  */
4942  MultiXactStatus status;
4943  MultiXactStatus new_status;
4944 
4945  if (old_infomask2 & HEAP_KEYS_UPDATED)
4946  status = MultiXactStatusUpdate;
4947  else
4948  status = MultiXactStatusNoKeyUpdate;
4949 
4950  new_status = get_mxact_status_for_lock(mode, is_update);
4951 
4952  /*
4953  * since it's not running, it's obviously impossible for the old
4954  * updater to be identical to the current one, so we need not check
4955  * for that case as we do in the block above.
4956  */
4957  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4958  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4959  }
4960  else
4961  {
4962  /*
4963  * Can get here iff the locking/updating transaction was running when
4964  * the infomask was extracted from the tuple, but finished before
4965  * TransactionIdIsInProgress got to run. Deal with it as if there was
4966  * no locker at all in the first place.
4967  */
4968  old_infomask |= HEAP_XMAX_INVALID;
4969  goto l5;
4970  }
4971 
4972  *result_infomask = new_infomask;
4973  *result_infomask2 = new_infomask2;
4974  *result_xmax = new_xmax;
4975 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6407
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:41
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
uint32 TransactionId
Definition: c.h:575
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1320
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:206
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:262
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6488
unsigned short uint16
Definition: c.h:428
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:386
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId MultiXactId
Definition: c.h:585
#define Assert(condition)
Definition: c.h:800
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:196
#define elog(elevel,...)
Definition: elog.h:228
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:3925
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:264
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:266
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:439

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 6755 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

6757 {
6758  return Do_MultiXactIdWait(multi, status, infomask, true,
6759  rel, NULL, XLTW_None, remaining);
6760 }
int remaining
Definition: informix.c:667
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6655
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 6655 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

6659 {
6660  bool result = true;
6661  MultiXactMember *members;
6662  int nmembers;
6663  int remain = 0;
6664 
6665  /* for pre-pg_upgrade tuples, no need to sleep at all */
6666  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
6667  GetMultiXactIdMembers(multi, &members, false,
6668  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6669 
6670  if (nmembers >= 0)
6671  {
6672  int i;
6673 
6674  for (i = 0; i < nmembers; i++)
6675  {
6676  TransactionId memxid = members[i].xid;
6677  MultiXactStatus memstatus = members[i].status;
6678 
6679  if (TransactionIdIsCurrentTransactionId(memxid))
6680  {
6681  remain++;
6682  continue;
6683  }
6684 
6685  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6686  LOCKMODE_from_mxstatus(status)))
6687  {
6688  if (remaining && TransactionIdIsInProgress(memxid))
6689  remain++;
6690  continue;
6691  }
6692 
6693  /*
6694  * This member conflicts with our multi, so we have to sleep (or
6695  * return failure, if asked to avoid waiting.)
6696  *
6697  * Note that we don't set up an error context callback ourselves,
6698  * but instead we pass the info down to XactLockTableWait. This
6699  * might seem a bit wasteful because the context is set up and
6700  * tore down for each member of the multixact, but in reality it
6701  * should be barely noticeable, and it avoids duplicate code.
6702  */
6703  if (nowait)
6704  {
6705  result = ConditionalXactLockTableWait(memxid);
6706  if (!result)
6707  break;
6708  }
6709  else
6710  XactLockTableWait(memxid, rel, ctid, oper);
6711  }
6712 
6713  pfree(members);
6714  }
6715 
6716  if (remaining)
6717  *remaining = remain;
6718 
6719  return result;
6720 }
int remaining
Definition: informix.c:667
MultiXactStatus
Definition: multixact.h:41
uint32 TransactionId
Definition: c.h:575
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1320
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:152
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:712
void pfree(void *pointer)
Definition: mcxt.c:1057
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:583
MultiXactStatus status
Definition: multixact.h:63
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:639
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:382
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode,
bool current_is_member 
)
static

Definition at line 6556 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6558 {
6559  int nmembers;
6560  MultiXactMember *members;
6561  bool result = false;
6562  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6563 
6564  if (HEAP_LOCKED_UPGRADED(infomask))
6565  return false;
6566 
6567  nmembers = GetMultiXactIdMembers(multi, &members, false,
6568  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6569  if (nmembers >= 0)
6570  {
6571  int i;
6572 
6573  for (i = 0; i < nmembers; i++)
6574  {
6575  TransactionId memxid;
6576  LOCKMODE memlockmode;
6577 
6578  if (result && (current_is_member == NULL || *current_is_member))
6579  break;
6580 
6581  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6582 
6583  /* ignore members from current xact (but track their presence) */
6584  memxid = members[i].xid;
6585  if (TransactionIdIsCurrentTransactionId(memxid))
6586  {
6587  if (current_is_member != NULL)
6588  *current_is_member = true;
6589  continue;
6590  }
6591  else if (result)
6592  continue;
6593 
6594  /* ignore members that don't conflict with the lock we want */
6595  if (!DoLockModesConflict(memlockmode, wanted))
6596  continue;
6597 
6598  if (ISUPDATE_from_mxstatus(members[i].status))
6599  {
6600  /* ignore aborted updaters */
6601  if (TransactionIdDidAbort(memxid))
6602  continue;
6603  }
6604  else
6605  {
6606  /* ignore lockers-only that are no longer in progress */
6607  if (!TransactionIdIsInProgress(memxid))
6608  continue;
6609  }
6610 
6611  /*
6612  * Whatever remains are either live lockers that conflict with our
6613  * wanted lock, and updaters that are not aborted. Those conflict
6614  * with what we want. Set up to return true, but keep going to
6615  * look for the current transaction among the multixact members,
6616  * if needed.
6617  */
6618  result = true;
6619  }
6620  pfree(members);
6621  }
6622 
6623  return result;
6624 }
uint32 TransactionId
Definition: c.h:575
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1320
static const struct @19 tupleLockExtraInfo[MaxLockTupleMode+1]
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:152
void pfree(void *pointer)
Definition: mcxt.c:1057
TransactionId xid
Definition: multixact.h:62
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:583
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ ExtractReplicaIdentity()

/*
 * Build a heap tuple representing the configured REPLICA IDENTITY to
 * represent the old tuple in an UPDATE or DELETE.
 *
 * Returns NULL if there's no need to log the identity or if there's no
 * suitable key defined.
 *
 * key_changed should be false if caller knows that no replica identity
 * columns changed value.  It's always true in the DELETE case.
 *
 * *copy is set to true if the returned tuple is a modified copy rather than
 * the same tuple that was passed in (the caller must free it in that case).
 */
static HeapTuple
ExtractReplicaIdentity(Relation relation, HeapTuple tp, bool key_changed,
					   bool *copy)
{
	TupleDesc	desc = RelationGetDescr(relation);
	char		replident = relation->rd_rel->relreplident;
	Bitmapset  *idattrs;
	HeapTuple	key_tuple;
	bool		nulls[MaxHeapAttributeNumber];
	Datum		values[MaxHeapAttributeNumber];

	*copy = false;

	if (!RelationIsLogicallyLogged(relation))
		return NULL;

	if (replident == REPLICA_IDENTITY_NOTHING)
		return NULL;

	if (replident == REPLICA_IDENTITY_FULL)
	{
		/*
		 * When logging the entire old tuple, it very well could contain
		 * toasted columns. If so, force them to be inlined.
		 */
		if (HeapTupleHasExternal(tp))
		{
			*copy = true;
			tp = toast_flatten_tuple(tp, desc);
		}
		return tp;
	}

	/* if the key hasn't changed and we're only logging the key, we're done */
	if (!key_changed)
		return NULL;

	/* find out the replica identity columns */
	idattrs = RelationGetIndexAttrBitmap(relation,
										 INDEX_ATTR_BITMAP_IDENTITY_KEY);

	/*
	 * If there's no defined replica identity columns, treat as !key_changed.
	 * (This case should not be reachable from heap_update, since that should
	 * calculate key_changed accurately.  But heap_delete just passes constant
	 * true for key_changed, so we can hit this case in deletes.)
	 */
	if (bms_is_empty(idattrs))
		return NULL;

	/*
	 * Construct a new tuple containing only the replica identity columns,
	 * with nulls elsewhere.  While we're at it, assert that the replica
	 * identity columns aren't null.
	 */
	heap_deform_tuple(tp, desc, values, nulls);

	for (int i = 0; i < desc->natts; i++)
	{
		/* attribute numbers in the bitmap are offset by system attributes */
		if (bms_is_member(i + 1 - FirstLowInvalidHeapAttributeNumber,
						  idattrs))
			Assert(!nulls[i]);
		else
			nulls[i] = true;
	}

	key_tuple = heap_form_tuple(desc, values, nulls);
	*copy = true;

	bms_free(idattrs);

	/*
	 * If the tuple, which by here only contains indexed columns, still has
	 * toasted columns, force them to be inlined.  This is somewhat unlikely
	 * since there's limits on the size of indexed columns, so we don't
	 * duplicate toast_flatten_tuple()s functionality in the above loop over
	 * the indexed columns, even if it would be more efficient.
	 */
	if (HeapTupleHasExternal(key_tuple))
	{
		HeapTuple	oldtup = key_tuple;

		key_tuple = toast_flatten_tuple(oldtup, desc);
		heap_freetuple(oldtup);
	}

	return key_tuple;
}
#define RelationGetDescr(relation)
Definition: rel.h:483
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:636
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:350
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
uintptr_t Datum
Definition: postgres.h:367
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define Assert(condition)
Definition: c.h:800
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:165
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:4945
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427

◆ fix_infomask_from_infobits()

/*
 * Given an "infobits" field from an XLog record, set the correct bits in the
 * given infomask and infomask2 for the tuple touched by the record.
 *
 * (This is the reverse of compute_infobits.)
 */
static void
fix_infomask_from_infobits(uint8 infobits, uint16 *infomask, uint16 *infomask2)
{
	/* clear all lock-related bits first, then set those the record carries */
	*infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
				   HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
	*infomask2 &= ~HEAP_KEYS_UPDATED;

	if (infobits & XLHL_XMAX_IS_MULTI)
		*infomask |= HEAP_XMAX_IS_MULTI;
	if (infobits & XLHL_XMAX_LOCK_ONLY)
		*infomask |= HEAP_XMAX_LOCK_ONLY;
	if (infobits & XLHL_XMAX_EXCL_LOCK)
		*infomask |= HEAP_XMAX_EXCL_LOCK;
	/* note HEAP_XMAX_SHR_LOCK isn't considered here */
	if (infobits & XLHL_XMAX_KEYSHR_LOCK)
		*infomask |= HEAP_XMAX_KEYSHR_LOCK;

	if (infobits & XLHL_KEYS_UPDATED)
		*infomask2 |= HEAP_KEYS_UPDATED;
}
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:262
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:261
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:263
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:265
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:264

◆ FreeBulkInsertState()

/*
 * FreeBulkInsertState - clean up after finishing a bulk insert
 *
 * Releases the buffer pinned by the bulk-insert machinery (if any), frees
 * the buffer access strategy, and frees the state object itself.
 */
void
FreeBulkInsertState(BulkInsertState bistate)
{
	if (bistate->current_buf != InvalidBuffer)
		ReleaseBuffer(bistate->current_buf);
	FreeAccessStrategy(bistate->strategy);
	pfree(bistate);
}
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
void pfree(void *pointer)
Definition: mcxt.c:1057
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:31
Buffer current_buf
Definition: hio.h:32

◆ FreezeMultiXactId()

/*
 * FreezeMultiXactId
 *		Determine what to do during freezing when a tuple is marked by a
 *		MultiXactId.
 *
 * NB -- this might have the side-effect of creating a new MultiXactId!
 *
 * "flags" is an output value; it's used to tell caller what to do on return.
 * Possible flags are:
 * FRM_NOOP
 *		don't do anything -- keep existing Xmax
 * FRM_INVALIDATE_XMAX
 *		mark Xmax as InvalidTransactionId and set XMAX_INVALID flag.
 * FRM_RETURN_IS_XID
 *		The Xid return value is a single update Xid to set as xmax.
 * FRM_MARK_COMMITTED
 *		Xmax can be marked as HEAP_XMAX_COMMITTED
 * FRM_RETURN_IS_MULTI
 *		The return value is a new MultiXactId to set as new Xmax.
 *		(caller must obtain proper infomask bits using GetMultiXactIdHintBits)
 */
static TransactionId
FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
				  TransactionId relfrozenxid, TransactionId relminmxid,
				  TransactionId cutoff_xid, MultiXactId cutoff_multi,
				  uint16 *flags)
{
	TransactionId xid = InvalidTransactionId;
	int			i;
	MultiXactMember *members;
	int			nmembers;
	bool		need_replace;
	int			nnewmembers;
	MultiXactMember *newmembers;
	bool		has_lockers;
	TransactionId update_xid;
	bool		update_committed;

	*flags = 0;

	/* We should only be called in Multis */
	Assert(t_infomask & HEAP_XMAX_IS_MULTI);

	if (!MultiXactIdIsValid(multi) ||
		HEAP_LOCKED_UPGRADED(t_infomask))
	{
		/* Ensure infomask bits are appropriately set/reset */
		*flags |= FRM_INVALIDATE_XMAX;
		return InvalidTransactionId;
	}
	else if (MultiXactIdPrecedes(multi, relminmxid))
		ereport(ERROR,
				(errcode(ERRCODE_DATA_CORRUPTED),
				 errmsg_internal("found multixact %u from before relminmxid %u",
								 multi, relminmxid)));
	else if (MultiXactIdPrecedes(multi, cutoff_multi))
	{
		/*
		 * This old multi cannot possibly have members still running, but
		 * verify just in case.  If it was a locker only, it can be removed
		 * without any further consideration; but if it contained an update,
		 * we might need to preserve it.
		 */
		if (MultiXactIdIsRunning(multi,
								 HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
			ereport(ERROR,
					(errcode(ERRCODE_DATA_CORRUPTED),
					 errmsg_internal("multixact %u from before cutoff %u found to be still running",
									 multi, cutoff_multi)));

		if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
		{
			*flags |= FRM_INVALIDATE_XMAX;
			xid = InvalidTransactionId; /* not strictly necessary */
		}
		else
		{
			/* replace multi by update xid */
			xid = MultiXactIdGetUpdateXid(multi, t_infomask);

			/* wasn't only a lock, xid needs to be valid */
			Assert(TransactionIdIsValid(xid));

			if (TransactionIdPrecedes(xid, relfrozenxid))
				ereport(ERROR,
						(errcode(ERRCODE_DATA_CORRUPTED),
						 errmsg_internal("found update xid %u from before relfrozenxid %u",
										 xid, relfrozenxid)));

			/*
			 * If the xid is older than the cutoff, it has to have aborted,
			 * otherwise the tuple would have gotten pruned away.
			 */
			if (TransactionIdPrecedes(xid, cutoff_xid))
			{
				if (TransactionIdDidCommit(xid))
					ereport(ERROR,
							(errcode(ERRCODE_DATA_CORRUPTED),
							 errmsg_internal("cannot freeze committed update xid %u", xid)));
				*flags |= FRM_INVALIDATE_XMAX;
				xid = InvalidTransactionId; /* not strictly necessary */
			}
			else
			{
				*flags |= FRM_RETURN_IS_XID;
			}
		}

		return xid;
	}

	/*
	 * This multixact might have or might not have members still running, but
	 * we know it's valid and is newer than the cutoff point for multis.
	 * However, some member(s) of it may be below the cutoff for Xids, so we
	 * need to walk the whole members array to figure out what to do, if
	 * anything.
	 */

	nmembers =
		GetMultiXactIdMembers(multi, &members, false,
							  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
	if (nmembers <= 0)
	{
		/* Nothing worth keeping */
		*flags |= FRM_INVALIDATE_XMAX;
		return InvalidTransactionId;
	}

	/* is there anything older than the cutoff? */
	need_replace = false;
	for (i = 0; i < nmembers; i++)
	{
		if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
		{
			need_replace = true;
			break;
		}
	}

	/*
	 * In the simplest case, there is no member older than the cutoff; we can
	 * keep the existing MultiXactId as is.
	 */
	if (!need_replace)
	{
		*flags |= FRM_NOOP;
		pfree(members);
		return InvalidTransactionId;
	}

	/*
	 * If the multi needs to be updated, figure out which members do we need
	 * to keep.
	 */
	nnewmembers = 0;
	newmembers = palloc(sizeof(MultiXactMember) * nmembers);
	has_lockers = false;
	update_xid = InvalidTransactionId;
	update_committed = false;

	for (i = 0; i < nmembers; i++)
	{
		/*
		 * Determine whether to keep this member or ignore it.
		 */
		if (ISUPDATE_from_mxstatus(members[i].status))
		{
			TransactionId xid = members[i].xid;

			Assert(TransactionIdIsValid(xid));
			if (TransactionIdPrecedes(xid, relfrozenxid))
				ereport(ERROR,
						(errcode(ERRCODE_DATA_CORRUPTED),
						 errmsg_internal("found update xid %u from before relfrozenxid %u",
										 xid, relfrozenxid)));

			/*
			 * It's an update; should we keep it?  If the transaction is known
			 * aborted or crashed then it's okay to ignore it, otherwise not.
			 * Note that an updater older than cutoff_xid cannot possibly be
			 * committed, because HeapTupleSatisfiesVacuum would have returned
			 * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
			 *
			 * As with all tuple visibility routines, it's critical to test
			 * TransactionIdIsInProgress before TransactionIdDidCommit,
			 * because of race conditions explained in detail in
			 * heapam_visibility.c.
			 */
			if (TransactionIdIsCurrentTransactionId(xid) ||
				TransactionIdIsInProgress(xid))
			{
				Assert(!TransactionIdIsValid(update_xid));
				update_xid = xid;
			}
			else if (TransactionIdDidCommit(xid))
			{
				/*
				 * The transaction committed, so we can tell caller to set
				 * HEAP_XMAX_COMMITTED.  (We can only do this because we know
				 * the transaction is not running.)
				 */
				Assert(!TransactionIdIsValid(update_xid));
				update_committed = true;
				update_xid = xid;
			}
			else
			{
				/*
				 * Not in progress, not committed -- must be aborted or
				 * crashed; we can ignore it.
				 */
			}

			/*
			 * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
			 * update Xid cannot possibly be older than the xid cutoff. The
			 * presence of such a tuple would cause corruption, so be paranoid
			 * and check.
			 */
			if (TransactionIdIsValid(update_xid) &&
				TransactionIdPrecedes(update_xid, cutoff_xid))
				ereport(ERROR,
						(errcode(ERRCODE_DATA_CORRUPTED),
						 errmsg_internal("found update xid %u from before xid cutoff %u",
										 update_xid, cutoff_xid)));

			/*
			 * If we determined that it's an Xid corresponding to an update
			 * that must be retained, additionally add it to the list of
			 * members of the new Multi, in case we end up using that.  (We
			 * might still decide to use only an update Xid and not a multi,
			 * but it's easier to maintain the list as we walk the old members
			 * list.)
			 */
			if (TransactionIdIsValid(update_xid))
				newmembers[nnewmembers++] = members[i];
		}
		else
		{
			/* We only keep lockers if they are still running */
			if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
				TransactionIdIsInProgress(members[i].xid))
			{
				/* running locker cannot possibly be older than the cutoff */
				Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
				newmembers[nnewmembers++] = members[i];
				has_lockers = true;
			}
		}
	}

	pfree(members);

	if (nnewmembers == 0)
	{
		/* nothing worth keeping!? Tell caller to remove the whole thing */
		*flags |= FRM_INVALIDATE_XMAX;
		xid = InvalidTransactionId;
	}
	else if (TransactionIdIsValid(update_xid) && !has_lockers)
	{
		/*
		 * If there's a single member and it's an update, pass it back alone
		 * without creating a new Multi.  (XXX we could do this when there's a
		 * single remaining locker, too, but that would complicate the API too
		 * much; moreover, the case with the single updater is more
		 * interesting, because those are longer-lived.)
		 */
		Assert(nnewmembers == 1);
		*flags |= FRM_RETURN_IS_XID;
		if (update_committed)
			*flags |= FRM_MARK_COMMITTED;
		xid = update_xid;
	}
	else
	{
		/*
		 * Create a new multixact with the surviving members of the previous
		 * one, to set as new Xmax in the tuple.
		 */
		xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
		*flags |= FRM_RETURN_IS_MULTI;
	}

	pfree(newmembers);

	return xid;
}
#define FRM_RETURN_IS_XID
Definition: heapam.c:5800
#define FRM_MARK_COMMITTED
Definition: heapam.c:5802
uint32 TransactionId
Definition: c.h:575
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:1320
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:767
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
int errcode(int sqlerrcode)
Definition: elog.c:691
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6488
void pfree(void *pointer)
Definition: mcxt.c:1057
#define ERROR
Definition: elog.h:43
TransactionId xid
Definition: multixact.h:62
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:5799
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:56
#define MultiXactIdIsValid(multi)
Definition: multixact.h:28
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:5801
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg_internal(const char *fmt,...)
Definition: elog.c:989
#define Assert(condition)
Definition: c.h:800
#define FRM_NOOP
Definition: heapam.c:5798
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3146
void * palloc(Size size)
Definition: mcxt.c:950
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551

◆ get_mxact_status_for_lock()

/*
 * Given a tuple lock mode and whether the lock is being taken for an update,
 * return the MultiXactStatus to record in a multixact member entry.
 *
 * Errors out if the combination is invalid (no status defined in the
 * tupleLockExtraInfo table for that mode/is_update pair).
 */
static MultiXactStatus
get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
{
	int			retval;

	if (is_update)
		retval = tupleLockExtraInfo[mode].updstatus;
	else
		retval = tupleLockExtraInfo[mode].lockstatus;

	if (retval == -1)
		elog(ERROR, "invalid lock tuple mode %d/%s", mode,
			 is_update ? "true" : "false");

	return (MultiXactStatus) retval;
}
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:41
static const struct @19 tupleLockExtraInfo[MaxLockTupleMode+1]
#define ERROR
Definition: elog.h:43
#define elog(elevel,...)
Definition: elog.h:228

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 1806 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferInit(), intorel_startup(), and transientrel_startup().

1807 {
1808  BulkInsertState bistate;
1809 
1810  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1812  bistate->current_buf = InvalidBuffer;
1813  return bistate;
1814 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:39
BufferAccessStrategy strategy
Definition: hio.h:31
void * palloc(Size size)
Definition: mcxt.c:950
Buffer current_buf
Definition: hio.h:32

◆ GetMultiXactIdHintBits()

/*
 * Given a MultiXactId, return the hint bits that should be set in the tuple's
 * infomask.
 *
 * Normally this should be called for a multixact that was just created, and
 * so is on our local cache, so the GetMembers call is fast.
 */
static void
GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask,
					   uint16 *new_infomask2)
{
	int			nmembers;
	MultiXactMember *members;
	int			i;
	uint16		bits = HEAP_XMAX_IS_MULTI;
	uint16		bits2 = 0;
	bool		has_update = false;
	LockTupleMode strongest = LockTupleKeyShare;

	/*
	 * We only use this in multis we just created, so they cannot be values
	 * pre-pg_upgrade.
	 */
	nmembers = GetMultiXactIdMembers(multi, &members, false, false);

	for (i = 0; i < nmembers; i++)
	{
		LockTupleMode mode;

		/*
		 * Remember the strongest lock mode held by any member of the
		 * multixact.
		 */
		mode = TUPLOCK_from_mxstatus(members[i].status);
		if (mode > strongest)
			strongest = mode;

		/* See what other bits we need */
		switch (members[i].status)
		{
			case MultiXactStatusForKeyShare:
			case MultiXactStatusForShare:
			case MultiXactStatusForNoKeyUpdate:
				break;

			case MultiXactStatusForUpdate:
				bits2 |= HEAP_KEYS_UPDATED;
				break;

			case MultiXactStatusNoKeyUpdate:
				has_update = true;
				break;

			case MultiXactStatusUpdate:
				bits2 |= HEAP_KEYS_UPDATED;
				has_update = true;
				break;
		}
	}

	if (strongest == LockTupleExclusive ||
		strongest == LockTupleNoKeyExclusive)
		bits |= HEAP_XMAX_EXCL_LOCK;
	else if (strongest == LockTupleShare)
		bits |= HEAP_XMAX_SHR_LOCK;
	else if (strongest == LockTupleKeyShare)
		bits |= HEAP_XMAX_KEYSHR_LOCK;

	if (!has_update)
		bits |= HEAP_XMAX_LOCK_ONLY;

	if (nmembers > 0)
		pfree(members);

	*new_infomask = bits;
	*new_infomask2 = bits2;
}
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
unsigned short uint16
Definition: c.h:428
void pfree(void *pointer)
Definition: mcxt.c:1057
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:196
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1223
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:227

◆ heap2_redo()

/*
 * Redo routine for the RM_HEAP2_ID resource manager: dispatch a WAL record
 * to the appropriate heap_xlog_* replay function based on its opcode.
 */
void
heap2_redo(XLogReaderState *record)
{
	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;

	switch (info & XLOG_HEAP_OPMASK)
	{
		case XLOG_HEAP2_CLEAN:
			heap_xlog_clean(record);
			break;
		case XLOG_HEAP2_FREEZE_PAGE:
			heap_xlog_freeze_page(record);
			break;
		case XLOG_HEAP2_CLEANUP_INFO:
			heap_xlog_cleanup_info(record);
			break;
		case XLOG_HEAP2_VISIBLE:
			heap_xlog_visible(record);
			break;
		case XLOG_HEAP2_MULTI_INSERT:
			heap_xlog_multi_insert(record);
			break;
		case XLOG_HEAP2_LOCK_UPDATED:
			heap_xlog_lock_updated(record);
			break;
		case XLOG_HEAP2_NEW_CID:

			/*
			 * Nothing to do on a real replay, only used during logical
			 * decoding.
			 */
			break;
		case XLOG_HEAP2_REWRITE:
			heap_xlog_logical_rewrite(record);
			break;
		default:
			elog(PANIC, "heap2_redo: unknown op code %u", info);
	}
}
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1112
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:427
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8732
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:7946
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8214
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:305
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7692
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
#define elog(elevel,...)
Definition: elog.h:228
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:7806
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7713

◆ heap_abort_speculative()

/*
 *	heap_abort_speculative - kill a speculatively inserted tuple
 *
 * Marks a tuple that was speculatively inserted in the same command as dead,
 * by setting its xmin as invalid.  That makes it immediately appear as dead
 * to all transactions, including our own.  In particular, it makes another
 * backend inserting a duplicate key value won't unnecessarily wait for our
 * whole transaction to finish (it'll just wait for our speculative insertion
 * to finish).
 *
 * The tuple removal is not WAL-logged as a separate operation; the same WAL
 * records generated by heap_delete() are emitted, so the same recovery
 * routines handle replay.
 */
void
heap_abort_speculative(Relation relation, ItemPointer tid)
{
	TransactionId xid = GetCurrentTransactionId();
	ItemId		lp;
	HeapTupleData tp;
	Page		page;
	BlockNumber block;
	Buffer		buffer;
	TransactionId prune_xid;

	Assert(ItemPointerIsValid(tid));

	block = ItemPointerGetBlockNumber(tid);
	buffer = ReadBuffer(relation, block);
	page = BufferGetPage(buffer);

	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

	/*
	 * Page can't be all visible, we just inserted into it, and are still
	 * running.
	 */
	Assert(!PageIsAllVisible(page));

	lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
	Assert(ItemIdIsNormal(lp));

	tp.t_tableOid = RelationGetRelid(relation);
	tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
	tp.t_len = ItemIdGetLength(lp);
	tp.t_self = *tid;

	/*
	 * Sanity check that the tuple really is a speculatively inserted tuple,
	 * inserted by us.
	 */
	if (tp.t_data->t_choice.t_heap.t_xmin != xid)
		elog(ERROR, "attempted to kill a tuple inserted by another transaction");
	if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
		elog(ERROR, "attempted to kill a non-speculative tuple");
	Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));

	/*
	 * No need to check for serializable conflicts here.  There is never a
	 * need for a combocid, either.  No need to extract replica identity, or
	 * do anything special with infomask bits.
	 */

	START_CRIT_SECTION();

	/*
	 * The tuple will become DEAD immediately.  Flag that this page is a
	 * candidate for pruning by setting xmin to TransactionXmin.  While not
	 * immediately prunable, it is the oldest xid we can cheaply determine
	 * that's safe against wraparound / being older than the table's
	 * relfrozenxid.  To defend against the unlikely case of a new relation
	 * having a newer relfrozenxid than our TransactionXmin, use relfrozenxid
	 * if so (vacuum can't subsequently move relfrozenxid to beyond
	 * TransactionXmin, so there's no race here).
	 */
	Assert(TransactionIdIsValid(TransactionXmin));
	if (TransactionIdPrecedes(TransactionXmin, relation->rd_rel->relfrozenxid))
		prune_xid = relation->rd_rel->relfrozenxid;
	else
		prune_xid = TransactionXmin;
	PageSetPrunable(page, prune_xid);

	/* store transaction information of xact deleting the tuple */
	tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
	tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;

	/*
	 * Set the tuple header xmin to InvalidTransactionId.  This makes the
	 * tuple immediately invisible everyone.  (In particular, to any
	 * transactions waiting on the speculative token, woken up later.)
	 */
	HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);

	/* Clear the speculative insertion token too */
	tp.t_data->t_ctid = tp.t_self;

	MarkBufferDirty(buffer);

	/*
	 * XLOG stuff
	 *
	 * The WAL records generated here match heap_delete().  The same recovery
	 * routines are used.
	 */
	if (RelationNeedsWAL(relation))
	{
		xl_heap_delete xlrec;
		XLogRecPtr	recptr;

		xlrec.flags = XLH_DELETE_IS_SUPER;
		xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
											  tp.t_data->t_infomask2);
		xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
		xlrec.xmax = xid;

		XLogBeginInsert();
		XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
		XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);

		/* No replica identity & replication origin logged */

		recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);

		PageSetLSN(page, recptr);
	}

	END_CRIT_SECTION();

	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);

	if (HeapTupleHasExternal(&tp))
	{
		Assert(!IsToastRelation(relation));
		heap_toast_delete(relation, &tp, true);
	}

	/*
	 * Never need to mark tuple for invalidation, since catalogs don't support
	 * speculative insertion
	 */

	/* Now we can release the buffer */
	ReleaseBuffer(buffer);

	/* count deletion, as we counted the insertion too */
	pgstat_count_heap_delete(relation);
}
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsToastRelation(Relation relation)
Definition: catalog.c:138
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:96
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2397
HeapTupleFields t_heap
Definition: htup_details.h:156
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:575
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1471
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:110
OffsetNumber offnum
Definition: heapam_xlog.h:107
TransactionId TransactionXmin
Definition: snapmgr.c:112
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:501
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:106
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:112
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
TransactionId t_xmin
Definition: htup_details.h:123
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_MOVED
Definition: htup_details.h:216
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
uint8 infobits_set
Definition: heapam_xlog.h:108
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
union HeapTupleHeaderData::@44 t_choice
#define RelationNeedsWAL(relation)
Definition: rel.h:563
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2157
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:319

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 4663 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

4665 {
4666  if (*have_tuple_lock)
4667  return true;
4668 
4669  switch (wait_policy)
4670  {
4671  case LockWaitBlock:
4672  LockTupleTuplock(relation, tid, mode);
4673  break;
4674 
4675  case LockWaitSkip:
4676  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4677  return false;
4678  break;
4679 
4680  case LockWaitError:
4681  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4682  ereport(ERROR,
4683  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4684  errmsg("could not obtain lock on row in relation \"%s\"",
4685  RelationGetRelationName(relation))));
4686  break;
4687  }
4688  *have_tuple_lock = true;
4689 
4690  return true;
4691 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:160
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:164
int errcode(int sqlerrcode)
Definition: elog.c:691
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:491
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg(const char *fmt,...)
Definition: elog.c:902

◆ heap_beginscan()

TableScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelTableScanDesc  parallel_scan,
uint32  flags 
)

Definition at line 1141 of file heapam.c.

References Assert, initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_parallel, TableScanDescData::rs_private, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_ALLOW_PAGEMODE, SO_TYPE_SAMPLESCAN, SO_TYPE_SEQSCAN, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

/*
 * heap_beginscan - set up a heap scan, returning it as a TableScanDesc.
 *
 * Allocates and fills a HeapScanDescData (relation, snapshot, key count,
 * flags, parallel-scan state), disables page-at-a-time mode for non-MVCC
 * snapshots, takes a relation-level predicate lock for seqscans/sample scans,
 * allocates the scan-key array, and finishes setup via initscan().
 *
 * NOTE(review): this listing was lifted from a Doxygen page; source lines that
 * carried hyperlinks were dropped by the extraction (the embedded line numbers
 * jump at 1155, 1168, 1175 and 1190).  Only comments were added here; verify
 * any elided statement against heapam.c in the PostgreSQL tree.
 */
1145 {
1146  HeapScanDesc scan;
1147 
1148  /*
1149  * increment relation ref count while scanning relation
1150  *
1151  * This is just to make really sure the relcache entry won't go away while
1152  * the scan has a pointer to it. Caller should be holding the rel open
1153  * anyway, so this is redundant in all normal scenarios...
1154  */
/* NOTE(review): elided line 1155 -- presumably the
 * RelationIncrementReferenceCount(relation) call named in this page's
 * "References" list; confirm against heapam.c. */
1156 
1157  /*
1158  * allocate and initialize scan descriptor
1159  */
1160  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1161 
1162  scan->rs_base.rs_rd = relation;
1163  scan->rs_base.rs_snapshot = snapshot;
1164  scan->rs_base.rs_nkeys = nkeys;
1165  scan->rs_base.rs_flags = flags;
1166  scan->rs_base.rs_parallel = parallel_scan;
1167  scan->rs_base.rs_private =
/* NOTE(review): elided line 1168 -- the right-hand side of the rs_private
 * assignment (an allocation) was lost in extraction. */
1169  scan->rs_strategy = NULL; /* set in initscan */
1170 
1171  /*
1172  * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1173  */
1174  if (!(snapshot && IsMVCCSnapshot(snapshot)))
/* NOTE(review): elided line 1175 -- presumably clears SO_ALLOW_PAGEMODE from
 * scan->rs_base.rs_flags, per the comment above; confirm. */
1176 
1177  /*
1178  * For seqscan and sample scans in a serializable transaction, acquire a
1179  * predicate lock on the entire relation. This is required not only to
1180  * lock all the matching tuples, but also to conflict with new insertions
1181  * into the table. In an indexscan, we take page locks on the index pages
1182  * covering the range specified in the scan qual, but in a heap scan there
1183  * is nothing more fine-grained to lock. A bitmap scan is a different
1184  * story, there we have already scanned the index and locked the index
1185  * pages covering the predicate. But in that case we still have to lock
1186  * any matching heap tuples. For sample scan we could optimize the locking
1187  * to be at least page-level granularity, but we'd need to add per-tuple
1188  * locking for that.
1189  */
/* NOTE(review): elided line 1190 -- the if-condition guarding this block
 * (a test on SO_TYPE_SEQSCAN / SO_TYPE_SAMPLESCAN flags, per the comment
 * above) was lost in extraction. */
1191  {
1192  /*
1193  * Ensure a missing snapshot is noticed reliably, even if the
1194  * isolation mode means predicate locking isn't performed (and
1195  * therefore the snapshot isn't used here).
1196  */
1197  Assert(snapshot);
1198  PredicateLockRelation(relation, snapshot);
1199  }
1200 
1201  /* we only need to set this up once */
1202  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1203 
1204  /*
1205  * we do this here instead of in initscan() because heap_rescan also calls
1206  * initscan() and we don't want to allocate memory again
1207  */
1208  if (nkeys > 0)
1209  scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1210  else
1211  scan->rs_base.rs_key = NULL;
1212 
1213  initscan(scan, key, false);
1214 
1215  return (TableScanDesc) scan;
1216 }
TableScanDescData rs_base
Definition: heapam.h:49
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2498
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
ScanKeyData * ScanKey
Definition: skey.h:75
Oid t_tableOid
Definition: htup.h:66
struct ScanKeyData * rs_key
Definition: relscan.h:37
void * rs_private
Definition: relscan.h:45
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2078
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:800
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
void * palloc(Size size)
Definition: mcxt.c:950
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:46
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:209
#define RelationGetRelid(relation)
Definition: rel.h:457

◆ heap_compute_xid_horizon_for_tuples()

TransactionId heap_compute_xid_horizon_for_tuples ( Relation  rel,
ItemPointerData tids,
int  nitems 
)

Definition at line 6992 of file heapam.c.

References Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, get_tablespace_maintenance_io_concurrency(), HeapTupleHeaderAdvanceLatestRemovedXid(), i, InvalidBlockNumber, InvalidBuffer, InvalidTransactionId, IsCatalogRelation(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerCompare(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), maintenance_io_concurrency, PageGetItem, PageGetItemId, qsort, RelationData::rd_rel, ReadBuffer(), and ReleaseBuffer().

Referenced by SampleHeapTupleVisible().

/*
 * heap_compute_xid_horizon_for_tuples - compute the latestRemovedXid horizon
 * for a set of heap TIDs about to be marked dead.
 *
 * Sorts the caller's TID array in place so each heap page is visited once,
 * optionally maintains a prefetch window (USE_PREFETCH), follows redirected
 * line pointers, and folds every item that has storage into latestRemovedXid
 * via HeapTupleHeaderAdvanceLatestRemovedXid().  LP_DEAD items without
 * storage are deliberately ignored (see the conjecture comment below).
 * Returns InvalidTransactionId if no tuple contributed a horizon.
 *
 * NOTE(review): this listing was lifted from a Doxygen page; hyperlinked
 * source lines were dropped by the extraction (embedded numbers jump at 6998,
 * 7032, 7057, 7076 and 7121) -- notably the declaration of 'buf' and the
 * LockBuffer/BUFFER_LOCK_SHARE and unlock calls appear to be among the lost
 * lines.  Only comments were added here; verify against heapam.c.
 */
6995 {
6996  TransactionId latestRemovedXid = InvalidTransactionId;
6997  BlockNumber hblkno;
/* NOTE(review): elided line 6998 -- presumably "Buffer buf = InvalidBuffer;",
 * which the loop below relies on; confirm against heapam.c. */
6999  Page hpage;
7000 #ifdef USE_PREFETCH
7001  XidHorizonPrefetchState prefetch_state;
7002  int prefetch_distance;
7003 #endif
7004 
7005  /*
7006  * Sort to avoid repeated lookups for the same page, and to make it more
7007  * likely to access items in an efficient order. In particular, this
7008  * ensures that if there are multiple pointers to the same page, they all
7009  * get processed looking up and locking the page just once.
7010  */
7011  qsort((void *) tids, nitems, sizeof(ItemPointerData),
7012  (int (*) (const void *, const void *)) ItemPointerCompare);
7013 
7014 #ifdef USE_PREFETCH
7015  /* Initialize prefetch state. */
7016  prefetch_state.cur_hblkno = InvalidBlockNumber;
7017  prefetch_state.next_item = 0;
7018  prefetch_state.nitems = nitems;
7019  prefetch_state.tids = tids;
7020 
7021  /*
7022  * Compute the prefetch distance that we will attempt to maintain.
7023  *
7024  * Since the caller holds a buffer lock somewhere in rel, we'd better make
7025  * sure that isn't a catalog relation before we call code that does
7026  * syscache lookups, to avoid risk of deadlock.
7027  */
7028  if (IsCatalogRelation(rel))
7029  prefetch_distance = maintenance_io_concurrency;
7030  else
7031  prefetch_distance =
/* NOTE(review): elided line 7032 -- presumably the
 * get_tablespace_maintenance_io_concurrency(...) call named in the
 * "References" list; confirm. */
7033 
7034  /* Start prefetching. */
7035  xid_horizon_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7036 #endif
7037 
7038  /* Iterate over all tids, and check their horizon */
7039  hblkno = InvalidBlockNumber;
7040  hpage = NULL;
7041  for (int i = 0; i < nitems; i++)
7042  {
7043  ItemPointer htid = &tids[i];
7044  ItemId hitemid;
7045  OffsetNumber hoffnum;
7046 
7047  /*
7048  * Read heap buffer, but avoid refetching if it's the same block as
7049  * required for the last tid.
7050  */
7051  if (hblkno == InvalidBlockNumber ||
7052  ItemPointerGetBlockNumber(htid) != hblkno)
7053  {
7054  /* release old buffer */
7055  if (BufferIsValid(buf))
7056  {
/* NOTE(review): elided line 7057 -- presumably
 * LockBuffer(buf, BUFFER_LOCK_UNLOCK) before the release; confirm. */
7058  ReleaseBuffer(buf);
7059  }
7060 
7061  hblkno = ItemPointerGetBlockNumber(htid);
7062 
7063  buf = ReadBuffer(rel, hblkno);
7064 
7065 #ifdef USE_PREFETCH
7066 
7067  /*
7068  * To maintain the prefetch distance, prefetch one more page for
7069  * each page we read.
7070  */
7071  xid_horizon_prefetch_buffer(rel, &prefetch_state, 1);
7072 #endif
7073 
7074  hpage = BufferGetPage(buf);
7075 
/* NOTE(review): elided line 7076 -- presumably
 * LockBuffer(buf, BUFFER_LOCK_SHARE); confirm. */
7077  }
7078 
7079  hoffnum = ItemPointerGetOffsetNumber(htid);
7080  hitemid = PageGetItemId(hpage, hoffnum);
7081 
7082  /*
7083  * Follow any redirections until we find something useful.
7084  */
7085  while (ItemIdIsRedirected(hitemid))
7086  {
7087  hoffnum = ItemIdGetRedirect(hitemid);
7088  hitemid = PageGetItemId(hpage, hoffnum);
7089  }
7090 
7091  /*
7092  * If the heap item has storage, then read the header and use that to
7093  * set latestRemovedXid.
7094  *
7095  * Some LP_DEAD items may not be accessible, so we ignore them.
7096  */
7097  if (ItemIdHasStorage(hitemid))
7098  {
7099  HeapTupleHeader htuphdr;
7100 
7101  htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
7102 
7103  HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
7104  }
7105  else if (ItemIdIsDead(hitemid))
7106  {
7107  /*
7108  * Conjecture: if hitemid is dead then it had xids before the xids
7109  * marked on LP_NORMAL items. So we just ignore this item and move
7110  * onto the next, for the purposes of calculating
7111  * latestRemovedXid.
7112  */
7113  }
7114  else
7115  Assert(!ItemIdIsUsed(hitemid));
7116 
7117  }
7118 
7119  if (BufferIsValid(buf))
7120  {
/* NOTE(review): elided line 7121 -- presumably the final
 * LockBuffer(buf, BUFFER_LOCK_UNLOCK); confirm. */
7122  ReleaseBuffer(buf);
7123  }
7124 
7125  /*
7126  * If all heap tuples were LP_DEAD then we will be returning
7127  * InvalidTransactionId here, which avoids conflicts. This matches
7128  * existing logic which assumes that LP_DEAD tuples must already be older
7129  * than the latestRemovedXid on the cleanup record that set them as
7130  * LP_DEAD, hence must already have generated a conflict.
7131  */
7132 
7133  return latestRemovedXid;
7134 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:6903
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:96
int maintenance_io_concurrency
Definition: bufmgr.c:142
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
uint32 TransactionId
Definition: c.h:575
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define InvalidBuffer
Definition: buf.h:25
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
Form_pg_class rd_rel
Definition: rel.h:110
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
uint16 OffsetNumber
Definition: off.h:24
int get_tablespace_maintenance_io_concurrency(Oid spcid)
Definition: spccache.c:229
static char * buf
Definition: pg_test_fsync.c:68
#define InvalidTransactionId
Definition: transam.h:31
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
#define Assert(condition)
Definition: c.h:800
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
int i
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define qsort(a, b, c, d)
Definition: port.h:497
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_delete()

TM_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)

Definition at line 2442 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), TM_FailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, 
HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, xl_heap_delete::xmax, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_delete(), and simple_heap_delete().

/*
 * heap_delete - delete a tuple, returning a TM_Result status.
 *
 * Reads the target page, waits out any conflicting lockers/updaters
 * (restarting at l1: if xmax changes under us), runs the crosscheck snapshot
 * if given, and on success stamps the tuple's xmax/cmax, clears the page's
 * all-visible bit, WAL-logs an XLOG_HEAP_DELETE record (with the replica
 * identity if one exists), deletes any out-of-line TOAST data, queues a
 * cache invalidation, and counts the delete in pgstat.  On failure, fills
 * *tmfd and returns the TM_* failure code.
 *
 * NOTE(review): this listing was lifted from a Doxygen page; source lines
 * that carried hyperlinks were dropped by the extraction (the embedded line
 * numbers jump, e.g. at 2447, 2488, 2500, 2557, 2686-2689, 2693, 2713-2714).
 * Only comments were added here; verify every elided statement against
 * heapam.c in the PostgreSQL tree before relying on this text.
 */
2445 {
2446  TM_Result result;
/* NOTE(review): elided line 2447 -- presumably
 * "TransactionId xid = GetCurrentTransactionId();" ('xid' is used below
 * and GetCurrentTransactionId appears in the "References" list); confirm. */
2448  ItemId lp;
2449  HeapTupleData tp;
2450  Page page;
2451  BlockNumber block;
2452  Buffer buffer;
2453  Buffer vmbuffer = InvalidBuffer;
2454  TransactionId new_xmax;
2455  uint16 new_infomask,
2456  new_infomask2;
2457  bool have_tuple_lock = false;
2458  bool iscombo;
2459  bool all_visible_cleared = false;
2460  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2461  bool old_key_copied = false;
2462 
2463  Assert(ItemPointerIsValid(tid));
2464 
2465  /*
2466  * Forbid this during a parallel operation, lest it allocate a combocid.
2467  * Other workers might need that combocid for visibility checks, and we
2468  * have no provision for broadcasting it to them.
2469  */
2470  if (IsInParallelMode())
2471  ereport(ERROR,
2472  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2473  errmsg("cannot delete tuples during a parallel operation")));
2474 
2475  block = ItemPointerGetBlockNumber(tid);
2476  buffer = ReadBuffer(relation, block);
2477  page = BufferGetPage(buffer);
2478 
2479  /*
2480  * Before locking the buffer, pin the visibility map page if it appears to
2481  * be necessary. Since we haven't got the lock yet, someone else might be
2482  * in the middle of changing this, so we'll need to recheck after we have
2483  * the lock.
2484  */
2485  if (PageIsAllVisible(page))
2486  visibilitymap_pin(relation, block, &vmbuffer);
2487 
/* NOTE(review): elided line 2488 -- presumably
 * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); confirm. */
2489 
2490  /*
2491  * If we didn't pin the visibility map page and the page has become all
2492  * visible while we were busy locking the buffer, we'll have to unlock and
2493  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2494  * unfortunate, but hopefully shouldn't happen often.
2495  */
2496  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2497  {
2498  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2499  visibilitymap_pin(relation, block, &vmbuffer);
/* NOTE(review): elided line 2500 -- presumably re-takes
 * BUFFER_LOCK_EXCLUSIVE; confirm. */
2501  }
2502 
2503  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2504  Assert(ItemIdIsNormal(lp));
2505 
2506  tp.t_tableOid = RelationGetRelid(relation);
2507  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2508  tp.t_len = ItemIdGetLength(lp);
2509  tp.t_self = *tid;
2510 
2511 l1:
2512  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2513 
2514  if (result == TM_Invisible)
2515  {
2516  UnlockReleaseBuffer(buffer);
2517  ereport(ERROR,
2518  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2519  errmsg("attempted to delete invisible tuple")));
2520  }
2521  else if (result == TM_BeingModified && wait)
2522  {
2523  TransactionId xwait;
2524  uint16 infomask;
2525 
2526  /* must copy state data before unlocking buffer */
2527  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2528  infomask = tp.t_data->t_infomask;
2529 
2530  /*
2531  * Sleep until concurrent transaction ends -- except when there's a
2532  * single locker and it's our own transaction. Note we don't care
2533  * which lock mode the locker has, because we need the strongest one.
2534  *
2535  * Before sleeping, we need to acquire tuple lock to establish our
2536  * priority for the tuple (see heap_lock_tuple). LockTuple will
2537  * release us when we are next-in-line for the tuple.
2538  *
2539  * If we are forced to "start over" below, we keep the tuple lock;
2540  * this arranges that we stay at the head of the line while rechecking
2541  * tuple state.
2542  */
2543  if (infomask & HEAP_XMAX_IS_MULTI)
2544  {
2545  bool current_is_member = false;
2546 
2547  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2548  LockTupleExclusive, &current_is_member))
2549  {
2550  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2551 
2552  /*
2553  * Acquire the lock, if necessary (but skip it when we're
2554  * requesting a lock and already have one; avoids deadlock).
2555  */
2556  if (!current_is_member)
/* NOTE(review): elided line 2557 -- presumably the
 * heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive, ...)
 * call whose trailing arguments appear on the next line; confirm. */
2558  LockWaitBlock, &have_tuple_lock);
2559 
2560  /* wait for multixact */
/* NOTE(review): elided line 2561 -- presumably the MultiXactIdWait(...)
 * call whose trailing arguments appear on the next lines; confirm. */
2562  relation, &(tp.t_self), XLTW_Delete,
2563  NULL);
/* NOTE(review): elided line 2564 -- presumably re-takes
 * BUFFER_LOCK_EXCLUSIVE; confirm. */
2565 
2566  /*
2567  * If xwait had just locked the tuple then some other xact
2568  * could update this tuple before we get to this point. Check
2569  * for xmax change, and start over if so.
2570  */
2571  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
/* NOTE(review): elided line 2572 -- presumably the
 * !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data), ...) half of
 * this condition; confirm. */
2573  xwait))
2574  goto l1;
2575  }
2576 
2577  /*
2578  * You might think the multixact is necessarily done here, but not
2579  * so: it could have surviving members, namely our own xact or
2580  * other subxacts of this backend. It is legal for us to delete
2581  * the tuple in either case, however (the latter case is
2582  * essentially a situation of upgrading our former shared lock to
2583  * exclusive). We don't bother changing the on-disk hint bits
2584  * since we are about to overwrite the xmax altogether.
2585  */
2586  }
2587  else if (!TransactionIdIsCurrentTransactionId(xwait))
2588  {
2589  /*
2590  * Wait for regular transaction to end; but first, acquire tuple
2591  * lock.
2592  */
2593  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* NOTE(review): elided line 2594 -- presumably the
 * heap_acquire_tuplock(...) call whose trailing arguments appear on the
 * next line; confirm. */
2595  LockWaitBlock, &have_tuple_lock);
2596  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
/* NOTE(review): elided line 2597 -- presumably re-takes
 * BUFFER_LOCK_EXCLUSIVE; confirm. */
2598 
2599  /*
2600  * xwait is done, but if xwait had just locked the tuple then some
2601  * other xact could update this tuple before we get to this point.
2602  * Check for xmax change, and start over if so.
2603  */
2604  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
/* NOTE(review): elided line 2605 -- the other half of this condition
 * (compare raw xmax against xwait); confirm. */
2606  xwait))
2607  goto l1;
2608 
2609  /* Otherwise check if it committed or aborted */
2610  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2611  }
2612 
2613  /*
2614  * We may overwrite if previous xmax aborted, or if it committed but
2615  * only locked the tuple without updating it.
2616  */
2617  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
/* NOTE(review): elided lines 2618-2619 -- the remaining disjuncts of this
 * condition (locked-only / only-locked tests); confirm. */
2620  result = TM_Ok;
2621  else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
/* NOTE(review): elided line 2622 -- the moved-partitions disjunct of this
 * condition; confirm. */
2623  result = TM_Updated;
2624  else
2625  result = TM_Deleted;
2626  }
2627 
2628  if (crosscheck != InvalidSnapshot && result == TM_Ok)
2629  {
2630  /* Perform additional check for transaction-snapshot mode RI updates */
2631  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2632  result = TM_Updated;
2633  }
2634 
2635  if (result != TM_Ok)
2636  {
2637  Assert(result == TM_SelfModified ||
2638  result == TM_Updated ||
2639  result == TM_Deleted ||
2640  result == TM_BeingModified);
2642  Assert(result != TM_Updated ||
2643  !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2644  tmfd->ctid = tp.t_data->t_ctid;
/* NOTE(review): elided line 2645 -- presumably sets tmfd->xmax from the
 * tuple's update xid; confirm. */
2646  if (result == TM_SelfModified)
2647  tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2648  else
2649  tmfd->cmax = InvalidCommandId;
2650  UnlockReleaseBuffer(buffer);
2651  if (have_tuple_lock)
2652  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2653  if (vmbuffer != InvalidBuffer)
2654  ReleaseBuffer(vmbuffer);
2655  return result;
2656  }
2657 
2658  /*
2659  * We're about to do the actual delete -- check for conflict first, to
2660  * avoid possibly having to roll back work we've just done.
2661  *
2662  * This is safe without a recheck as long as there is no possibility of
2663  * another process scanning the page between this check and the delete
2664  * being visible to the scan (i.e., an exclusive buffer content lock is
2665  * continuously held from this point until the tuple delete is visible).
2666  */
2667  CheckForSerializableConflictIn(relation, tid, BufferGetBlockNumber(buffer));
2668 
2669  /* replace cid with a combo cid if necessary */
2670  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2671 
2672  /*
2673  * Compute replica identity tuple before entering the critical section so
2674  * we don't PANIC upon a memory allocation failure.
2675  */
2676  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2677 
2678  /*
2679  * If this is the first possibly-multixact-able operation in the current
2680  * transaction, set my per-backend OldestMemberMXactId setting. We can be
2681  * certain that the transaction will never become a member of any older
2682  * MultiXactIds than that. (We have to do this even if we end up just
2683  * using our own TransactionId below, since some other backend could
2684  * incorporate our XID into a MultiXact immediately afterwards.)
2685  */
/* NOTE(review): elided line 2686 -- presumably
 * MultiXactIdSetOldestMember(); confirm. */
2687 
/* NOTE(review): elided lines 2688-2689 -- presumably the head of the
 * compute_new_xmax_infomask(...) call whose trailing arguments follow;
 * confirm. */
2690  xid, LockTupleExclusive, true,
2691  &new_xmax, &new_infomask, &new_infomask2);
2692 
/* NOTE(review): elided line 2693 -- presumably START_CRIT_SECTION();
 * confirm. */
2694 
2695  /*
2696  * If this transaction commits, the tuple will become DEAD sooner or
2697  * later. Set flag that this page is a candidate for pruning once our xid
2698  * falls below the OldestXmin horizon. If the transaction finally aborts,
2699  * the subsequent page pruning will be a no-op and the hint will be
2700  * cleared.
2701  */
2702  PageSetPrunable(page, xid);
2703 
2704  if (PageIsAllVisible(page))
2705  {
2706  all_visible_cleared = true;
2707  PageClearAllVisible(page);
2708  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2709  vmbuffer, VISIBILITYMAP_VALID_BITS);
2710  }
2711 
2712  /* store transaction information of xact deleting the tuple */
/* NOTE(review): elided lines 2713-2714 -- presumably clear the old
 * HEAP_XMAX_BITS / HEAP_MOVED bits from t_infomask and t_infomask2 before
 * the OR-ins below; confirm. */
2715  tp.t_data->t_infomask |= new_infomask;
2716  tp.t_data->t_infomask2 |= new_infomask2;
/* NOTE(review): elided line 2717 -- presumably
 * HeapTupleHeaderClearHotUpdated(tp.t_data); confirm. */
2718  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2719  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2720  /* Make sure there is no forward chain link in t_ctid */
2721  tp.t_data->t_ctid = tp.t_self;
2722 
2723  /* Signal that this is actually a move into another partition */
2724  if (changingPart)
/* NOTE(review): elided line 2725 -- presumably
 * HeapTupleHeaderSetMovedPartitions(tp.t_data); confirm. */
2726 
2727  MarkBufferDirty(buffer);
2728 
2729  /*
2730  * XLOG stuff
2731  *
2732  * NB: heap_abort_speculative() uses the same xlog record and replay
2733  * routines.
2734  */
2735  if (RelationNeedsWAL(relation))
2736  {
2737  xl_heap_delete xlrec;
2738  xl_heap_header xlhdr;
2739  XLogRecPtr recptr;
2740 
2741  /* For logical decode we need combocids to properly decode the catalog */
/* NOTE(review): elided line 2742 -- presumably the
 * RelationIsAccessibleInLogicalDecoding(relation) guard for the call
 * below; confirm. */
2743  log_heap_new_cid(relation, &tp);
2744 
2745  xlrec.flags = 0;
2746  if (all_visible_cleared)
/* NOTE(review): elided line 2747 -- presumably sets
 * XLH_DELETE_ALL_VISIBLE_CLEARED in xlrec.flags; confirm. */
2748  if (changingPart)
/* NOTE(review): elided lines 2749-2750 -- presumably set
 * XLH_DELETE_IS_PARTITION_MOVE and begin the compute_infobits(...)
 * assignment to xlrec.infobits_set; confirm. */
2751  tp.t_data->t_infomask2);
/* NOTE(review): elided line 2752 -- presumably sets xlrec.offnum from the
 * tuple's offset number; confirm. */
2753  xlrec.xmax = new_xmax;
2754 
2755  if (old_key_tuple != NULL)
2756  {
2757  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
/* NOTE(review): elided line 2758 -- presumably sets
 * XLH_DELETE_CONTAINS_OLD_TUPLE in xlrec.flags; confirm. */
2759  else
/* NOTE(review): elided line 2760 -- presumably sets
 * XLH_DELETE_CONTAINS_OLD_KEY in xlrec.flags; confirm. */
2761  }
2762 
2763  XLogBeginInsert();
2764  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
2765 
2766  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2767 
2768  /*
2769  * Log replica identity of the deleted tuple if there is one
2770  */
2771  if (old_key_tuple != NULL)
2772  {
2773  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2774  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2775  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2776 
2777  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
2778  XLogRegisterData((char *) old_key_tuple->t_data
/* NOTE(review): elided lines 2779 and 2781 -- presumably the
 * "+ SizeofHeapTupleHeader" offset/length arithmetic for this
 * XLogRegisterData call; confirm. */
2780  old_key_tuple->t_len
2782  }
2783 
2784  /* filtering by origin on a row level is much more efficient */
/* NOTE(review): elided line 2785 -- presumably
 * XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN); confirm. */
2786 
2787  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
2788 
2789  PageSetLSN(page, recptr);
2790  }
2791 
2792  END_CRIT_SECTION();
2793 
2794  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2795 
2796  if (vmbuffer != InvalidBuffer)
2797  ReleaseBuffer(vmbuffer);
2798 
2799  /*
2800  * If the tuple has toasted out-of-line attributes, we need to delete
2801  * those items too. We have to do this before releasing the buffer
2802  * because we need to look at the contents of the tuple, but it's OK to
2803  * release the content lock on the buffer first.
2804  */
2805  if (relation->rd_rel->relkind != RELKIND_RELATION &&
2806  relation->rd_rel->relkind != RELKIND_MATVIEW)
2807  {
2808  /* toast table entries should never be recursively toasted */
/* NOTE(review): elided line 2809 -- presumably an
 * Assert(!HeapTupleHasExternal(&tp)); confirm. */
2810  }
2811  else if (HeapTupleHasExternal(&tp))
2812  heap_toast_delete(relation, &tp, false);
2813 
2814  /*
2815  * Mark tuple for invalidation from system caches at next command
2816  * boundary. We have to do this before releasing the buffer because we
2817  * need to look at the contents of the tuple.
2818  */
2819  CacheInvalidateHeapTuple(relation, &tp, NULL);
2820 
2821  /* Now we can release the buffer */
2822  ReleaseBuffer(buffer);
2823 
2824  /*
2825  * Release the lmgr tuple lock, if we had it.
2826  */
2827  if (have_tuple_lock)
2828  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2829 
2830  pgstat_count_heap_delete(relation);
2831 
2832  if (old_key_tuple != NULL && old_key_copied)
2833  heap_freetuple(old_key_tuple);
2834 
2835  return TM_Ok;
2836 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
ItemPointerData ctid
Definition: tableam.h:125
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7518
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2397
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1122
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:575
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:869
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1471
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed, bool *copy)
Definition: heapam.c:7600
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2419
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:496
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
CommandId cmax
Definition: tableam.h:127
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:143
#define PageSetPrunable(page, xid)
Definition: bufpage.h:392
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:691
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:238
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
Form_pg_class rd_rel
Definition: rel.h:110
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:162
OffsetNumber offnum
Definition: heapam_xlog.h:107
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:625
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:375
unsigned short uint16
Definition: c.h:428
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3536
TransactionId xmax
Definition: tableam.h:126
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:449
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:106
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6733
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:43
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:112
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:95
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:405
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:4712
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define InvalidSnapshot
Definition: snapshot.h:123
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
TM_Result
Definition: tableam.h:70
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:620
#define InvalidCommandId
Definition: c.h:592
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4375
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:1784
#define HEAP_MOVED
Definition: htup_details.h:216
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:4663
#define ereport(elevel,...)
Definition: elog.h:155
TransactionId MultiXactId
Definition: c.h:585
#define PageClearAllVisible(page)
Definition: bufpage.h:389
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:639
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
uint8 infobits_set
Definition: heapam_xlog.h:108
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:118
Definition: tableam.h:76
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
uint16 t_infomask
Definition: heapam_xlog.h:144
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition: heapam.c:6556
#define RelationNeedsWAL(relation)
Definition: rel.h:563
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:2157
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:153
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2663
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int errmsg(const char *fmt,...)
Definition: elog.c:902
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:93
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define SizeOfHeapHeader
Definition: heapam_xlog.h:148
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:94

◆ heap_endscan()

void heap_endscan ( TableScanDesc  sscan)

Definition at line 1256 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_TEMP_SNAPSHOT, and UnregisterSnapshot().

Referenced by SampleHeapTupleVisible().

1257 {
1258  HeapScanDesc scan = (HeapScanDesc) sscan;
1259 
1260  /* Note: no locking manipulations needed */
1261 
1262  /*
1263  * unpin scan buffers
1264  */
1265  if (BufferIsValid(scan->rs_cbuf))
1266  ReleaseBuffer(scan->rs_cbuf);
1267 
1268  /*
1269  * decrement relation reference count and free scan descriptor storage
1270  */
1271  RelationDecrementReferenceCount(scan->rs_base.rs_rd);
1272 
1273  if (scan->rs_base.rs_key)
1274  pfree(scan->rs_base.rs_key);
1275 
1276  if (scan->rs_strategy != NULL)
1277  FreeAccessStrategy(scan->rs_strategy);
1278 
1279  if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1280  UnregisterSnapshot(scan->rs_base.rs_snapshot);
1281 
1282  pfree(scan);
1283 }
TableScanDescData rs_base
Definition: heapam.h:49
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
void pfree(void *pointer)
Definition: mcxt.c:1057
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2091
struct ScanKeyData * rs_key
Definition: relscan.h:37
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:852
BufferAccessStrategy rs_strategy
Definition: heapam.h:64
Buffer rs_cbuf
Definition: heapam.h:60
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:123
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6355 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6356 {
6357  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6358 
6359  if (frz->frzflags & XLH_FREEZE_XVAC)
6360  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6361 
6362  if (frz->frzflags & XLH_INVALID_XVAC)
6363  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6364 
6365  tuple->t_infomask = frz->t_infomask;
6366  tuple->t_infomask2 = frz->t_infomask2;
6367 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:423
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:319
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:315
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:314

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf 
)

Definition at line 1394 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetXmin, HeapTupleSatisfiesVisibility(), InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by heap_lock_updated_tuple_rec(), heapam_fetch_row_version(), and heapam_tuple_lock().

1398 {
1399  ItemPointer tid = &(tuple->t_self);
1400  ItemId lp;
1401  Buffer buffer;
1402  Page page;
1403  OffsetNumber offnum;
1404  bool valid;
1405 
1406  /*
1407  * Fetch and pin the appropriate page of the relation.
1408  */
1409  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1410 
1411  /*
1412  * Need share lock on buffer to examine tuple commit status.
1413  */
1414  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1415  page = BufferGetPage(buffer);
1416  TestForOldSnapshot(snapshot, relation, page);
1417 
1418  /*
1419  * We'd better check for out-of-range offnum in case of VACUUM since the
1420  * TID was obtained.
1421  */
1422  offnum = ItemPointerGetOffsetNumber(tid);
1423  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1424  {
1425  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1426  ReleaseBuffer(buffer);
1427  *userbuf = InvalidBuffer;
1428  tuple->t_data = NULL;
1429  return false;
1430  }
1431 
1432  /*
1433  * get the item line pointer corresponding to the requested tid
1434  */
1435  lp = PageGetItemId(page, offnum);
1436 
1437  /*
1438  * Must check for deleted tuple.
1439  */
1440  if (!ItemIdIsNormal(lp))
1441  {
1442  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1443  ReleaseBuffer(buffer);
1444  *userbuf = InvalidBuffer;
1445  tuple->t_data = NULL;
1446  return false;
1447  }
1448 
1449  /*
1450  * fill in *tuple fields
1451  */
1452  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1453  tuple->t_len = ItemIdGetLength(lp);
1454  tuple->t_tableOid = RelationGetRelid(relation);
1455 
1456  /*
1457  * check tuple visibility, then release lock
1458  */
1459  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1460 
1461  if (valid)
1462  PredicateLockTID(relation, &(tuple->t_self), snapshot,
1463  HeapTupleHeaderGetXmin(tuple->t_data));
1464 
1465  HeapCheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1466 
1467  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1468 
1469  if (valid)
1470  {
1471  /*
1472  * All checks passed, so return the tuple as valid. Caller is now
1473  * responsible for releasing the buffer.
1474  */
1475  *userbuf = buffer;
1476 
1477  return true;
1478  }
1479 
1480  /* Tuple failed time qual */
1481  ReleaseBuffer(buffer);
1482  *userbuf = InvalidBuffer;
1483 
1484  return false;
1485 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:96
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:277
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2543
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9018
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5475 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_complete_speculative().

5476 {
5477  Buffer buffer;
5478  Page page;
5479  OffsetNumber offnum;
5480  ItemId lp = NULL;
5481  HeapTupleHeader htup;
5482 
5483  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5484  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5485  page = (Page) BufferGetPage(buffer);
5486 
5487  offnum = ItemPointerGetOffsetNumber(tid);
5488  if (PageGetMaxOffsetNumber(page) >= offnum)
5489  lp = PageGetItemId(page, offnum);
5490 
5491  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5492  elog(ERROR, "invalid lp");
5493 
5494  htup = (HeapTupleHeader) PageGetItem(page, lp);
5495 
5496  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5497  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
5498  "invalid speculative token constant");
5499 
5500  /* NO EREPORT(ERROR) from here till changes are logged */
5501  START_CRIT_SECTION();
5502 
5503  Assert(HeapTupleHeaderIsSpeculative(htup));
5504 
5505  MarkBufferDirty(buffer);
5506 
5507  /*
5508  * Replace the speculative insertion token with a real t_ctid, pointing to
5509  * itself like it does on regular tuples.
5510  */
5511  htup->t_ctid = *tid;
5512 
5513  /* XLOG stuff */
5514  if (RelationNeedsWAL(relation))
5515  {
5516  xl_heap_confirm xlrec;
5517  XLogRecPtr recptr;
5518 
5519  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5520 
5521  XLogBeginInsert();
5522 
5523  /* We want the same filtering on this as on a plain insert */
5524  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
5525 
5526  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5527  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5528 
5529  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5530 
5531  PageSetLSN(page, recptr);
5532  }
5533 
5534  END_CRIT_SECTION();
5535 
5536  UnlockReleaseBuffer(buffer);
5537 }
OffsetNumber offnum
Definition: heapam_xlog.h:295
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1471
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:238
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define SpecTokenOffsetNumber
Definition: itemptr.h:63
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:914
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3536
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
#define REGBUF_STANDARD
Definition: xloginsert.h:35
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:298
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:563
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6376 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6379 {
6380  xl_heap_freeze_tuple frz;
6381  bool do_freeze;
6382  bool tuple_totally_frozen;
6383 
6384  do_freeze = heap_prepare_freeze_tuple(tuple,
6385  relfrozenxid, relminmxid,
6386  cutoff_xid, cutoff_multi,
6387  &frz, &tuple_totally_frozen);
6388 
6389  /*
6390  * Note that because this is not a WAL-logged operation, we don't need to
6391  * fill in the offset in the freeze record.
6392  */
6393 
6394  if (do_freeze)
6395  heap_execute_freeze_tuple(tuple, &frz);
6396  return do_freeze;
6397 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6126
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6355

◆ heap_get_latest_tid()

void heap_get_latest_tid ( TableScanDesc  sscan,
ItemPointer  tid 
)

Definition at line 1661 of file heapam.c.

References Assert, BUFFER_LOCK_SHARE, BufferGetPage, HEAP_XMAX_INVALID, HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetRelid, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

1663 {
1664  Relation relation = sscan->rs_rd;
1665  Snapshot snapshot = sscan->rs_snapshot;
1666  ItemPointerData ctid;
1667  TransactionId priorXmax;
1668 
1669  /*
1670  * table_tuple_get_latest_tid() verified that the passed in tid is valid.
1671  * Assume that t_ctid links are valid however - there shouldn't be invalid
1672  * ones in the table.
1673  */
1674  Assert(ItemPointerIsValid(tid));
1675 
1676  /*
1677  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1678  * need to examine, and *tid is the TID we will return if ctid turns out
1679  * to be bogus.
1680  *
1681  * Note that we will loop until we reach the end of the t_ctid chain.
1682  * Depending on the snapshot passed, there might be at most one visible
1683  * version of the row, but we don't try to optimize for that.
1684  */
1685  ctid = *tid;
1686  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1687  for (;;)
1688  {
1689  Buffer buffer;
1690  Page page;
1691  OffsetNumber offnum;
1692  ItemId lp;
1693  HeapTupleData tp;
1694  bool valid;
1695 
1696  /*
1697  * Read, pin, and lock the page.
1698  */
1699  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1700  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1701  page = BufferGetPage(buffer);
1702  TestForOldSnapshot(snapshot, relation, page);
1703 
1704  /*
1705  * Check for bogus item number. This is not treated as an error
1706  * condition because it can happen while following a t_ctid link. We
1707  * just assume that the prior tid is OK and return it unchanged.
1708  */
1709  offnum = ItemPointerGetOffsetNumber(&ctid);
1710  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1711  {
1712  UnlockReleaseBuffer(buffer);
1713  break;
1714  }
1715  lp = PageGetItemId(page, offnum);
1716  if (!ItemIdIsNormal(lp))
1717  {
1718  UnlockReleaseBuffer(buffer);
1719  break;
1720  }
1721 
1722  /* OK to access the tuple */
1723  tp.t_self = ctid;
1724  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1725  tp.t_len = ItemIdGetLength(lp);
1726  tp.t_tableOid = RelationGetRelid(relation);
1727 
1728  /*
1729  * After following a t_ctid link, we might arrive at an unrelated
1730  * tuple. Check for XMIN match.
1731  */
1732  if (TransactionIdIsValid(priorXmax) &&
1733  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
1734  {
1735  UnlockReleaseBuffer(buffer);
1736  break;
1737  }
1738 
1739  /*
1740  * Check tuple visibility; if visible, set it as the new result
1741  * candidate.
1742  */
1743  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1744  HeapCheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1745  if (valid)
1746  *tid = ctid;
1747 
1748  /*
1749  * If there's a valid t_ctid link, follow it, else we're done.
1750  */
1751  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1752  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
1753  HeapTupleHeaderIndicatesMovedPartitions(tp.t_data) ||
1754  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
1755  {
1756  UnlockReleaseBuffer(buffer);
1757  break;
1758  }
1759 
1760  ctid = tp.t_data->t_ctid;
1761  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1762  UnlockReleaseBuffer(buffer);
1763  } /* end of loop */
1764 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:277
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:575
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3536
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9018
#define Assert(condition)
Definition: c.h:800
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:97
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_getnext()

HeapTuple heap_getnext ( TableScanDesc  sscan,
ScanDirection  direction 
)

Definition at line 1286 of file heapam.c.

References bsysscan, CheckXidAlive, elog, ereport, errcode(), errmsg_internal(), ERROR, GetHeapamTableAmRoutine(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, RelationData::rd_tableam, HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, TransactionIdIsValid, and unlikely.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), heapam_index_build_range_scan(), heapam_index_validate_scan(), index_update_stats(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), populate_typ_array(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1287 {
1288  HeapScanDesc scan = (HeapScanDesc) sscan;
1289 
1290  /*
1291  * This is still widely used directly, without going through table AM, so
1292  * add a safety check. It's possible we should, at a later point,
1293  * downgrade this to an assert. The reason for checking the AM routine,
1294  * rather than the AM oid, is that this allows to write regression tests
1295  * that create another AM reusing the heap handler.
1296  */
1297  if (unlikely(sscan->rs_rd->rd_tableam != GetHeapamTableAmRoutine()))
1298  ereport(ERROR,
1299  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1300  errmsg_internal("only heap AM is supported")));
1301 
1302  /*
1303  * We don't expect direct calls to heap_getnext with valid CheckXidAlive
1304  * for catalog or regular tables. See detailed comments in xact.c where
1305  * these variables are declared. Normally we have such a check at tableam
1306  * level API but this is called from many places so we need to ensure it
1307  * here.
1308  */
1309  if (unlikely(TransactionIdIsValid(CheckXidAlive) && !bsysscan))
1310  elog(ERROR, "unexpected heap_getnext call during logical decoding");
1311 
1312  /* Note: no locking manipulations needed */
1313 
1314  if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1315  heapgettup_pagemode(scan, direction,
1316  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1317  else
1318  heapgettup(scan, direction,
1319  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1320 
1321  if (scan->rs_ctup.t_data == NULL)
1322  return NULL;
1323 
1324  /*
1325  * if we get here it means we have a new current scan tuple, so point to
1326  * the proper return buffer and return the tuple.
1327  */
1328 
1329  pgstat_count_heap_getnext(scan->rs_base.rs_rd);
1330 
1331  return &scan->rs_ctup;
1332 }
TableScanDescData rs_base
Definition: heapam.h:49
int errcode(int sqlerrcode)
Definition: elog.c:691
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
#define ERROR
Definition: elog.h:43
bool bsysscan
Definition: xact.c:96
struct ScanKeyData * rs_key
Definition: relscan.h:37
TransactionId CheckXidAlive
Definition: xact.c:95
const struct TableAmRoutine * rd_tableam
Definition: rel.h:172
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:488
#define ereport(elevel,...)
Definition: elog.h:155
int errmsg_internal(const char *fmt,...)
Definition: elog.c:989
Relation rs_rd
Definition: relscan.h:34
#define elog(elevel,...)
Definition: elog.h:228
#define unlikely(x)
Definition: c.h:261
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1487
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:806
const TableAmRoutine * GetHeapamTableAmRoutine(void)

◆ heap_getnextslot()

bool heap_getnextslot ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1335 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, and HeapTupleData::t_data.

Referenced by SampleHeapTupleVisible().

1336 {
1337  HeapScanDesc scan = (HeapScanDesc) sscan;
1338 
1339  /* Note: no locking manipulations needed */
1340 
1341  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1342  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1343  else
1344  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1345 
1346  if (scan->rs_ctup.t_data == NULL)
1347  {
1348  ExecClearTuple(slot);
1349  return false;
1350  }
1351 
1352  /*
1353  * if we get here it means we have a new current scan tuple, so point to
1354  * the proper return buffer and return the tuple.
1355  */
1356 
1357  pgstat_count_heap_getnext(sscan->rs_rd);
1358 
1359  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1360  scan->rs_cbuf);
1361  return true;
1362 }
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
TableScanDescData rs_base
Definition: heapam.h:49
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:73
HeapTupleData rs_ctup
Definition: heapam.h:66
HeapTupleHeader t_data
Definition: htup.h:68
struct ScanKeyData * rs_key
Definition: relscan.h:37
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:488
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:60
Relation rs_rd
Definition: relscan.h:34
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1487
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:806

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 1509 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, GlobalVisTestFor(), HeapCheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTID(), RecentXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1512 {
1513  Page dp = (Page) BufferGetPage(buffer);
1514  TransactionId prev_xmax = InvalidTransactionId;
1515  BlockNumber blkno;
1516  OffsetNumber offnum;
1517  bool at_chain_start;
1518  bool valid;
1519  bool skip;
1520  GlobalVisState *vistest = NULL;
1521 
1522  /* If this is not the first call, previous call returned a (live!) tuple */
1523  if (all_dead)
1524  *all_dead = first_call;
1525 
1526  blkno = ItemPointerGetBlockNumber(tid);
1527  offnum = ItemPointerGetOffsetNumber(tid);
1528  at_chain_start = first_call;
1529  skip = !first_call;
1530 
1531  /* XXX: we should assert that a snapshot is pushed or registered */
1532  Assert(TransactionIdIsValid(RecentXmin));
1533  Assert(BufferGetBlockNumber(buffer) == blkno);
1534 
1535  /* Scan through possible multiple members of HOT-chain */
1536  for (;;)
1537  {
1538  ItemId lp;
1539 
1540  /* check for bogus TID */
1541  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1542  break;
1543 
1544  lp = PageGetItemId(dp, offnum);
1545 
1546  /* check for unused, dead, or redirected items */
1547  if (!ItemIdIsNormal(lp))
1548  {
1549  /* We should only see a redirect at start of chain */
1550  if (ItemIdIsRedirected(lp) && at_chain_start)
1551  {
1552  /* Follow the redirect */
1553  offnum = ItemIdGetRedirect(lp);
1554  at_chain_start = false;
1555  continue;
1556  }
1557  /* else must be end of chain */
1558  break;
1559  }
1560 
1561  /*
1562  * Update heapTuple to point to the element of the HOT chain we're
1563  * currently investigating. Having t_self set correctly is important
1564  * because the SSI checks and the *Satisfies routine for historical
1565  * MVCC snapshots need the correct tid to decide about the visibility.
1566  */
1567  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1568  heapTuple->t_len = ItemIdGetLength(lp);
1569  heapTuple->t_tableOid = RelationGetRelid(relation);
1570  ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1571 
1572  /*
1573  * Shouldn't see a HEAP_ONLY tuple at chain start.
1574  */
1575  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1576  break;
1577 
1578  /*
1579  * The xmin should match the previous xmax value, else chain is
1580  * broken.
1581  */
1582  if (TransactionIdIsValid(prev_xmax) &&
1583  !TransactionIdEquals(prev_xmax,
1584  HeapTupleHeaderGetXmin(heapTuple->t_data)))
1585  break;
1586 
1587  /*
1588  * When first_call is true (and thus, skip is initially false) we'll
1589  * return the first tuple we find. But on later passes, heapTuple
1590  * will initially be pointing to the tuple we returned last time.
1591  * Returning it again would be incorrect (and would loop forever), so
1592  * we skip it and return the next match we find.
1593  */
1594  if (!skip)
1595  {
1596  /* If it's visible per the snapshot, we must return it */
1597  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1598  HeapCheckForSerializableConflictOut(valid, relation, heapTuple,
1599  buffer, snapshot);
1600 
1601  if (valid)
1602  {
1603  ItemPointerSetOffsetNumber(tid, offnum);
1604  PredicateLockTID(relation, &heapTuple->t_self, snapshot,
1605  HeapTupleHeaderGetXmin(heapTuple->t_data));
1606  if (all_dead)
1607  *all_dead = false;
1608  return true;
1609  }
1610  }
1611  skip = false;
1612 
1613  /*
1614  * If we can't see it, maybe no one else can either. At caller
1615  * request, check whether all chain members are dead to all
1616  * transactions.
1617  *
1618  * Note: if you change the criterion here for what is "dead", fix the
1619  * planner's get_actual_variable_range() function to match.
1620  */
1621  if (all_dead && *all_dead)
1622  {
1623  if (!vistest)
1624  vistest = GlobalVisTestFor(relation);
1625 
1626  if (!HeapTupleIsSurelyDead(heapTuple, vistest))
1627  *all_dead = false;
1628  }
1629 
1630  /*
1631  * Check to see if HOT chain continues past this tuple; if so fetch
1632  * the next offnum and loop around.
1633  */
1634  if (HeapTupleIsHotUpdated(heapTuple))
1635  {
1636  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1637  blkno);
1638  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1639  at_chain_start = false;
1640  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1641  }
1642  else
1643  break; /* end of chain */
1644  }
1645 
1646  return false;
1647 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:575
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
static const struct exclude_list_item skip[]
Definition: pg_checksums.c:112
TransactionId RecentXmin
Definition: snapmgr.c:113
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
GlobalVisState * GlobalVisTestFor(Relation rel)
Definition: procarray.c:3922
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void PredicateLockTID(Relation relation, ItemPointer tid, Snapshot snapshot, TransactionId tuple_xid)
Definition: predicate.c:2543
void HeapCheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: heapam.c:9018
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:800
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2663
bool HeapTupleIsSurelyDead(HeapTuple htup, GlobalVisState *vistest)
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:457
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 5715 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

5716 {
5717  Buffer buffer;
5718  Page page;
5719  OffsetNumber offnum;
5720  ItemId lp = NULL;
5721  HeapTupleHeader htup;
5722  uint32 oldlen;
5723  uint32 newlen;
5724 
5725  /*
5726  * For now, we don't allow parallel updates. Unlike a regular update,
5727  * this should never create a combo CID, so it might be possible to relax
5728  * this restriction, but not without more thought and testing. It's not
5729  * clear that it would be useful, anyway.
5730  */
5731  if (IsInParallelMode())
5732  ereport(ERROR,
5733  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
5734  errmsg("cannot update tuples during a parallel operation")));
5735 
5736  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5737  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5738  page = (Page) BufferGetPage(buffer);
5739 
5740  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5741  if (PageGetMaxOffsetNumber(page) >= offnum)
5742  lp = PageGetItemId(page, offnum);
5743 
5744  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5745  elog(ERROR, "invalid lp");
5746 
5747  htup = (HeapTupleHeader) PageGetItem(page, lp);
5748 
5749  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5750  newlen = tuple->t_len - tuple->t_data->t_hoff;
5751  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5752  elog(ERROR, "wrong tuple length");
5753 
5754  /* NO EREPORT(ERROR) from here till changes are logged */
5755  START_CRIT_SECTION();
5756 
5757  memcpy((char *) htup + htup->t_hoff,
5758  (char *) tuple->t_data + tuple->t_data->t_hoff,
5759  newlen);
5760 
5761  MarkBufferDirty(buffer);
5762 
5763  /* XLOG stuff */
5764  if (RelationNeedsWAL(relation))
5765  {
5766  xl_heap_inplace xlrec;
5767  XLogRecPtr recptr;
5768 
5769  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5770 
5771  XLogBeginInsert();
5772  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
5773 
5774  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5775  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
5776 
5777  /* inplace updates aren't decoded atm, don't log the origin */
5778 
5779  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
5780 
5781  PageSetLSN(page, recptr);
5782  }
5783 
5784  END_CRIT_SECTION();
5785 
5786  UnlockReleaseBuffer(buffer);
5787 
5788  /*
5789  * Send out shared cache inval if necessary. Note that because we only
5790  * pass the new version of the tuple, this mustn't be used for any
5791  * operations that could change catcache lookup keys. But we aren't
5792  * bothering with index updates either, so that's true a fortiori.
5793  */
5794  if (!IsBootstrapProcessingMode())
5795  CacheInvalidateHeapTuple(relation, tuple, NULL);
5796 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:368
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1122
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1471
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define SizeOfHeapInplace
Definition: heapam_xlog.h:307
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:691
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:98
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:1012
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3536
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:35
unsigned int uint32
Definition: c.h:429
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
OffsetNumber offnum
Definition: heapam_xlog.h:303
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3752
#define ereport(elevel,...)
Definition: elog.h:155
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:607
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:563
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:393
int errmsg(const char *fmt,...)
Definition: elog.c:902
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_insert()

void heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 1859 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_NO_LOGICAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), InvalidBlockNumber, InvalidBuffer, IsToastRelation(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLH_INSERT_ON_TOAST_RELATION, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_insert(), heapam_tuple_insert_speculative(), simple_heap_insert(), and toast_save_datum().

1861 {
1862  TransactionId xid = GetCurrentTransactionId();
1863  HeapTuple heaptup;
1864  Buffer buffer;
1865  Buffer vmbuffer = InvalidBuffer;
1866  bool all_visible_cleared = false;
1867 
1868  /*
1869  * Fill in tuple header fields and toast the tuple if necessary.
1870  *
1871  * Note: below this point, heaptup is the data we actually intend to store
1872  * into the relation; tup is the caller's original untoasted data.
1873  */
1874  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
1875 
1876  /*
1877  * Find buffer to insert this tuple into. If the page is all visible,
1878  * this will also pin the requisite visibility map page.
1879  */
1880  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1881  InvalidBuffer, options, bistate,
1882  &vmbuffer, NULL);
1883 
1884  /*
1885  * We're about to do the actual insert -- but check for conflict first, to
1886  * avoid possibly having to roll back work we've just done.
1887  *
1888  * This is safe without a recheck as long as there is no possibility of
1889  * another process scanning the page between this check and the insert
1890  * being visible to the scan (i.e., an exclusive buffer content lock is
1891  * continuously held from this point until the tuple insert is visible).
1892  *
1893  * For a heap insert, we only need to check for table-level SSI locks. Our
1894  * new tuple can't possibly conflict with existing tuple locks, and heap
1895  * page locks are only consolidated versions of tuple locks; they do not
1896  * lock "gaps" as index page locks do. So we don't need to specify a
1897  * buffer when making the call, which makes for a faster check.
1898  */
1899  CheckForSerializableConflictIn(relation, NULL, InvalidBlockNumber);
1900 
1901  /* NO EREPORT(ERROR) from here till changes are logged */
1902  START_CRIT_SECTION();
1903 
1904  RelationPutHeapTuple(relation, buffer, heaptup,
1905  (options & HEAP_INSERT_SPECULATIVE) != 0);
1906 
1907  if (PageIsAllVisible(BufferGetPage(buffer)))
1908  {
1909  all_visible_cleared = true;
1910  PageClearAllVisible(BufferGetPage(buffer));
1911  visibilitymap_clear(relation,
1912  ItemPointerGetBlockNumber(&(heaptup->t_self)),
1913  vmbuffer, VISIBILITYMAP_VALID_BITS);
1914  }
1915 
1916  /*
1917  * XXX Should we set PageSetPrunable on this page ?
1918  *
1919  * The inserting transaction may eventually abort thus making this tuple
1920  * DEAD and hence available for pruning. Though we don't want to optimize
1921  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1922  * aborted tuple will never be pruned until next vacuum is triggered.
1923  *
1924  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1925  */
1926 
1927  MarkBufferDirty(buffer);
1928 
1929  /* XLOG stuff */
1930  if (RelationNeedsWAL(relation))
1931  {
1932  xl_heap_insert xlrec;
1933  xl_heap_header xlhdr;
1934  XLogRecPtr recptr;
1935  Page page = BufferGetPage(buffer);
1936  uint8 info = XLOG_HEAP_INSERT;
1937  int bufflags = 0;
1938 
1939  /*
1940  * If this is a catalog, we need to transmit combocids to properly
1941  * decode, so log that as well.
1942  */
1943  if (RelationIsAccessibleInLogicalDecoding(relation))
1944  log_heap_new_cid(relation, heaptup);
1945 
1946  /*
1947  * If this is the single and first tuple on page, we can reinit the
1948  * page instead of restoring the whole thing. Set flag, and hide
1949  * buffer references from XLogInsert.
1950  */
1951  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
1952  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
1953  {
1954  info |= XLOG_HEAP_INIT_PAGE;
1955  bufflags |= REGBUF_WILL_INIT;
1956  }
1957 
1958  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
1959  xlrec.flags = 0;
1960  if (all_visible_cleared)
1961  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
1962  if (options & HEAP_INSERT_SPECULATIVE)
1963  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
1964  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
1965 
1966  /*
1967  * For logical decoding, we need the tuple even if we're doing a full
1968  * page write, so make sure it's included even if we take a full-page
1969  * image. (XXX We could alternatively store a pointer into the FPW).
1970  */
1971  if (RelationIsLogicallyLogged(relation) &&
1972  !(options & HEAP_INSERT_NO_LOGICAL))
1973  {
1974  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
1975  bufflags |= REGBUF_KEEP_DATA;
1976 
1977  if (IsToastRelation(relation))
1978  xlrec.flags |= XLH_INSERT_ON_TOAST_RELATION;
1979  }
1980 
1981  XLogBeginInsert();
1982  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
1983 
1984  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1985  xlhdr.t_infomask = heaptup->t_data->t_infomask;
1986  xlhdr.t_hoff = heaptup->t_data->t_hoff;
1987 
1988  /*
1989  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
1990  * write the whole page to the xlog, we don't need to store
1991  * xl_heap_header in the xlog.
1992  */
1993  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
1994  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
1995  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
1996  XLogRegisterBufData(0,
1997  (char *) heaptup->t_data + SizeofHeapTupleHeader,
1998  heaptup->t_len - SizeofHeapTupleHeader);
1999 
2000  /* filtering by origin on a row level is much more efficient */
2001  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2002 
2003  recptr = XLogInsert(RM_HEAP_ID, info);
2004 
2005  PageSetLSN(page, recptr);
2006  }
2007 
2008  END_CRIT_SECTION();
2009 
2010  UnlockReleaseBuffer(buffer);
2011  if (vmbuffer != InvalidBuffer)
2012  ReleaseBuffer(vmbuffer);
2013 
2014  /*
2015  * If tuple is cachable, mark it for invalidation from the caches in case
2016  * we abort. Note it is OK to do this after releasing the buffer, because
2017  * the heaptup data structure is all in local memory, not in the shared
2018  * buffer.
2019  */
2020  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2021 
2022  /* Note: speculative insertions are counted too, even if aborted later */
2023  pgstat_count_heap_insert(relation, 1);
2024 
2025  /*
2026  * If heaptup is a private copy, release it. Don't forget to copy t_self
2027  * back to the caller's image, too.
2028  */
2029  if (heaptup != tup)
2030  {
2031  tup->t_self = heaptup->t_self;
2032  heap_freetuple(heaptup);
2033  }
2034 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:368
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
bool IsToastRelation(Relation relation)
Definition: catalog.c:138
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7518
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1122
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2043
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:575
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1471
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:220
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
unsigned char uint8
Definition: c.h:427
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33
uint16 t_infomask2
Definition: heapam_xlog.h:143
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLH_INSERT_ON_TOAST_RELATION
Definition: heapam_xlog.h:70
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:238
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3513
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:636
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:37
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3536
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:438
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:35
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:331
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:404
#define BufferGetPage(buffer)
Definition: bufmgr.h:169
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:330
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:422
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:620
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:38
void CheckForSerializableConflictIn(Relation relation, ItemPointer tid, BlockNumber blkno)
Definition: predicate.c:4375
#define PageClearAllVisible(page)
Definition: bufpage.h:389
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:800
uint16 t_infomask
Definition: heapam_xlog.h:144
#define InvalidBlockNumber
Definition: block.h:33
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:563
#define SizeOfHeapInsert
Definition: heapam_xlog.h:159
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2663
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:2111
#define HEAP_INSERT_NO_LOGICAL
Definition: heapam.h:36
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:123
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:153
#define SizeOfHeapHeader
Definition: heapam_xlog.h:148
Pointer Page
Definition: bufpage.h:78

◆ heap_lock_tuple()

TM_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
TM_FailureData tmfd 
)

Definition at line 3973 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), TM_FailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, 
UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_lock().

3977 {
3978  TM_Result result;
3979  ItemPointer tid = &(tuple->t_self);
3980  ItemId lp;
3981  Page page;
3982  Buffer vmbuffer = InvalidBuffer;
3983  BlockNumber block;
3984  TransactionId xid,
3985  xmax;
3986  uint16 old_infomask,
3987  new_infomask,
3988  new_infomask2;
3989  bool first_time = true;
3990  bool skip_tuple_lock = false;
3991  bool have_tuple_lock = false;
3992  bool cleared_all_frozen = false;
3993 
3994  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3995  block = ItemPointerGetBlockNumber(tid);
3996 
3997  /*
3998  * Before locking the buffer, pin the visibility map page if it appears to
3999  * be necessary. Since we haven't got the lock yet, someone else might be
4000  * in the middle of changing this, so we'll need to recheck after we have
4001  * the lock.
4002  */
4003  if (PageIsAllVisible(BufferGetPage(*buffer)))
4004  visibilitymap_pin(relation, block, &vmbuffer);
4005 
4007 
4008  page = BufferGetPage(*buffer);
4009  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4010  Assert(ItemIdIsNormal(lp));
4011 
4012  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4013  tuple->t_len = ItemIdGetLength(lp);
4014  tuple->t_tableOid = RelationGetRelid(relation);
4015 
4016 l3:
4017  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4018 
4019  if (result == TM_Invisible)
4020  {
4021  /*
4022  * This is possible, but only when locking a tuple for ON CONFLICT
4023  * UPDATE. We return this value here rather than throwing an error in
4024  * order to give that case the opportunity to throw a more specific
4025  * error.
4026  */
4027  result = TM_Invisible;
4028  goto out_locked;
4029  }
4030  else if (result == TM_BeingModified ||
4031  result == TM_Updated ||
4032  result == TM_Deleted)
4033  {
4034  TransactionId xwait;
4035  uint16 infomask;
4036  uint16 infomask2;
4037  bool require_sleep;
4038  ItemPointerData t_ctid;
4039 
4040  /* must copy state data before unlocking buffer */
4041  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4042  infomask = tuple->t_data->t_infomask;
4043  infomask2 = tuple->t_data->t_infomask2;
4044  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4045 
4046  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4047 
4048  /*
4049  * If any subtransaction of the current top transaction already holds
4050  * a lock as strong as or stronger than what we're requesting, we
4051  * effectively hold the desired lock already. We *must* succeed
4052  * without trying to take the tuple lock, else we will deadlock
4053  * against anyone wanting to acquire a stronger lock.
4054  *
4055  * Note we only do this the first time we loop on the HTSU result;
4056  * there is no point in testing in subsequent passes, because
4057  * evidently our own transaction cannot have acquired a new lock after
4058  * the first time we checked.
4059  */
4060  if (first_time)
4061  {
4062  first_time = false;
4063 
4064  if (infomask & HEAP_XMAX_IS_MULTI)
4065  {
4066  int i;
4067  int nmembers;
4068  MultiXactMember *members;
4069 
4070  /*
4071  * We don't need to allow old multixacts here; if that had
4072  * been the case, HeapTupleSatisfiesUpdate would have returned
4073  * MayBeUpdated and we wouldn't be here.
4074  */
4075  nmembers =
4076  GetMultiXactIdMembers(xwait, &members, false,
4077  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4078 
4079  for (i = 0; i < nmembers; i++)
4080  {
4081  /* only consider members of our own transaction */
4082  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4083  continue;
4084 
4085  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4086  {
4087  pfree(members);
4088  result = TM_Ok;
4089  goto out_unlocked;
4090  }
4091  else
4092  {
4093  /*
4094  * Disable acquisition of the heavyweight tuple lock.
4095  * Otherwise, when promoting a weaker lock, we might
4096  * deadlock with another locker that has acquired the
4097  * heavyweight tuple lock and is waiting for our
4098  * transaction to finish.
4099  *
4100  * Note that in this case we still need to wait for
4101  * the multixact if required, to avoid acquiring
4102  * conflicting locks.
4103  */
4104  skip_tuple_lock = true;
4105  }
4106  }
4107 
4108  if (members)
4109  pfree(members);
4110  }
4111  else if (TransactionIdIsCurrentTransactionId(xwait))
4112  {
4113  switch (mode)
4114  {
4115  case LockTupleKeyShare:
4116  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4117  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4118  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4119  result = TM_Ok;
4120  goto out_unlocked;
4121  case LockTupleShare:
4122  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4123  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4124  {
4125  result = TM_Ok;
4126  goto out_unlocked;
4127  }
4128  break;
4130  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4131  {
4132  result = TM_Ok;
4133  goto out_unlocked;
4134  }
4135  break;
4136  case LockTupleExclusive:
4137  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4138  infomask2 & HEAP_KEYS_UPDATED)
4139  {
4140  result = TM_Ok;
4141  goto out_unlocked;
4142  }
4143  break;
4144  }
4145  }
4146  }
4147 
4148  /*
4149  * Initially assume that we will have to wait for the locking
4150  * transaction(s) to finish. We check various cases below in which
4151  * this can be turned off.
4152  */
4153  require_sleep = true;
4154  if (mode == LockTupleKeyShare)
4155  {
4156  /*
4157  * If we're requesting KeyShare, and there's no update present, we
4158  * don't need to wait. Even if there is an update, we can still
4159  * continue if the key hasn't been modified.
4160  *
4161  * However, if there are updates, we need to walk the update chain
4162  * to mark future versions of the row as locked, too. That way,
4163  * if somebody deletes that future version, we're protected
4164  * against the key going away. This locking of future versions
4165  * could block momentarily, if a concurrent transaction is
4166  * deleting a key; or it could return a value to the effect that
4167  * the transaction deleting the key has already committed. So we
4168  * do this before re-locking the buffer; otherwise this would be
4169  * prone to deadlocks.
4170  *
4171  * Note that the TID we're locking was grabbed before we unlocked
4172  * the buffer. For it to change while we're not looking, the
4173  * other properties we're testing for below after re-locking the
4174  * buffer would also change, in which case we would restart this
4175  * loop above.
4176  */
4177  if (!(infomask2 & HEAP_KEYS_UPDATED))
4178  {
4179  bool updated;
4180 
4181  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4182 
4183  /*
4184  * If there are updates, follow the update chain; bail out if
4185  * that cannot be done.
4186  */
4187  if (follow_updates && updated)
4188  {
4189  TM_Result res;
4190 
4191  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4193  mode);
4194  if (res != TM_Ok)
4195  {
4196  result = res;
4197  /* recovery code expects to have buffer lock held */
4199  goto failed;
4200  }
4201  }
4202 
4204 
4205  /*
4206  * Make sure it's still an appropriate lock, else start over.
4207  * Also, if it wasn't updated before we released the lock, but
4208  * is updated now, we start over too; the reason is that we
4209  * now need to follow the update chain to lock the new
4210  * versions.
4211  */
4212  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4213  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4214  !updated))
4215  goto l3;
4216 
4217  /* Things look okay, so we can skip sleeping */
4218  require_sleep = false;
4219 
4220  /*
4221  * Note we allow Xmax to change here; other updaters/lockers
4222  * could have modified it before we grabbed the buffer lock.
4223  * However, this is not a problem, because with the recheck we
4224  * just did we ensure that they still don't conflict with the
4225  * lock we want.
4226  */
4227  }
4228  }
4229  else if (mode == LockTupleShare)
4230  {
4231  /*
4232  * If we're requesting Share, we can similarly avoid sleeping if
4233  * there's no update and no exclusive lock present.
4234  */
4235  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4236  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4237  {
4239 
4240  /*
4241  * Make sure it's still an appropriate lock, else start over.
4242  * See above about allowing xmax to change.
4243  */
4244  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4246  goto l3;
4247  require_sleep = false;
4248  }
4249  }
4250  else if (mode == LockTupleNoKeyExclusive)
4251  {
4252  /*
4253  * If we're requesting NoKeyExclusive, we might also be able to
4254  * avoid sleeping; just ensure that there no conflicting lock
4255  * already acquired.
4256  */
4257  if (infomask & HEAP_XMAX_IS_MULTI)
4258  {
4259  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4260  mode, NULL))
4261  {
4262  /*
4263  * No conflict, but if the xmax changed under us in the
4264  * meantime, start over.
4265  */
4267  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4269  xwait))
4270  goto l3;
4271 
4272  /* otherwise, we're good */
4273  require_sleep = false;
4274  }
4275  }
4276  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4277  {
4279 
4280  /* if the xmax changed in the meantime, start over */
4281  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4283  xwait))
4284  goto l3;
4285  /* otherwise, we're good */
4286  require_sleep = false;
4287  }
4288  }
4289 
4290  /*
4291  * As a check independent from those above, we can also avoid sleeping
4292  * if the current transaction is the sole locker of the tuple. Note
4293  * that the strength of the lock already held is irrelevant; this is
4294  * not about recording the lock in Xmax (which will be done regardless
4295  * of this optimization, below). Also, note that the cases where we
4296  * hold a lock stronger than we are requesting are already handled
4297  * above by not doing anything.
4298  *
4299  * Note we only deal with the non-multixact case here; MultiXactIdWait
4300  * is well equipped to deal with this situation on its own.
4301  */
4302  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4304  {
4305  /* ... but if the xmax changed in the meantime, start over */
4307  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4309  xwait))
4310  goto l3;
4312  require_sleep = false;
4313  }
4314 
4315  /*
4316  * Time to sleep on the other transaction/multixact, if necessary.
4317  *
4318  * If the other transaction is an update/delete that's already
4319  * committed, then sleeping cannot possibly do any good: if we're
4320  * required to sleep, get out to raise an error instead.
4321  *
4322  * By here, we either have already acquired the buffer exclusive lock,
4323  * or we must wait for the locking transaction or multixact; so below
4324  * we ensure that we grab buffer lock after the sleep.
4325  */
4326  if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4327  {
4329  goto failed;
4330  }
4331  else if (require_sleep)
4332  {
4333  /*
4334  * Acquire tuple lock to establish our priority for the tuple, or
4335  * die trying. LockTuple will release us when we are next-in-line
4336  * for the tuple. We must do this even if we are share-locking,
4337  * but not if we already have a weaker lock on the tuple.
4338  *
4339  * If we are forced to "start over" below, we keep the tuple lock;
4340  * this arranges that we stay at the head of the line while
4341  * rechecking tuple state.
4342  */
4343  if (!skip_tuple_lock &&
4344  !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4345  &have_tuple_lock))
4346  {
4347  /*
4348  * This can only happen if wait_policy is Skip and the lock
4349  * couldn't be obtained.
4350  */
4351  result = TM_WouldBlock;
4352  /* recovery code expects to have buffer lock held */
4354  goto failed;
4355  }
4356 
4357  if (infomask & HEAP_XMAX_IS_MULTI)
4358  {
4360 
4361  /* We only ever lock tuples, never update them */
4362  if (status >= MultiXactStatusNoKeyUpdate)
4363  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4364 
4365  /* wait for multixact to end, or die trying */
4366  switch (wait_policy)
4367  {
4368  case LockWaitBlock:
4369  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4370  relation, &tuple->t_self, XLTW_Lock, NULL);
4371  break;
4372  case LockWaitSkip:
4374  status, infomask, relation,
4375  NULL))
4376  {
4377  result = TM_WouldBlock;
4378  /* recovery code expects to have buffer lock held */
4380  goto failed;
4381  }
4382  break;
4383  case LockWaitError:
4385  status, infomask, relation,
4386  NULL))
4387  ereport(ERROR,
4388  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4389  errmsg("could not obtain lock on row in relation \"%s\"",
4390  RelationGetRelationName(relation))));
4391 
4392  break;
4393  }
4394 
4395  /*
4396  * Of course, the multixact might not be done here: if we're
4397  * requesting a light lock mode, other transactions with light
4398  * locks could still be alive, as well as locks owned by our
4399  * own xact or other subxacts of this backend. We need to
4400  * preserve the surviving MultiXact members. Note that it
4401  * isn't absolutely necessary in the latter case, but doing so
4402  * is simpler.
4403  */
4404  }
4405  else
4406  {
4407  /* wait for regular transaction to end, or die trying */
4408  switch (wait_policy)
4409  {
4410  case LockWaitBlock:
4411  XactLockTableWait(xwait, relation, &tuple->t_self,
4412  XLTW_Lock);
4413  break;
4414  case LockWaitSkip:
4415  if (!ConditionalXactLockTableWait(xwait))
4416  {
4417  result = TM_WouldBlock;
4418  /* recovery code expects to have buffer lock held */
4420  goto failed;
4421  }
4422  break;
4423  case LockWaitError:
4424  if (!ConditionalXactLockTableWait(xwait))
4425  ereport(ERROR,
4426  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4427  errmsg("could not obtain lock on row in relation \"%s\"",
4428  RelationGetRelationName(relation))));
4429  break;
4430  }
4431  }
4432 
4433  /* if there are updates, follow the update chain */
4434  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4435  {
4436  TM_Result res;
4437 
4438  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4440  mode);
4441  if (res != TM_Ok)
4442  {
4443  result = res;
4444  /* recovery code expects to have buffer lock held */
4446  goto failed;
4447  }
4448  }
4449 
4451 
4452  /*
4453  * xwait is done, but if xwait had just locked the tuple then some
4454  * other xact could update this tuple before we get to this point.
4455  * Check for xmax change, and start over if so.
4456  */
4457  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4459  xwait))
4460  goto l3;
4461 
4462  if (!(infomask & HEAP_XMAX_IS_MULTI))
4463  {
4464  /*
4465  * Otherwise check if it committed or aborted. Note we cannot
4466  * be here if the tuple was only locked by somebody who didn't
4467  * conflict with us; that would have been handled above. So
4468  * that transaction must necessarily be gone by now. But
4469  * don't check for this in the multixact case, because some
4470  * locker transactions might still be running.
4471  */
4472  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
4473  }
4474  }
4475 
4476  /* By here, we're certain that we hold buffer exclusive lock again */
4477 
4478  /*
4479  * We may lock if previous xmax aborted, or if it committed but only
4480  * locked the tuple without updating it; or if we didn't have to wait
4481  * at all for whatever reason.
4482  */
4483  if (!require_sleep ||
4484  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
4487  result = TM_Ok;
4488  else if (!ItemPointerEquals(&tuple->t_self, &tuple->t_data->t_ctid) ||
4490  result = TM_Updated;
4491  else
<