PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/genam.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/heaptoast.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/tableam.h"
#include "access/transam.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/spccache.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define HEAPAMSLOTDEBUG_1
 
#define HEAPAMSLOTDEBUG_2
 
#define HEAPAMSLOTDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tuple, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static TM_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_changed, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (TableScanDesc sscan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
TableScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelTableScanDesc parallel_scan, uint32 flags)
 
void heap_rescan (TableScanDesc sscan, ScanKey key, bool set_params, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (TableScanDesc sscan)
 
HeapTuple heap_getnext (TableScanDesc sscan, ScanDirection direction)
 
bool heap_getnextslot (TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
void heap_get_latest_tid (TableScanDesc sscan, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
void heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, TupleTableSlot **slots, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
void simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
TM_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
TM_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, TM_FailureData *tmfd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
TM_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, TM_FailureData *tmfd)
 
static TM_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, HeapTuple tup, bool *needwait)
 
static TM_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, ItemPointer tid)
 
void heap_abort_speculative (Relation relation, ItemPointer tid)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
TransactionId heap_compute_xid_horizon_for_tuples (Relation rel, ItemPointerData *tids, int nitems)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 163 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 5791 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 5794 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 5790 of file heapam.c.

Referenced by FreezeMultiXactId().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 5793 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 5792 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ HEAPAMSLOTDEBUG_1

#define HEAPAMSLOTDEBUG_1

Definition at line 1343 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPAMSLOTDEBUG_2

#define HEAPAMSLOTDEBUG_2

Definition at line 1344 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPAMSLOTDEBUG_3

#define HEAPAMSLOTDEBUG_3

Definition at line 1345 of file heapam.c.

Referenced by heap_getnextslot().

◆ HEAPDEBUG_1

#define HEAPDEBUG_1

Definition at line 1283 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_2

#define HEAPDEBUG_2

Definition at line 1284 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_3

#define HEAPDEBUG_3

Definition at line 1285 of file heapam.c.

Referenced by heap_getnext().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 159 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 195 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 161 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2397 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2398 {
2399  return
2400  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2401  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2402  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2403  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2404  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2405  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2406  XLHL_KEYS_UPDATED : 0);
2407 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId *  result_xmax,
uint16 *  result_infomask,
uint16 *  result_infomask2 
)
static

Definition at line 4713 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

4718 {
4719  TransactionId new_xmax;
4720  uint16 new_infomask,
4721  new_infomask2;
4722 
4723  Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
4724 
4725 l5:
4726  new_infomask = 0;
4727  new_infomask2 = 0;
4728  if (old_infomask & HEAP_XMAX_INVALID)
4729  {
4730  /*
4731  * No previous locker; we just insert our own TransactionId.
4732  *
4733  * Note that it's critical that this case be the first one checked,
4734  * because there are several blocks below that come back to this one
4735  * to implement certain optimizations; old_infomask might contain
4736  * other dirty bits in those cases, but we don't really care.
4737  */
4738  if (is_update)
4739  {
4740  new_xmax = add_to_xmax;
4741  if (mode == LockTupleExclusive)
4742  new_infomask2 |= HEAP_KEYS_UPDATED;
4743  }
4744  else
4745  {
4746  new_infomask |= HEAP_XMAX_LOCK_ONLY;
4747  switch (mode)
4748  {
4749  case LockTupleKeyShare:
4750  new_xmax = add_to_xmax;
4751  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
4752  break;
4753  case LockTupleShare:
4754  new_xmax = add_to_xmax;
4755  new_infomask |= HEAP_XMAX_SHR_LOCK;
4756  break;
4757  case LockTupleNoKeyExclusive:
4758  new_xmax = add_to_xmax;
4759  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4760  break;
4761  case LockTupleExclusive:
4762  new_xmax = add_to_xmax;
4763  new_infomask |= HEAP_XMAX_EXCL_LOCK;
4764  new_infomask2 |= HEAP_KEYS_UPDATED;
4765  break;
4766  default:
4767  new_xmax = InvalidTransactionId; /* silence compiler */
4768  elog(ERROR, "invalid lock mode");
4769  }
4770  }
4771  }
4772  else if (old_infomask & HEAP_XMAX_IS_MULTI)
4773  {
4774  MultiXactStatus new_status;
4775 
4776  /*
4777  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
4778  * cross-check.
4779  */
4780  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
4781 
4782  /*
4783  * A multixact together with LOCK_ONLY set but neither lock bit set
4784  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
4785  * anymore. This check is critical for databases upgraded by
4786  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
4787  * that such multis are never passed.
4788  */
4789  if (HEAP_LOCKED_UPGRADED(old_infomask))
4790  {
4791  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4792  old_infomask |= HEAP_XMAX_INVALID;
4793  goto l5;
4794  }
4795 
4796  /*
4797  * If the XMAX is already a MultiXactId, then we need to expand it to
4798  * include add_to_xmax; but if all the members were lockers and are
4799  * all gone, we can do away with the IS_MULTI bit and just set
4800  * add_to_xmax as the only locker/updater. If all lockers are gone
4801  * and we have an updater that aborted, we can also do without a
4802  * multi.
4803  *
4804  * The cost of doing GetMultiXactIdMembers would be paid by
4805  * MultiXactIdExpand if we weren't to do this, so this check is not
4806  * incurring extra work anyhow.
4807  */
4808  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
4809  {
4810  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
4811  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
4812  old_infomask)))
4813  {
4814  /*
4815  * Reset these bits and restart; otherwise fall through to
4816  * create a new multi below.
4817  */
4818  old_infomask &= ~HEAP_XMAX_IS_MULTI;
4819  old_infomask |= HEAP_XMAX_INVALID;
4820  goto l5;
4821  }
4822  }
4823 
4824  new_status = get_mxact_status_for_lock(mode, is_update);
4825 
4826  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
4827  new_status);
4828  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4829  }
4830  else if (old_infomask & HEAP_XMAX_COMMITTED)
4831  {
4832  /*
4833  * It's a committed update, so we need to preserve him as updater of
4834  * the tuple.
4835  */
4836  MultiXactStatus status;
4837  MultiXactStatus new_status;
4838 
4839  if (old_infomask2 & HEAP_KEYS_UPDATED)
4840  status = MultiXactStatusUpdate;
4841  else
4842  status = MultiXactStatusNoKeyUpdate;
4843 
4844  new_status = get_mxact_status_for_lock(mode, is_update);
4845 
4846  /*
4847  * since it's not running, it's obviously impossible for the old
4848  * updater to be identical to the current one, so we need not check
4849  * for that case as we do in the block above.
4850  */
4851  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4852  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4853  }
4854  else if (TransactionIdIsInProgress(xmax))
4855  {
4856  /*
4857  * If the XMAX is a valid, in-progress TransactionId, then we need to
4858  * create a new MultiXactId that includes both the old locker or
4859  * updater and our own TransactionId.
4860  */
4861  MultiXactStatus new_status;
4862  MultiXactStatus old_status;
4863  LockTupleMode old_mode;
4864 
4865  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
4866  {
4867  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
4868  old_status = MultiXactStatusForKeyShare;
4869  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
4870  old_status = MultiXactStatusForShare;
4871  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
4872  {
4873  if (old_infomask2 & HEAP_KEYS_UPDATED)
4874  old_status = MultiXactStatusForUpdate;
4875  else
4876  old_status = MultiXactStatusForNoKeyUpdate;
4877  }
4878  else
4879  {
4880  /*
4881  * LOCK_ONLY can be present alone only when a page has been
4882  * upgraded by pg_upgrade. But in that case,
4883  * TransactionIdIsInProgress() should have returned false. We
4884  * assume it's no longer locked in this case.
4885  */
4886  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
4887  old_infomask |= HEAP_XMAX_INVALID;
4888  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
4889  goto l5;
4890  }
4891  }
4892  else
4893  {
4894  /* it's an update, but which kind? */
4895  if (old_infomask2 & HEAP_KEYS_UPDATED)
4896  old_status = MultiXactStatusUpdate;
4897  else
4898  old_status = MultiXactStatusNoKeyUpdate;
4899  }
4900 
4901  old_mode = TUPLOCK_from_mxstatus(old_status);
4902 
4903  /*
4904  * If the lock to be acquired is for the same TransactionId as the
4905  * existing lock, there's an optimization possible: consider only the
4906  * strongest of both locks as the only one present, and restart.
4907  */
4908  if (xmax == add_to_xmax)
4909  {
4910  /*
4911  * Note that it's not possible for the original tuple to be
4912  * updated: we wouldn't be here because the tuple would have been
4913  * invisible and we wouldn't try to update it. As a subtlety,
4914  * this code can also run when traversing an update chain to lock
4915  * future versions of a tuple. But we wouldn't be here either,
4916  * because the add_to_xmax would be different from the original
4917  * updater.
4918  */
4919  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
4920 
4921  /* acquire the strongest of both */
4922  if (mode < old_mode)
4923  mode = old_mode;
4924  /* mustn't touch is_update */
4925 
4926  old_infomask |= HEAP_XMAX_INVALID;
4927  goto l5;
4928  }
4929 
4930  /* otherwise, just fall back to creating a new multixact */
4931  new_status = get_mxact_status_for_lock(mode, is_update);
4932  new_xmax = MultiXactIdCreate(xmax, old_status,
4933  add_to_xmax, new_status);
4934  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4935  }
4936  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
4937  TransactionIdDidCommit(xmax))
4938  {
4939  /*
4940  * It's a committed update, so we gotta preserve him as updater of the
4941  * tuple.
4942  */
4943  MultiXactStatus status;
4944  MultiXactStatus new_status;
4945 
4946  if (old_infomask2 & HEAP_KEYS_UPDATED)
4947  status = MultiXactStatusUpdate;
4948  else
4949  status = MultiXactStatusNoKeyUpdate;
4950 
4951  new_status = get_mxact_status_for_lock(mode, is_update);
4952 
4953  /*
4954  * since it's not running, it's obviously impossible for the old
4955  * updater to be identical to the current one, so we need not check
4956  * for that case as we do in the block above.
4957  */
4958  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
4959  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
4960  }
4961  else
4962  {
4963  /*
4964  * Can get here iff the locking/updating transaction was running when
4965  * the infomask was extracted from the tuple, but finished before
4966  * TransactionIdIsInProgress got to run. Deal with it as if there was
4967  * no locker at all in the first place.
4968  */
4969  old_infomask |= HEAP_XMAX_INVALID;
4970  goto l5;
4971  }
4972 
4973  *result_infomask = new_infomask;
4974  *result_infomask2 = new_infomask2;
4975  *result_xmax = new_xmax;
4976 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6399
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
uint32 TransactionId
Definition: c.h:514
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:987
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:206
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:262
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6480
unsigned short uint16
Definition: c.h:358
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:386
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId MultiXactId
Definition: c.h:524
#define Assert(condition)
Definition: c.h:739
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:195
#define elog(elevel,...)
Definition: elog.h:228
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:3925
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:264
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:266
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:439

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 6747 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

6749 {
6750  return Do_MultiXactIdWait(multi, status, infomask, true,
6751  rel, NULL, XLTW_None, remaining);
6752 }
int remaining
Definition: informix.c:667
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6647
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 6647 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

6651 {
6652  bool result = true;
6653  MultiXactMember *members;
6654  int nmembers;
6655  int remain = 0;
6656 
6657  /* for pre-pg_upgrade tuples, no need to sleep at all */
6658  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
6659  GetMultiXactIdMembers(multi, &members, false,
6660  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6661 
6662  if (nmembers >= 0)
6663  {
6664  int i;
6665 
6666  for (i = 0; i < nmembers; i++)
6667  {
6668  TransactionId memxid = members[i].xid;
6669  MultiXactStatus memstatus = members[i].status;
6670 
6671  if (TransactionIdIsCurrentTransactionId(memxid))
6672  {
6673  remain++;
6674  continue;
6675  }
6676 
6677  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
6678  LOCKMODE_from_mxstatus(status)))
6679  {
6680  if (remaining && TransactionIdIsInProgress(memxid))
6681  remain++;
6682  continue;
6683  }
6684 
6685  /*
6686  * This member conflicts with our multi, so we have to sleep (or
6687  * return failure, if asked to avoid waiting.)
6688  *
6689  * Note that we don't set up an error context callback ourselves,
6690  * but instead we pass the info down to XactLockTableWait. This
6691  * might seem a bit wasteful because the context is set up and
6692  * tore down for each member of the multixact, but in reality it
6693  * should be barely noticeable, and it avoids duplicate code.
6694  */
6695  if (nowait)
6696  {
6697  result = ConditionalXactLockTableWait(memxid);
6698  if (!result)
6699  break;
6700  }
6701  else
6702  XactLockTableWait(memxid, rel, ctid, oper);
6703  }
6704 
6705  pfree(members);
6706  }
6707 
6708  if (remaining)
6709  *remaining = remain;
6710 
6711  return result;
6712 }
int remaining
Definition: informix.c:667
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:514
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:987
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:151
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:697
void pfree(void *pointer)
Definition: mcxt.c:1056
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode,
bool *  current_is_member 
)
static

Definition at line 6548 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6550 {
6551  int nmembers;
6552  MultiXactMember *members;
6553  bool result = false;
6554  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6555 
6556  if (HEAP_LOCKED_UPGRADED(infomask))
6557  return false;
6558 
6559  nmembers = GetMultiXactIdMembers(multi, &members, false,
6560  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6561  if (nmembers >= 0)
6562  {
6563  int i;
6564 
6565  for (i = 0; i < nmembers; i++)
6566  {
6567  TransactionId memxid;
6568  LOCKMODE memlockmode;
6569 
6570  if (result && (current_is_member == NULL || *current_is_member))
6571  break;
6572 
6573  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
6574 
6575  /* ignore members from current xact (but track their presence) */
6576  memxid = members[i].xid;
6577  if (TransactionIdIsCurrentTransactionId(memxid))
6578  {
6579  if (current_is_member != NULL)
6580  *current_is_member = true;
6581  continue;
6582  }
6583  else if (result)
6584  continue;
6585 
6586  /* ignore members that don't conflict with the lock we want */
6587  if (!DoLockModesConflict(memlockmode, wanted))
6588  continue;
6589 
6590  if (ISUPDATE_from_mxstatus(members[i].status))
6591  {
6592  /* ignore aborted updaters */
6593  if (TransactionIdDidAbort(memxid))
6594  continue;
6595  }
6596  else
6597  {
6598  /* ignore lockers-only that are no longer in progress */
6599  if (!TransactionIdIsInProgress(memxid))
6600  continue;
6601  }
6602 
6603  /*
6604  * Whatever remains are either live lockers that conflict with our
6605  * wanted lock, and updaters that are not aborted. Those conflict
6606  * with what we want. Set up to return true, but keep going to
6607  * look for the current transaction among the multixact members,
6608  * if needed.
6609  */
6610  result = true;
6611  }
6612  pfree(members);
6613  }
6614 
6615  return result;
6616 }
uint32 TransactionId
Definition: c.h:514
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:987
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:151
void pfree(void *pointer)
Definition: mcxt.c:1056
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_changed,
bool *  copy 
)
static

Definition at line 7605 of file heapam.c.

References Assert, bms_free(), bms_is_empty(), bms_is_member(), FirstLowInvalidHeapAttributeNumber, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleHasExternal, i, INDEX_ATTR_BITMAP_IDENTITY_KEY, MaxHeapAttributeNumber, TupleDescData::natts, RelationData::rd_rel, RelationGetDescr, RelationGetIndexAttrBitmap(), RelationIsLogicallyLogged, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7607 {
7608  TupleDesc desc = RelationGetDescr(relation);
7609  char replident = relation->rd_rel->relreplident;
7610  Bitmapset *idattrs;
7611  HeapTuple key_tuple;
7612  bool nulls[MaxHeapAttributeNumber];
7614 
7615  *copy = false;
7616 
7617  if (!RelationIsLogicallyLogged(relation))
7618  return NULL;
7619 
7620  if (replident == REPLICA_IDENTITY_NOTHING)
7621  return NULL;
7622 
7623  if (replident == REPLICA_IDENTITY_FULL)
7624  {
7625  /*
7626  * When logging the entire old tuple, it very well could contain
7627  * toasted columns. If so, force them to be inlined.
7628  */
7629  if (HeapTupleHasExternal(tp))
7630  {
7631  *copy = true;
7632  tp = toast_flatten_tuple(tp, desc);
7633  }
7634  return tp;
7635  }
7636 
7637  /* if the key hasn't changed and we're only logging the key, we're done */
7638  if (!key_changed)
7639  return NULL;
7640 
7641  /* find out the replica identity columns */
7642  idattrs = RelationGetIndexAttrBitmap(relation,
7644 
7645  /*
7646  * If there's no defined replica identity columns, treat as !key_changed.
7647  * (This case should not be reachable from heap_update, since that should
7648  * calculate key_changed accurately. But heap_delete just passes constant
7649  * true for key_changed, so we can hit this case in deletes.)
7650  */
7651  if (bms_is_empty(idattrs))
7652  return NULL;
7653 
7654  /*
7655  * Construct a new tuple containing only the replica identity columns,
7656  * with nulls elsewhere. While we're at it, assert that the replica
7657  * identity columns aren't null.
7658  */
7659  heap_deform_tuple(tp, desc, values, nulls);
7660 
7661  for (int i = 0; i < desc->natts; i++)
7662  {
7664  idattrs))
7665  Assert(!nulls[i]);
7666  else
7667  nulls[i] = true;
7668  }
7669 
7670  key_tuple = heap_form_tuple(desc, values, nulls);
7671  *copy = true;
7672 
7673  bms_free(idattrs);
7674 
7675  /*
7676  * If the tuple, which by here only contains indexed columns, still has
7677  * toasted columns, force them to be inlined. This is somewhat unlikely
7678  * since there's limits on the size of indexed columns, so we don't
7679  * duplicate toast_flatten_tuple()s functionality in the above loop over
7680  * the indexed columns, even if it would be more efficient.
7681  */
7682  if (HeapTupleHasExternal(key_tuple))
7683  {
7684  HeapTuple oldtup = key_tuple;
7685 
7686  key_tuple = toast_flatten_tuple(oldtup, desc);
7687  heap_freetuple(oldtup);
7688  }
7689 
7690  return key_tuple;
7691 }
#define RelationGetDescr(relation)
Definition: rel.h:448
#define FirstLowInvalidHeapAttributeNumber
Definition: sysattr.h:27
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1020
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:594
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: heaptoast.c:343
bool bms_is_empty(const Bitmapset *a)
Definition: bitmapset.c:701
uintptr_t Datum
Definition: postgres.h:367
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void bms_free(Bitmapset *a)
Definition: bitmapset.c:208
#define Assert(condition)
Definition: c.h:739
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1249
static Datum values[MAXATTR]
Definition: bootstrap.c:167
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int i
Bitmapset * RelationGetIndexAttrBitmap(Relation relation, IndexAttrBitmapKind attrKind)
Definition: relcache.c:4777
bool bms_is_member(int x, const Bitmapset *a)
Definition: bitmapset.c:427

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 *  infomask,
uint16 *  infomask2 
)
static

Definition at line 8009 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8010 {
8011  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8013  *infomask2 &= ~HEAP_KEYS_UPDATED;
8014 
8015  if (infobits & XLHL_XMAX_IS_MULTI)
8016  *infomask |= HEAP_XMAX_IS_MULTI;
8017  if (infobits & XLHL_XMAX_LOCK_ONLY)
8018  *infomask |= HEAP_XMAX_LOCK_ONLY;
8019  if (infobits & XLHL_XMAX_EXCL_LOCK)
8020  *infomask |= HEAP_XMAX_EXCL_LOCK;
8021  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8022  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8023  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8024 
8025  if (infobits & XLHL_KEYS_UPDATED)
8026  *infomask2 |= HEAP_KEYS_UPDATED;
8027 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 1829 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferCleanup(), intorel_shutdown(), and transientrel_shutdown().

1830 {
1831  if (bistate->current_buf != InvalidBuffer)
1832  ReleaseBuffer(bistate->current_buf);
1833  FreeAccessStrategy(bistate->strategy);
1834  pfree(bistate);
1835 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
void pfree(void *pointer)
Definition: mcxt.c:1056
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:31
Buffer current_buf
Definition: hio.h:32

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 *  flags 
)
static

Definition at line 5818 of file heapam.c.

References Assert, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

5822 {
5824  int i;
5825  MultiXactMember *members;
5826  int nmembers;
5827  bool need_replace;
5828  int nnewmembers;
5829  MultiXactMember *newmembers;
5830  bool has_lockers;
5831  TransactionId update_xid;
5832  bool update_committed;
5833 
5834  *flags = 0;
5835 
5836  /* We should only be called in Multis */
5837  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
5838 
5839  if (!MultiXactIdIsValid(multi) ||
5840  HEAP_LOCKED_UPGRADED(t_infomask))
5841  {
5842  /* Ensure infomask bits are appropriately set/reset */
5843  *flags |= FRM_INVALIDATE_XMAX;
5844  return InvalidTransactionId;
5845  }
5846  else if (MultiXactIdPrecedes(multi, relminmxid))
5847  ereport(ERROR,
5849  errmsg_internal("found multixact %u from before relminmxid %u",
5850  multi, relminmxid)));
5851  else if (MultiXactIdPrecedes(multi, cutoff_multi))
5852  {
5853  /*
5854  * This old multi cannot possibly have members still running, but
5855  * verify just in case. If it was a locker only, it can be removed
5856  * without any further consideration; but if it contained an update,
5857  * we might need to preserve it.
5858  */
5859  if (MultiXactIdIsRunning(multi,
5860  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
5861  ereport(ERROR,
5863  errmsg_internal("multixact %u from before cutoff %u found to be still running",
5864  multi, cutoff_multi)));
5865 
5866  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
5867  {
5868  *flags |= FRM_INVALIDATE_XMAX;
5869  xid = InvalidTransactionId; /* not strictly necessary */
5870  }
5871  else
5872  {
5873  /* replace multi by update xid */
5874  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
5875 
5876  /* wasn't only a lock, xid needs to be valid */
5878 
5880  ereport(ERROR,
5882  errmsg_internal("found update xid %u from before relfrozenxid %u",
5883  xid, relfrozenxid)));
5884 
5885  /*
5886  * If the xid is older than the cutoff, it has to have aborted,
5887  * otherwise the tuple would have gotten pruned away.
5888  */
5889  if (TransactionIdPrecedes(xid, cutoff_xid))
5890  {
5891  if (TransactionIdDidCommit(xid))
5892  ereport(ERROR,
5894  errmsg_internal("cannot freeze committed update xid %u", xid)));
5895  *flags |= FRM_INVALIDATE_XMAX;
5896  xid = InvalidTransactionId; /* not strictly necessary */
5897  }
5898  else
5899  {
5900  *flags |= FRM_RETURN_IS_XID;
5901  }
5902  }
5903 
5904  return xid;
5905  }
5906 
5907  /*
5908  * This multixact might have or might not have members still running, but
5909  * we know it's valid and is newer than the cutoff point for multis.
5910  * However, some member(s) of it may be below the cutoff for Xids, so we
5911  * need to walk the whole members array to figure out what to do, if
5912  * anything.
5913  */
5914 
5915  nmembers =
5916  GetMultiXactIdMembers(multi, &members, false,
5917  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
5918  if (nmembers <= 0)
5919  {
5920  /* Nothing worth keeping */
5921  *flags |= FRM_INVALIDATE_XMAX;
5922  return InvalidTransactionId;
5923  }
5924 
5925  /* is there anything older than the cutoff? */
5926  need_replace = false;
5927  for (i = 0; i < nmembers; i++)
5928  {
5929  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
5930  {
5931  need_replace = true;
5932  break;
5933  }
5934  }
5935 
5936  /*
5937  * In the simplest case, there is no member older than the cutoff; we can
5938  * keep the existing MultiXactId as is.
5939  */
5940  if (!need_replace)
5941  {
5942  *flags |= FRM_NOOP;
5943  pfree(members);
5944  return InvalidTransactionId;
5945  }
5946 
5947  /*
5948  * If the multi needs to be updated, figure out which members do we need
5949  * to keep.
5950  */
5951  nnewmembers = 0;
5952  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
5953  has_lockers = false;
5954  update_xid = InvalidTransactionId;
5955  update_committed = false;
5956 
5957  for (i = 0; i < nmembers; i++)
5958  {
5959  /*
5960  * Determine whether to keep this member or ignore it.
5961  */
5962  if (ISUPDATE_from_mxstatus(members[i].status))
5963  {
5964  TransactionId xid = members[i].xid;
5965 
5968  ereport(ERROR,
5970  errmsg_internal("found update xid %u from before relfrozenxid %u",
5971  xid, relfrozenxid)));
5972 
5973  /*
5974  * It's an update; should we keep it? If the transaction is known
5975  * aborted or crashed then it's okay to ignore it, otherwise not.
5976  * Note that an updater older than cutoff_xid cannot possibly be
5977  * committed, because HeapTupleSatisfiesVacuum would have returned
5978  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
5979  *
5980  * As with all tuple visibility routines, it's critical to test
5981  * TransactionIdIsInProgress before TransactionIdDidCommit,
5982  * because of race conditions explained in detail in
5983  * heapam_visibility.c.
5984  */
5987  {
5988  Assert(!TransactionIdIsValid(update_xid));
5989  update_xid = xid;
5990  }
5991  else if (TransactionIdDidCommit(xid))
5992  {
5993  /*
5994  * The transaction committed, so we can tell caller to set
5995  * HEAP_XMAX_COMMITTED. (We can only do this because we know
5996  * the transaction is not running.)
5997  */
5998  Assert(!TransactionIdIsValid(update_xid));
5999  update_committed = true;
6000  update_xid = xid;
6001  }
6002  else
6003  {
6004  /*
6005  * Not in progress, not committed -- must be aborted or
6006  * crashed; we can ignore it.
6007  */
6008  }
6009 
6010  /*
6011  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6012  * update Xid cannot possibly be older than the xid cutoff. The
6013  * presence of such a tuple would cause corruption, so be paranoid
6014  * and check.
6015  */
6016  if (TransactionIdIsValid(update_xid) &&
6017  TransactionIdPrecedes(update_xid, cutoff_xid))
6018  ereport(ERROR,
6020  errmsg_internal("found update xid %u from before xid cutoff %u",
6021  update_xid, cutoff_xid)));
6022 
6023  /*
6024  * If we determined that it's an Xid corresponding to an update
6025  * that must be retained, additionally add it to the list of
6026  * members of the new Multi, in case we end up using that. (We
6027  * might still decide to use only an update Xid and not a multi,
6028  * but it's easier to maintain the list as we walk the old members
6029  * list.)
6030  */
6031  if (TransactionIdIsValid(update_xid))
6032  newmembers[nnewmembers++] = members[i];
6033  }
6034  else
6035  {
6036  /* We only keep lockers if they are still running */
6037  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6038  TransactionIdIsInProgress(members[i].xid))
6039  {
6040  /* running locker cannot possibly be older than the cutoff */
6041  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6042  newmembers[nnewmembers++] = members[i];
6043  has_lockers = true;
6044  }
6045  }
6046  }
6047 
6048  pfree(members);
6049 
6050  if (nnewmembers == 0)
6051  {
6052  /* nothing worth keeping!? Tell caller to remove the whole thing */
6053  *flags |= FRM_INVALIDATE_XMAX;
6054  xid = InvalidTransactionId;
6055  }
6056  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6057  {
6058  /*
6059  * If there's a single member and it's an update, pass it back alone
6060  * without creating a new Multi. (XXX we could do this when there's a
6061  * single remaining locker, too, but that would complicate the API too
6062  * much; moreover, the case with the single updater is more
6063  * interesting, because those are longer-lived.)
6064  */
6065  Assert(nnewmembers == 1);
6066  *flags |= FRM_RETURN_IS_XID;
6067  if (update_committed)
6068  *flags |= FRM_MARK_COMMITTED;
6069  xid = update_xid;
6070  }
6071  else
6072  {
6073  /*
6074  * Create a new multixact with the surviving members of the previous
6075  * one, to set as new Xmax in the tuple.
6076  */
6077  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6078  *flags |= FRM_RETURN_IS_MULTI;
6079  }
6080 
6081  pfree(newmembers);
6082 
6083  return xid;
6084 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:5792
#define FRM_MARK_COMMITTED
Definition: heapam.c:5794
uint32 TransactionId
Definition: c.h:514
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:987
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:748
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:252
int errcode(int sqlerrcode)
Definition: elog.c:608
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6480
void pfree(void *pointer)
Definition: mcxt.c:1056
#define ERROR
Definition: elog.h:43
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:5791
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
#define ereport(elevel, rest)
Definition: elog.h:141
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:45
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:5793
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
TransactionId relminmxid
Definition: pg_class.h:126
int errmsg_internal(const char *fmt,...)
Definition: elog.c:909
#define Assert(condition)
Definition: c.h:739
#define FRM_NOOP
Definition: heapam.c:5790
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3142
void * palloc(Size size)
Definition: mcxt.c:949
TransactionId relfrozenxid
Definition: pg_class.h:123
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:551

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 3925 of file heapam.c.

References elog, ERROR, mode, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

3926 {
3927  int retval;
3928 
3929  if (is_update)
3930  retval = tupleLockExtraInfo[mode].updstatus;
3931  else
3932  retval = tupleLockExtraInfo[mode].lockstatus;
3933 
3934  if (retval == -1)
3935  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
3936  is_update ? "true" : "false");
3937 
3938  return (MultiXactStatus) retval;
3939 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog(elevel,...)
Definition: elog.h:228

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 1815 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), CopyMultiInsertBufferInit(), intorel_startup(), and transientrel_startup().

1816 {
1817  BulkInsertState bistate;
1818 
1819  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
1821  bistate->current_buf = InvalidBuffer;
1822  return bistate;
1823 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:38
BufferAccessStrategy strategy
Definition: hio.h:31
void * palloc(Size size)
Definition: mcxt.c:949
Buffer current_buf
Definition: hio.h:32

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 *  new_infomask,
uint16 *  new_infomask2 
)
static

Definition at line 6399 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, mode, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6401 {
6402  int nmembers;
6403  MultiXactMember *members;
6404  int i;
6405  uint16 bits = HEAP_XMAX_IS_MULTI;
6406  uint16 bits2 = 0;
6407  bool has_update = false;
6408  LockTupleMode strongest = LockTupleKeyShare;
6409 
6410  /*
6411  * We only use this in multis we just created, so they cannot be values
6412  * pre-pg_upgrade.
6413  */
6414  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6415 
6416  for (i = 0; i < nmembers; i++)
6417  {
6419 
6420  /*
6421  * Remember the strongest lock mode held by any member of the
6422  * multixact.
6423  */
6424  mode = TUPLOCK_from_mxstatus(members[i].status);
6425  if (mode > strongest)
6426  strongest = mode;
6427 
6428  /* See what other bits we need */
6429  switch (members[i].status)
6430  {
6434  break;
6435 
6437  bits2 |= HEAP_KEYS_UPDATED;
6438  break;
6439 
6441  has_update = true;
6442  break;
6443 
6444  case MultiXactStatusUpdate:
6445  bits2 |= HEAP_KEYS_UPDATED;
6446  has_update = true;
6447  break;
6448  }
6449  }
6450 
6451  if (strongest == LockTupleExclusive ||
6452  strongest == LockTupleNoKeyExclusive)
6453  bits |= HEAP_XMAX_EXCL_LOCK;
6454  else if (strongest == LockTupleShare)
6455  bits |= HEAP_XMAX_SHR_LOCK;
6456  else if (strongest == LockTupleKeyShare)
6457  bits |= HEAP_XMAX_KEYSHR_LOCK;
6458 
6459  if (!has_update)
6460  bits |= HEAP_XMAX_LOCK_ONLY;
6461 
6462  if (nmembers > 0)
6463  pfree(members);
6464 
6465  *new_infomask = bits;
6466  *new_infomask2 = bits2;
6467 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:193
LockTupleMode
Definition: lockoptions.h:49
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:196
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:199
unsigned short uint16
Definition: c.h:358
void pfree(void *pointer)
Definition: mcxt.c:1056
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:195
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:195
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1204
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:226

◆ heap2_redo()

void heap2_redo ( XLogReaderState *  record)

Definition at line 8884 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

8885 {
8886  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
8887 
8888  switch (info & XLOG_HEAP_OPMASK)
8889  {
8890  case XLOG_HEAP2_CLEAN:
8891  heap_xlog_clean(record);
8892  break;
8894  heap_xlog_freeze_page(record);
8895  break;
8897  heap_xlog_cleanup_info(record);
8898  break;
8899  case XLOG_HEAP2_VISIBLE:
8900  heap_xlog_visible(record);
8901  break;
8903  heap_xlog_multi_insert(record);
8904  break;
8906  heap_xlog_lock_updated(record);
8907  break;
8908  case XLOG_HEAP2_NEW_CID:
8909 
8910  /*
8911  * Nothing to do on a real replay, only used during logical
8912  * decoding.
8913  */
8914  break;
8915  case XLOG_HEAP2_REWRITE:
8916  heap_xlog_logical_rewrite(record);
8917  break;
8918  default:
8919  elog(PANIC, "heap2_redo: unknown op code %u", info);
8920  }
8921 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1123
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:357
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8737
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:7951
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8219
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:279
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7697
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
#define elog(elevel,...)
Definition: elog.h:228
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:7811
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7718

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5567 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by heapam_tuple_complete_speculative(), and toast_delete_datum().

5568 {
5570  ItemId lp;
5571  HeapTupleData tp;
5572  Page page;
5573  BlockNumber block;
5574  Buffer buffer;
5575 
5576  Assert(ItemPointerIsValid(tid));
5577 
5578  block = ItemPointerGetBlockNumber(tid);
5579  buffer = ReadBuffer(relation, block);
5580  page = BufferGetPage(buffer);
5581 
5583 
5584  /*
5585  * Page can't be all visible, we just inserted into it, and are still
5586  * running.
5587  */
5588  Assert(!PageIsAllVisible(page));
5589 
5590  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
5591  Assert(ItemIdIsNormal(lp));
5592 
5593  tp.t_tableOid = RelationGetRelid(relation);
5594  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
5595  tp.t_len = ItemIdGetLength(lp);
5596  tp.t_self = *tid;
5597 
5598  /*
5599  * Sanity check that the tuple really is a speculatively inserted tuple,
5600  * inserted by us.
5601  */
5602  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
5603  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
5604  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
5605  elog(ERROR, "attempted to kill a non-speculative tuple");
5607 
5608  /*
5609  * No need to check for serializable conflicts here. There is never a
5610  * need for a combocid, either. No need to extract replica identity, or
5611  * do anything special with infomask bits.
5612  */
5613 
5615 
5616  /*
5617  * The tuple will become DEAD immediately. Flag that this page
5618  * immediately is a candidate for pruning by setting xmin to
5619  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
5620  * inventing a nicer API for this.
5621  */
5624 
5625  /* store transaction information of xact deleting the tuple */
5628 
5629  /*
5630  * Set the tuple header xmin to InvalidTransactionId. This makes the
5631  * tuple immediately invisible everyone. (In particular, to any
5632  * transactions waiting on the speculative token, woken up later.)
5633  */
5635 
5636  /* Clear the speculative insertion token too */
5637  tp.t_data->t_ctid = tp.t_self;
5638 
5639  MarkBufferDirty(buffer);
5640 
5641  /*
5642  * XLOG stuff
5643  *
5644  * The WAL records generated here match heap_delete(). The same recovery
5645  * routines are used.
5646  */
5647  if (RelationNeedsWAL(relation))
5648  {
5649  xl_heap_delete xlrec;
5650  XLogRecPtr recptr;
5651 
5652  xlrec.flags = XLH_DELETE_IS_SUPER;
5654  tp.t_data->t_infomask2);
5656  xlrec.xmax = xid;
5657 
5658  XLogBeginInsert();
5659  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
5660  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5661 
5662  /* No replica identity & replication origin logged */
5663 
5664  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
5665 
5666  PageSetLSN(page, recptr);
5667  }
5668 
5669  END_CRIT_SECTION();
5670 
5671  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
5672 
5673  if (HeapTupleHasExternal(&tp))
5674  {
5675  Assert(!IsToastRelation(relation));
5676  heap_toast_delete(relation, &tp, true);
5677  }
5678 
5679  /*
5680  * Never need to mark tuple for invalidation, since catalogs don't support
5681  * speculative insertion
5682  */
5683 
5684  /* Now we can release the buffer */
5685  ReleaseBuffer(buffer);
5686 
5687  /* count deletion, as we counted the insertion too */
5688  pgstat_count_heap_delete(relation);
5689 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
bool IsToastRelation(Relation relation)
Definition: catalog.c:141
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2397
HeapTupleFields t_heap
Definition: htup_details.h:156
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:514
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define PageSetPrunable(page, xid)
Definition: bufpage.h:398
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
OffsetNumber offnum
Definition: heapam_xlog.h:106
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:501
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:41
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
TransactionId RecentGlobalXmin
Definition: snapmgr.c:168
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
TransactionId t_xmin
Definition: htup_details.h:123
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_MOVED
Definition: htup_details.h:216
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:739
uint8 infobits_set
Definition: heapam_xlog.h:107
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:524
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1992
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:422
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:319

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 4664 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

4666 {
4667  if (*have_tuple_lock)
4668  return true;
4669 
4670  switch (wait_policy)
4671  {
4672  case LockWaitBlock:
4673  LockTupleTuplock(relation, tid, mode);
4674  break;
4675 
4676  case LockWaitSkip:
4677  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4678  return false;
4679  break;
4680 
4681  case LockWaitError:
4682  if (!ConditionalLockTupleTuplock(relation, tid, mode))
4683  ereport(ERROR,
4684  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4685  errmsg("could not obtain lock on row in relation \"%s\"",
4686  RelationGetRelationName(relation))));
4687  break;
4688  }
4689  *have_tuple_lock = true;
4690 
4691  return true;
4692 }
static PgChecksumMode mode
Definition: pg_checksums.c:61
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:159
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:163
int errcode(int sqlerrcode)
Definition: elog.c:608
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:456
#define ereport(elevel, rest)
Definition: elog.h:141
int errmsg(const char *fmt,...)
Definition: elog.c:822

◆ heap_beginscan()

TableScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelTableScanDesc  parallel_scan,
uint32  flags 
)

Definition at line 1132 of file heapam.c.

References Assert, initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_parallel, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_ALLOW_PAGEMODE, SO_TYPE_SAMPLESCAN, SO_TYPE_SEQSCAN, and HeapTupleData::t_tableOid.

Referenced by SampleHeapTupleVisible().

1136 {
1137  HeapScanDesc scan;
1138 
1139  /*
1140  * increment relation ref count while scanning relation
1141  *
1142  * This is just to make really sure the relcache entry won't go away while
1143  * the scan has a pointer to it. Caller should be holding the rel open
1144  * anyway, so this is redundant in all normal scenarios...
1145  */
1147 
1148  /*
1149  * allocate and initialize scan descriptor
1150  */
1151  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1152 
1153  scan->rs_base.rs_rd = relation;
1154  scan->rs_base.rs_snapshot = snapshot;
1155  scan->rs_base.rs_nkeys = nkeys;
1156  scan->rs_base.rs_flags = flags;
1157  scan->rs_base.rs_parallel = parallel_scan;
1158  scan->rs_strategy = NULL; /* set in initscan */
1159 
1160  /*
1161  * Disable page-at-a-time mode if it's not a MVCC-safe snapshot.
1162  */
1163  if (!(snapshot && IsMVCCSnapshot(snapshot)))
1165 
1166  /*
1167  * For seqscan and sample scans in a serializable transaction, acquire a
1168  * predicate lock on the entire relation. This is required not only to
1169  * lock all the matching tuples, but also to conflict with new insertions
1170  * into the table. In an indexscan, we take page locks on the index pages
1171  * covering the range specified in the scan qual, but in a heap scan there
1172  * is nothing more fine-grained to lock. A bitmap scan is a different
1173  * story, there we have already scanned the index and locked the index
1174  * pages covering the predicate. But in that case we still have to lock
1175  * any matching heap tuples. For sample scan we could optimize the locking
1176  * to be at least page-level granularity, but we'd need to add per-tuple
1177  * locking for that.
1178  */
1180  {
1181  /*
1182  * Ensure a missing snapshot is noticed reliably, even if the
1183  * isolation mode means predicate locking isn't performed (and
1184  * therefore the snapshot isn't used here).
1185  */
1186  Assert(snapshot);
1187  PredicateLockRelation(relation, snapshot);
1188  }
1189 
1190  /* we only need to set this up once */
1191  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1192 
1193  /*
1194  * we do this here instead of in initscan() because heap_rescan also calls
1195  * initscan() and we don't want to allocate memory again
1196  */
1197  if (nkeys > 0)
1198  scan->rs_base.rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1199  else
1200  scan->rs_base.rs_key = NULL;
1201 
1202  initscan(scan, key, false);
1203 
1204  return (TableScanDesc) scan;
1205 }
TableScanDescData rs_base
Definition: heapam.h:48
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2503
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:72
HeapTupleData rs_ctup
Definition: heapam.h:65
ScanKeyData * ScanKey
Definition: skey.h:75
Oid t_tableOid
Definition: htup.h:66
struct ScanKeyData * rs_key
Definition: relscan.h:37
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2055
BufferAccessStrategy rs_strategy
Definition: heapam.h:63
#define IsMVCCSnapshot(snapshot)
Definition: snapmgr.h:97
#define Assert(condition)
Definition: c.h:739
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
void * palloc(Size size)
Definition: mcxt.c:949
struct ParallelTableScanDescData * rs_parallel
Definition: relscan.h:45
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:208
#define RelationGetRelid(relation)
Definition: rel.h:422

◆ heap_compute_xid_horizon_for_tuples()

TransactionId heap_compute_xid_horizon_for_tuples ( Relation  rel,
ItemPointerData tids,
int  nitems 
)

Definition at line 6986 of file heapam.c.

References Assert, buf, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, CHECK_FOR_INTERRUPTS, effective_io_concurrency, get_tablespace_io_concurrency(), HeapTupleHeaderAdvanceLatestRemovedXid(), i, InvalidBlockNumber, InvalidBuffer, InvalidTransactionId, IsCatalogRelation(), ItemIdGetRedirect, ItemIdHasStorage, ItemIdIsDead, ItemIdIsRedirected, ItemIdIsUsed, ItemPointerCompare(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MAX_IO_CONCURRENCY, Min, PageGetItem, PageGetItemId, qsort, RelationData::rd_rel, ReadBuffer(), and ReleaseBuffer().

Referenced by SampleHeapTupleVisible().

6989 {
6990  TransactionId latestRemovedXid = InvalidTransactionId;
6991  BlockNumber hblkno;
6993  Page hpage;
6994 #ifdef USE_PREFETCH
6995  XidHorizonPrefetchState prefetch_state;
6996  int io_concurrency;
6997  int prefetch_distance;
6998 #endif
6999 
7000  /*
7001  * Sort to avoid repeated lookups for the same page, and to make it more
7002  * likely to access items in an efficient order. In particular, this
7003  * ensures that if there are multiple pointers to the same page, they all
7004  * get processed looking up and locking the page just once.
7005  */
7006  qsort((void *) tids, nitems, sizeof(ItemPointerData),
7007  (int (*) (const void *, const void *)) ItemPointerCompare);
7008 
7009 #ifdef USE_PREFETCH
7010  /* Initialize prefetch state. */
7011  prefetch_state.cur_hblkno = InvalidBlockNumber;
7012  prefetch_state.next_item = 0;
7013  prefetch_state.nitems = nitems;
7014  prefetch_state.tids = tids;
7015 
7016  /*
7017  * Compute the prefetch distance that we will attempt to maintain.
7018  *
7019  * We don't use the regular formula to determine how much to prefetch
7020  * here, but instead just add a constant to effective_io_concurrency.
7021  * That's because it seems best to do some prefetching here even when
7022  * effective_io_concurrency is set to 0, but if the DBA thinks it's OK to
7023  * do more prefetching for other operations, then it's probably OK to do
7024  * more prefetching in this case, too. It may be that this formula is too
7025  * simplistic, but at the moment there is no evidence of that or any idea
7026  * about what would work better.
7027  *
7028  * Since the caller holds a buffer lock somewhere in rel, we'd better make
7029  * sure that isn't a catalog relation before we call code that does
7030  * syscache lookups, to avoid risk of deadlock.
7031  */
7032  if (IsCatalogRelation(rel))
7033  io_concurrency = effective_io_concurrency;
7034  else
7035  io_concurrency = get_tablespace_io_concurrency(rel->rd_rel->reltablespace);
7036  prefetch_distance = Min((io_concurrency) + 10, MAX_IO_CONCURRENCY);
7037 
7038  /* Start prefetching. */
7039  xid_horizon_prefetch_buffer(rel, &prefetch_state, prefetch_distance);
7040 #endif
7041 
7042  /* Iterate over all tids, and check their horizon */
7043  hblkno = InvalidBlockNumber;
7044  hpage = NULL;
7045  for (int i = 0; i < nitems; i++)
7046  {
7047  ItemPointer htid = &tids[i];
7048  ItemId hitemid;
7049  OffsetNumber hoffnum;
7050 
7051  /*
7052  * Read heap buffer, but avoid refetching if it's the same block as
7053  * required for the last tid.
7054  */
7055  if (hblkno == InvalidBlockNumber ||
7056  ItemPointerGetBlockNumber(htid) != hblkno)
7057  {
7058  /* release old buffer */
7059  if (BufferIsValid(buf))
7060  {
7062  ReleaseBuffer(buf);
7063  }
7064 
7065  hblkno = ItemPointerGetBlockNumber(htid);
7066 
7067  buf = ReadBuffer(rel, hblkno);
7068 
7069 #ifdef USE_PREFETCH
7070 
7071  /*
7072  * To maintain the prefetch distance, prefetch one more page for
7073  * each page we read.
7074  */
7075  xid_horizon_prefetch_buffer(rel, &prefetch_state, 1);
7076 #endif
7077 
7078  hpage = BufferGetPage(buf);
7079 
7081  }
7082 
7083  hoffnum = ItemPointerGetOffsetNumber(htid);
7084  hitemid = PageGetItemId(hpage, hoffnum);
7085 
7086  /*
7087  * Follow any redirections until we find something useful.
7088  */
7089  while (ItemIdIsRedirected(hitemid))
7090  {
7091  hoffnum = ItemIdGetRedirect(hitemid);
7092  hitemid = PageGetItemId(hpage, hoffnum);
7094  }
7095 
7096  /*
7097  * If the heap item has storage, then read the header and use that to
7098  * set latestRemovedXid.
7099  *
7100  * Some LP_DEAD items may not be accessible, so we ignore them.
7101  */
7102  if (ItemIdHasStorage(hitemid))
7103  {
7104  HeapTupleHeader htuphdr;
7105 
7106  htuphdr = (HeapTupleHeader) PageGetItem(hpage, hitemid);
7107 
7108  HeapTupleHeaderAdvanceLatestRemovedXid(htuphdr, &latestRemovedXid);
7109  }
7110  else if (ItemIdIsDead(hitemid))
7111  {
7112  /*
7113  * Conjecture: if hitemid is dead then it had xids before the xids
7114  * marked on LP_NORMAL items. So we just ignore this item and move
7115  * onto the next, for the purposes of calculating
7116  * latestRemovedXid.
7117  */
7118  }
7119  else
7120  Assert(!ItemIdIsUsed(hitemid));
7121 
7122  }
7123 
7124  if (BufferIsValid(buf))
7125  {
7127  ReleaseBuffer(buf);
7128  }
7129 
7130  /*
7131  * If all heap tuples were LP_DEAD then we will be returning
7132  * InvalidTransactionId here, which avoids conflicts. This matches
7133  * existing logic which assumes that LP_DEAD tuples must already be older
7134  * than the latestRemovedXid on the cleanup record that set them as
7135  * LP_DEAD, hence must already have generated a conflict.
7136  */
7137 
7138  return latestRemovedXid;
7139 }
int32 ItemPointerCompare(ItemPointer arg1, ItemPointer arg2)
Definition: itemptr.c:52
void HeapTupleHeaderAdvanceLatestRemovedXid(HeapTupleHeader tuple, TransactionId *latestRemovedXid)
Definition: heapam.c:6895
#define MAX_IO_CONCURRENCY
Definition: bufmgr.h:78
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
bool IsCatalogRelation(Relation relation)
Definition: catalog.c:99
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
uint32 TransactionId
Definition: c.h:514
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
#define Min(x, y)
Definition: c.h:911
#define ItemIdIsUsed(itemId)
Definition: itemid.h:92
#define InvalidBuffer
Definition: buf.h:25
int get_tablespace_io_concurrency(Oid spcid)
Definition: spccache.c:215
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
Form_pg_class rd_rel
Definition: rel.h:83
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
int effective_io_concurrency
Definition: bufmgr.c:113
uint16 OffsetNumber
Definition: off.h:24
static char * buf
Definition: pg_test_fsync.c:67
#define InvalidTransactionId
Definition: transam.h:31
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define ItemIdHasStorage(itemId)
Definition: itemid.h:120
#define Assert(condition)
Definition: c.h:739
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define InvalidBlockNumber
Definition: block.h:33
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
int i
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:87
#define CHECK_FOR_INTERRUPTS()
Definition: miscadmin.h:99
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define qsort(a, b, c, d)
Definition: port.h:488
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_delete()

TM_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
TM_FailureData tmfd,
bool  changingPart 
)

Definition at line 2442 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), TM_FailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, heap_toast_delete(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility(), xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, 
HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, xl_heap_delete::xmax, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_delete(), and simple_heap_delete().

2445 {
2446  TM_Result result;
2448  ItemId lp;
2449  HeapTupleData tp;
2450  Page page;
2451  BlockNumber block;
2452  Buffer buffer;
2453  Buffer vmbuffer = InvalidBuffer;
2454  TransactionId new_xmax;
2455  uint16 new_infomask,
2456  new_infomask2;
2457  bool have_tuple_lock = false;
2458  bool iscombo;
2459  bool all_visible_cleared = false;
2460  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
2461  bool old_key_copied = false;
2462 
2463  Assert(ItemPointerIsValid(tid));
2464 
2465  /*
2466  * Forbid this during a parallel operation, lest it allocate a combocid.
2467  * Other workers might need that combocid for visibility checks, and we
2468  * have no provision for broadcasting it to them.
2469  */
2470  if (IsInParallelMode())
2471  ereport(ERROR,
2472  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
2473  errmsg("cannot delete tuples during a parallel operation")));
2474 
2475  block = ItemPointerGetBlockNumber(tid);
2476  buffer = ReadBuffer(relation, block);
2477  page = BufferGetPage(buffer);
2478 
2479  /*
2480  * Before locking the buffer, pin the visibility map page if it appears to
2481  * be necessary. Since we haven't got the lock yet, someone else might be
2482  * in the middle of changing this, so we'll need to recheck after we have
2483  * the lock.
2484  */
2485  if (PageIsAllVisible(page))
2486  visibilitymap_pin(relation, block, &vmbuffer);
2487 
2489 
2490  /*
2491  * If we didn't pin the visibility map page and the page has become all
2492  * visible while we were busy locking the buffer, we'll have to unlock and
2493  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
2494  * unfortunate, but hopefully shouldn't happen often.
2495  */
2496  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
2497  {
2498  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2499  visibilitymap_pin(relation, block, &vmbuffer);
2501  }
2502 
2503  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
2504  Assert(ItemIdIsNormal(lp));
2505 
2506  tp.t_tableOid = RelationGetRelid(relation);
2507  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2508  tp.t_len = ItemIdGetLength(lp);
2509  tp.t_self = *tid;
2510 
2511 l1:
2512  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
2513 
2514  if (result == TM_Invisible)
2515  {
2516  UnlockReleaseBuffer(buffer);
2517  ereport(ERROR,
2518  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
2519  errmsg("attempted to delete invisible tuple")));
2520  }
2521  else if (result == TM_BeingModified && wait)
2522  {
2523  TransactionId xwait;
2524  uint16 infomask;
2525 
2526  /* must copy state data before unlocking buffer */
2527  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
2528  infomask = tp.t_data->t_infomask;
2529 
2530  /*
2531  * Sleep until concurrent transaction ends -- except when there's a
2532  * single locker and it's our own transaction. Note we don't care
2533  * which lock mode the locker has, because we need the strongest one.
2534  *
2535  * Before sleeping, we need to acquire tuple lock to establish our
2536  * priority for the tuple (see heap_lock_tuple). LockTuple will
2537  * release us when we are next-in-line for the tuple.
2538  *
2539  * If we are forced to "start over" below, we keep the tuple lock;
2540  * this arranges that we stay at the head of the line while rechecking
2541  * tuple state.
2542  */
2543  if (infomask & HEAP_XMAX_IS_MULTI)
2544  {
2545  bool current_is_member = false;
2546 
2547  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
2548  LockTupleExclusive, &current_is_member))
2549  {
2550  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2551 
2552  /*
2553  * Acquire the lock, if necessary (but skip it when we're
2554  * requesting a lock and already have one; avoids deadlock).
2555  */
2556  if (!current_is_member)
2558  LockWaitBlock, &have_tuple_lock);
2559 
2560  /* wait for multixact */
2562  relation, &(tp.t_self), XLTW_Delete,
2563  NULL);
2565 
2566  /*
2567  * If xwait had just locked the tuple then some other xact
2568  * could update this tuple before we get to this point. Check
2569  * for xmax change, and start over if so.
2570  */
2571  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2573  xwait))
2574  goto l1;
2575  }
2576 
2577  /*
2578  * You might think the multixact is necessarily done here, but not
2579  * so: it could have surviving members, namely our own xact or
2580  * other subxacts of this backend. It is legal for us to delete
2581  * the tuple in either case, however (the latter case is
2582  * essentially a situation of upgrading our former shared lock to
2583  * exclusive). We don't bother changing the on-disk hint bits
2584  * since we are about to overwrite the xmax altogether.
2585  */
2586  }
2587  else if (!TransactionIdIsCurrentTransactionId(xwait))
2588  {
2589  /*
2590  * Wait for regular transaction to end; but first, acquire tuple
2591  * lock.
2592  */
2593  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2595  LockWaitBlock, &have_tuple_lock);
2596  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
2598 
2599  /*
2600  * xwait is done, but if xwait had just locked the tuple then some
2601  * other xact could update this tuple before we get to this point.
2602  * Check for xmax change, and start over if so.
2603  */
2604  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
2606  xwait))
2607  goto l1;
2608 
2609  /* Otherwise check if it committed or aborted */
2610  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
2611  }
2612 
2613  /*
2614  * We may overwrite if previous xmax aborted, or if it committed but
2615  * only locked the tuple without updating it.
2616  */
2617  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2620  result = TM_Ok;
2621  else if (!ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid) ||
2623  result = TM_Updated;
2624  else
2625  result = TM_Deleted;
2626  }
2627 
2628  if (crosscheck != InvalidSnapshot && result == TM_Ok)
2629  {
2630  /* Perform additional check for transaction-snapshot mode RI updates */
2631  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
2632  result = TM_Updated;
2633  }
2634 
2635  if (result != TM_Ok)
2636  {
2637  Assert(result == TM_SelfModified ||
2638  result == TM_Updated ||
2639  result == TM_Deleted ||
2640  result == TM_BeingModified);
2642  Assert(result != TM_Updated ||
2643  !ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid));
2644  tmfd->ctid = tp.t_data->t_ctid;
2646  if (result == TM_SelfModified)
2647  tmfd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
2648  else
2649  tmfd->cmax = InvalidCommandId;
2650  UnlockReleaseBuffer(buffer);
2651  if (have_tuple_lock)
2652  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2653  if (vmbuffer != InvalidBuffer)
2654  ReleaseBuffer(vmbuffer);
2655  return result;
2656  }
2657 
2658  /*
2659  * We're about to do the actual delete -- check for conflict first, to
2660  * avoid possibly having to roll back work we've just done.
2661  *
2662  * This is safe without a recheck as long as there is no possibility of
2663  * another process scanning the page between this check and the delete
2664  * being visible to the scan (i.e., an exclusive buffer content lock is
2665  * continuously held from this point until the tuple delete is visible).
2666  */
2667  CheckForSerializableConflictIn(relation, &tp, buffer);
2668 
2669  /* replace cid with a combo cid if necessary */
2670  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
2671 
2672  /*
2673  * Compute replica identity tuple before entering the critical section so
2674  * we don't PANIC upon a memory allocation failure.
2675  */
2676  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
2677 
2678  /*
2679  * If this is the first possibly-multixact-able operation in the current
2680  * transaction, set my per-backend OldestMemberMXactId setting. We can be
2681  * certain that the transaction will never become a member of any older
2682  * MultiXactIds than that. (We have to do this even if we end up just
2683  * using our own TransactionId below, since some other backend could
2684  * incorporate our XID into a MultiXact immediately afterwards.)
2685  */
2687 
2690  xid, LockTupleExclusive, true,
2691  &new_xmax, &new_infomask, &new_infomask2);
2692 
2694 
2695  /*
2696  * If this transaction commits, the tuple will become DEAD sooner or
2697  * later. Set flag that this page is a candidate for pruning once our xid
2698  * falls below the OldestXmin horizon. If the transaction finally aborts,
2699  * the subsequent page pruning will be a no-op and the hint will be
2700  * cleared.
2701  */
2702  PageSetPrunable(page, xid);
2703 
2704  if (PageIsAllVisible(page))
2705  {
2706  all_visible_cleared = true;
2707  PageClearAllVisible(page);
2708  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
2709  vmbuffer, VISIBILITYMAP_VALID_BITS);
2710  }
2711 
2712  /* store transaction information of xact deleting the tuple */
2715  tp.t_data->t_infomask |= new_infomask;
2716  tp.t_data->t_infomask2 |= new_infomask2;
2718  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
2719  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
2720  /* Make sure there is no forward chain link in t_ctid */
2721  tp.t_data->t_ctid = tp.t_self;
2722 
2723  /* Signal that this is actually a move into another partition */
2724  if (changingPart)
2726 
2727  MarkBufferDirty(buffer);
2728 
2729  /*
2730  * XLOG stuff
2731  *
2732  * NB: heap_abort_speculative() uses the same xlog record and replay
2733  * routines.
2734  */
2735  if (RelationNeedsWAL(relation))
2736  {
2737  xl_heap_delete xlrec;
2738  xl_heap_header xlhdr;
2739  XLogRecPtr recptr;
2740 
2741  /* For logical decode we need combocids to properly decode the catalog */
2743  log_heap_new_cid(relation, &tp);
2744 
2745  xlrec.flags = 0;
2746  if (all_visible_cleared)
2748  if (changingPart)
2751  tp.t_data->t_infomask2);
2753  xlrec.xmax = new_xmax;
2754 
2755  if (old_key_tuple != NULL)
2756  {
2757  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
2759  else
2761  }
2762 
2763  XLogBeginInsert();
2764  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
2765 
2766  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
2767 
2768  /*
2769  * Log replica identity of the deleted tuple if there is one
2770  */
2771  if (old_key_tuple != NULL)
2772  {
2773  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
2774  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
2775  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
2776 
2777  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
2778  XLogRegisterData((char *) old_key_tuple->t_data
2780  old_key_tuple->t_len
2782  }
2783 
2784  /* filtering by origin on a row level is much more efficient */
2786 
2787  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
2788 
2789  PageSetLSN(page, recptr);
2790  }
2791 
2792  END_CRIT_SECTION();
2793 
2794  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2795 
2796  if (vmbuffer != InvalidBuffer)
2797  ReleaseBuffer(vmbuffer);
2798 
2799  /*
2800  * If the tuple has toasted out-of-line attributes, we need to delete
2801  * those items too. We have to do this before releasing the buffer
2802  * because we need to look at the contents of the tuple, but it's OK to
2803  * release the content lock on the buffer first.
2804  */
2805  if (relation->rd_rel->relkind != RELKIND_RELATION &&
2806  relation->rd_rel->relkind != RELKIND_MATVIEW)
2807  {
2808  /* toast table entries should never be recursively toasted */
2810  }
2811  else if (HeapTupleHasExternal(&tp))
2812  heap_toast_delete(relation, &tp, false);
2813 
2814  /*
2815  * Mark tuple for invalidation from system caches at next command
2816  * boundary. We have to do this before releasing the buffer because we
2817  * need to look at the contents of the tuple.
2818  */
2819  CacheInvalidateHeapTuple(relation, &tp, NULL);
2820 
2821  /* Now we can release the buffer */
2822  ReleaseBuffer(buffer);
2823 
2824  /*
2825  * Release the lmgr tuple lock, if we had it.
2826  */
2827  if (have_tuple_lock)
2828  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
2829 
2830  pgstat_count_heap_delete(relation);
2831 
2832  if (old_key_tuple != NULL && old_key_copied)
2833  heap_freetuple(old_key_tuple);
2834 
2835  return TM_Ok;
2836 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
ItemPointerData ctid
Definition: tableam.h:123
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7523
#define HEAP_XMAX_BITS
Definition: htup_details.h:270
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2397
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:514
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:853
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_changed, bool *copy)
Definition: heapam.c:7605
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2419
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:496
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
CommandId cmax
Definition: tableam.h:125
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define PageSetPrunable(page, xid)
Definition: bufpage.h:398
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:608
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:228
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
Form_pg_class rd_rel
Definition: rel.h:83
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
TM_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4426
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:161
OffsetNumber offnum
Definition: heapam_xlog.h:106
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:625
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:375
unsigned short uint16
Definition: c.h:358
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:996
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
TransactionId xmax
Definition: tableam.h:124
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:449
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:6725
void heap_toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: heaptoast.c:41
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:405
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define ereport(elevel, rest)
Definition: elog.h:141
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:4713
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
#define InvalidSnapshot
Definition: snapshot.h:123
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
TM_Result
Definition: tableam.h:68
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:578
#define InvalidCommandId
Definition: c.h:531
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:230
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:278
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:208
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:1793
#define HEAP_MOVED
Definition: htup_details.h:216
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:4664
TransactionId MultiXactId
Definition: c.h:524
#define PageClearAllVisible(page)
Definition: bufpage.h:389
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:624
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:739
uint8 infobits_set
Definition: heapam_xlog.h:107
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:118
Definition: tableam.h:74
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
uint16 t_infomask
Definition: heapam_xlog.h:145
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode, bool *current_is_member)
Definition: heapam.c:6548
#define RelationNeedsWAL(relation)
Definition: rel.h:524
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1992
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:153
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2613
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:673
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:96
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:422
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93

◆ heap_endscan()

void heap_endscan ( TableScanDesc  sscan)

Definition at line 1245 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, SO_TEMP_SNAPSHOT, and UnregisterSnapshot().

Referenced by SampleHeapTupleVisible().

1246 {
1247  HeapScanDesc scan = (HeapScanDesc) sscan;
1248 
1249  /* Note: no locking manipulations needed */
1250 
1251  /*
1252  * unpin scan buffers
1253  */
1254  if (BufferIsValid(scan->rs_cbuf))
1255  ReleaseBuffer(scan->rs_cbuf);
1256 
1257  /*
1258  * decrement relation reference count and free scan descriptor storage
1259  */
1261 
1262  if (scan->rs_base.rs_key)
1263  pfree(scan->rs_base.rs_key);
1264 
1265  if (scan->rs_strategy != NULL)
1267 
1268  if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
1270 
1271  pfree(scan);
1272 }
TableScanDescData rs_base
Definition: heapam.h:48
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:72
void pfree(void *pointer)
Definition: mcxt.c:1056
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2068
struct ScanKeyData * rs_key
Definition: relscan.h:37
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:907
BufferAccessStrategy rs_strategy
Definition: heapam.h:63
Buffer rs_cbuf
Definition: heapam.h:59
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:113
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6347 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6348 {
6349  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6350 
6351  if (frz->frzflags & XLH_FREEZE_XVAC)
6353 
6354  if (frz->frzflags & XLH_INVALID_XVAC)
6356 
6357  tuple->t_infomask = frz->t_infomask;
6358  tuple->t_infomask2 = frz->t_infomask2;
6359 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:423
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:380
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:320
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:316
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:315

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf 
)

Definition at line 1412 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility(), InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by heap_lock_updated_tuple_rec(), heapam_fetch_row_version(), and heapam_tuple_lock().

1416 {
1417  ItemPointer tid = &(tuple->t_self);
1418  ItemId lp;
1419  Buffer buffer;
1420  Page page;
1421  OffsetNumber offnum;
1422  bool valid;
1423 
1424  /*
1425  * Fetch and pin the appropriate page of the relation.
1426  */
1427  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1428 
1429  /*
1430  * Need share lock on buffer to examine tuple commit status.
1431  */
1432  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1433  page = BufferGetPage(buffer);
1434  TestForOldSnapshot(snapshot, relation, page);
1435 
1436  /*
1437  * We'd better check for out-of-range offnum in case of VACUUM since the
1438  * TID was obtained.
1439  */
1440  offnum = ItemPointerGetOffsetNumber(tid);
1441  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1442  {
1443  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1444  ReleaseBuffer(buffer);
1445  *userbuf = InvalidBuffer;
1446  tuple->t_data = NULL;
1447  return false;
1448  }
1449 
1450  /*
1451  * get the item line pointer corresponding to the requested tid
1452  */
1453  lp = PageGetItemId(page, offnum);
1454 
1455  /*
1456  * Must check for deleted tuple.
1457  */
1458  if (!ItemIdIsNormal(lp))
1459  {
1460  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1461  ReleaseBuffer(buffer);
1462  *userbuf = InvalidBuffer;
1463  tuple->t_data = NULL;
1464  return false;
1465  }
1466 
1467  /*
1468  * fill in *tuple fields
1469  */
1470  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1471  tuple->t_len = ItemIdGetLength(lp);
1472  tuple->t_tableOid = RelationGetRelid(relation);
1473 
1474  /*
1475  * check tuple visibility, then release lock
1476  */
1477  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1478 
1479  if (valid)
1480  PredicateLockTuple(relation, tuple, snapshot);
1481 
1482  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1483 
1484  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1485 
1486  if (valid)
1487  {
1488  /*
1489  * All checks passed, so return the tuple as valid. Caller is now
1490  * responsible for releasing the buffer.
1491  */
1492  *userbuf = buffer;
1493 
1494  return true;
1495  }
1496 
1497  /* Tuple failed time qual */
1498  ReleaseBuffer(buffer);
1499  *userbuf = InvalidBuffer;
1500 
1501  return false;
1502 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:86
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:264
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:4041
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2548
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:87
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:422
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
ItemPointer  tid 
)

Definition at line 5476 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_complete_speculative().

5477 {
5478  Buffer buffer;
5479  Page page;
5480  OffsetNumber offnum;
5481  ItemId lp = NULL;
5482  HeapTupleHeader htup;
5483 
5484  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
5486  page = (Page) BufferGetPage(buffer);
5487 
5488  offnum = ItemPointerGetOffsetNumber(tid);
5489  if (PageGetMaxOffsetNumber(page) >= offnum)
5490  lp = PageGetItemId(page, offnum);
5491 
5492  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5493  elog(ERROR, "invalid lp");
5494 
5495  htup = (HeapTupleHeader) PageGetItem(page, lp);
5496 
5497  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
5499  "invalid speculative token constant");
5500 
5501  /* NO EREPORT(ERROR) from here till changes are logged */
5503 
5505 
5506  MarkBufferDirty(buffer);
5507 
5508  /*
5509  * Replace the speculative insertion token with a real t_ctid, pointing to
5510  * itself like it does on regular tuples.
5511  */
5512  htup->t_ctid = *tid;
5513 
5514  /* XLOG stuff */
5515  if (RelationNeedsWAL(relation))
5516  {
5517  xl_heap_confirm xlrec;
5518  XLogRecPtr recptr;
5519 
5520  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
5521 
5522  XLogBeginInsert();
5523 
5524  /* We want the same filtering on this as on a plain insert */
5526 
5527  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
5528  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5529 
5530  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
5531 
5532  PageSetLSN(page, recptr);
5533  }
5534 
5535  END_CRIT_SECTION();
5536 
5537  UnlockReleaseBuffer(buffer);
5538 }
OffsetNumber offnum
Definition: heapam_xlog.h:296
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:429
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:228
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
#define SpecTokenOffsetNumber
Definition: itemptr.h:63
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:849
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:160
#define REGBUF_STANDARD
Definition: xloginsert.h:35
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:299
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:739
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:524
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6368 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6371 {
6373  bool do_freeze;
6374  bool tuple_totally_frozen;
6375 
6376  do_freeze = heap_prepare_freeze_tuple(tuple,
6378  cutoff_xid, cutoff_multi,
6379  &frz, &tuple_totally_frozen);
6380 
6381  /*
6382  * Note that because this is not a WAL-logged operation, we don't need to
6383  * fill in the offset in the freeze record.
6384  */
6385 
6386  if (do_freeze)
6387  heap_execute_freeze_tuple(tuple, &frz);
6388  return do_freeze;
6389 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6118
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:6347
TransactionId relminmxid
Definition: pg_class.h:126
TransactionId relfrozenxid
Definition: pg_class.h:123

◆ heap_get_latest_tid()

void heap_get_latest_tid ( TableScanDesc  sscan,
ItemPointer  tid 
)

Definition at line 1670 of file heapam.c.

References Assert, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetRelid, TableScanDescData::rs_rd, TableScanDescData::rs_snapshot, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by SampleHeapTupleVisible().

1672 {
1673  Relation relation = sscan->rs_rd;
1674  Snapshot snapshot = sscan->rs_snapshot;
1675  ItemPointerData ctid;
1676  TransactionId priorXmax;
1677 
1678  /*
1679  * table_get_latest_tid verified that the passed in tid is valid. Assume
1680  * that t_ctid links are valid however - there shouldn't be invalid ones
1681  * in the table.
1682  */
1683  Assert(ItemPointerIsValid(tid));
1684 
1685  /*
1686  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
1687  * need to examine, and *tid is the TID we will return if ctid turns out
1688  * to be bogus.
1689  *
1690  * Note that we will loop until we reach the end of the t_ctid chain.
1691  * Depending on the snapshot passed, there might be at most one visible
1692  * version of the row, but we don't try to optimize for that.
1693  */
1694  ctid = *tid;
1695  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
1696  for (;;)
1697  {
1698  Buffer buffer;
1699  Page page;
1700  OffsetNumber offnum;
1701  ItemId lp;
1702  HeapTupleData tp;
1703  bool valid;
1704 
1705  /*
1706  * Read, pin, and lock the page.
1707  */
1708  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
1709  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1710  page = BufferGetPage(buffer);
1711  TestForOldSnapshot(snapshot, relation, page);
1712 
1713  /*
1714  * Check for bogus item number. This is not treated as an error
1715  * condition because it can happen while following a t_ctid link. We
1716  * just assume that the prior tid is OK and return it unchanged.
1717  */
1718  offnum = ItemPointerGetOffsetNumber(&ctid);
1719  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1720  {
1721  UnlockReleaseBuffer(buffer);
1722  break;
1723  }
1724  lp = PageGetItemId(page, offnum);
1725  if (!ItemIdIsNormal(lp))
1726  {
1727  UnlockReleaseBuffer(buffer);
1728  break;
1729  }
1730 
1731  /* OK to access the tuple */
1732  tp.t_self = ctid;
1733  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
1734  tp.t_len = ItemIdGetLength(lp);
1735  tp.t_tableOid = RelationGetRelid(relation);
1736 
1737  /*
1738  * After following a t_ctid link, we might arrive at an unrelated
1739  * tuple. Check for XMIN match.
1740  */
1741  if (TransactionIdIsValid(priorXmax) &&
1743  {
1744  UnlockReleaseBuffer(buffer);
1745  break;
1746  }
1747 
1748  /*
1749  * Check tuple visibility; if visible, set it as the new result
1750  * candidate.
1751  */
1752  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
1753  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
1754  if (valid)
1755  *tid = ctid;
1756 
1757  /*
1758  * If there's a valid t_ctid link, follow it, else we're done.
1759  */
1760  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
1764  {
1765  UnlockReleaseBuffer(buffer);
1766  break;
1767  }
1768 
1769  ctid = tp.t_data->t_ctid;
1770  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
1771  UnlockReleaseBuffer(buffer);
1772  } /* end of loop */
1773 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:82
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:264
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:514
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:445
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:4041
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
#define HEAP_XMAX_INVALID
Definition: htup_details.h:207
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
#define Assert(condition)
Definition: c.h:739
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
Relation rs_rd
Definition: relscan.h:34
struct SnapshotData * rs_snapshot
Definition: relscan.h:35
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:87
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:422
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_getnext()

HeapTuple heap_getnext ( TableScanDesc  sscan,
ScanDirection  direction 
)

Definition at line 1290 of file heapam.c.

References ereport, errcode(), errmsg_internal(), ERROR, GetHeapamTableAmRoutine(), HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, RelationData::rd_tableam, HeapScanDescData::rs_base, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, HeapTupleData::t_data, and unlikely.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), heapam_index_build_range_scan(), heapam_index_validate_scan(), index_update_stats(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1291 {
1292  HeapScanDesc scan = (HeapScanDesc) sscan;
1293 
1294  /*
1295  * This is still widely used directly, without going through table AM, so
1296  * add a safety check. It's possible we should, at a later point,
1297  * downgrade this to an assert. The reason for checking the AM routine,
1298  * rather than the AM oid, is that this allows to write regression tests
1299  * that create another AM reusing the heap handler.
1300  */
1302  ereport(ERROR,
1303  (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
1304  errmsg_internal("only heap AM is supported")));
1305 
1306  /* Note: no locking manipulations needed */
1307 
1308  HEAPDEBUG_1; /* heap_getnext( info ) */
1309 
1310  if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
1311  heapgettup_pagemode(scan, direction,
1312  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1313  else
1314  heapgettup(scan, direction,
1315  scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
1316 
1317  if (scan->rs_ctup.t_data == NULL)
1318  {
1319  HEAPDEBUG_2; /* heap_getnext returning EOS */
1320  return NULL;
1321  }
1322 
1323  /*
1324  * if we get here it means we have a new current scan tuple, so point to
1325  * the proper return buffer and return the tuple.
1326  */
1327  HEAPDEBUG_3; /* heap_getnext returning tuple */
1328 
1330 
1331  return &scan->rs_ctup;
1332 }
TableScanDescData rs_base
Definition: heapam.h:48
int errcode(int sqlerrcode)
Definition: elog.c:608
uint32 rs_flags
Definition: relscan.h:43
#define HEAPDEBUG_2
Definition: heapam.c:1284
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:72
HeapTupleData rs_ctup
Definition: heapam.h:65
HeapTupleHeader t_data
Definition: htup.h:68
#define ERROR
Definition: elog.h:43
struct ScanKeyData * rs_key
Definition: relscan.h:37
#define ereport(elevel, rest)
Definition: elog.h:141
#define HEAPDEBUG_1
Definition: heapam.c:1283
const struct TableAmRoutine * rd_tableam
Definition: rel.h:140
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:487
int errmsg_internal(const char *fmt,...)
Definition: elog.c:909
#define HEAPDEBUG_3
Definition: heapam.c:1285
Relation rs_rd
Definition: relscan.h:34
#define unlikely(x)
Definition: c.h:208
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1363
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:801
const TableAmRoutine * GetHeapamTableAmRoutine(void)

◆ heap_getnextslot()

bool heap_getnextslot ( TableScanDesc  sscan,
ScanDirection  direction,
TupleTableSlot slot 
)

Definition at line 1349 of file heapam.c.

References ExecClearTuple(), ExecStoreBufferHeapTuple(), HEAPAMSLOTDEBUG_1, HEAPAMSLOTDEBUG_2, HEAPAMSLOTDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_base, HeapScanDescData::rs_cbuf, HeapScanDescData::rs_ctup, TableScanDescData::rs_flags, TableScanDescData::rs_key, TableScanDescData::rs_nkeys, TableScanDescData::rs_rd, SO_ALLOW_PAGEMODE, and HeapTupleData::t_data.

Referenced by SampleHeapTupleVisible().

1350 {
1351  HeapScanDesc scan = (HeapScanDesc) sscan;
1352 
1353  /* Note: no locking manipulations needed */
1354 
1355  HEAPAMSLOTDEBUG_1; /* heap_getnextslot( info ) */
1356 
1357  if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
1358  heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1359  else
1360  heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
1361 
1362  if (scan->rs_ctup.t_data == NULL)
1363  {
1364  HEAPAMSLOTDEBUG_2; /* heap_getnextslot returning EOS */
1365  ExecClearTuple(slot);
1366  return false;
1367  }
1368 
1369  /*
1370  * if we get here it means we have a new current scan tuple, so point to
1371  * the proper return buffer and return the tuple.
1372  */
1373  HEAPAMSLOTDEBUG_3; /* heap_getnextslot returning tuple */
1374 
1375  pgstat_count_heap_getnext(sscan->rs_rd);
1376 
1377  ExecStoreBufferHeapTuple(&scan->rs_ctup, slot,
1378  scan->rs_cbuf);
1379  return true;
1380 }
#define HEAPAMSLOTDEBUG_2
Definition: heapam.c:1344
static TupleTableSlot * ExecClearTuple(TupleTableSlot *slot)
Definition: tuptable.h:425
#define HEAPAMSLOTDEBUG_1
Definition: heapam.c:1343
TableScanDescData rs_base
Definition: heapam.h:48
uint32 rs_flags
Definition: relscan.h:43
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:72
HeapTupleData rs_ctup
Definition: heapam.h:65
HeapTupleHeader t_data
Definition: htup.h:68
struct ScanKeyData * rs_key
Definition: relscan.h:37
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:487
TupleTableSlot * ExecStoreBufferHeapTuple(HeapTuple tuple, TupleTableSlot *slot, Buffer buffer)
Definition: execTuples.c:1362
Buffer rs_cbuf
Definition: heapam.h:59
Relation rs_rd
Definition: relscan.h:34
#define HEAPAMSLOTDEBUG_3
Definition: heapam.c:1345
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1363
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:801

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 1526 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility(), InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by heapam_index_fetch_tuple(), and heapam_scan_bitmap_next_block().

1529 {
1530  Page dp = (Page) BufferGetPage(buffer);
1531  TransactionId prev_xmax = InvalidTransactionId;
1532  BlockNumber blkno;
1533  OffsetNumber offnum;
1534  bool at_chain_start;
1535  bool valid;
1536  bool skip;
1537 
1538  /* If this is not the first call, previous call returned a (live!) tuple */
1539  if (all_dead)
1540  *all_dead = first_call;
1541 
1542  blkno = ItemPointerGetBlockNumber(tid);
1543  offnum = ItemPointerGetOffsetNumber(tid);
1544  at_chain_start = first_call;
1545  skip = !first_call;
1546 
1547  Assert(TransactionIdIsValid(RecentGlobalXmin));
1548  Assert(BufferGetBlockNumber(buffer) == blkno);
1549 
1550  /* Scan through possible multiple members of HOT-chain */
1551  for (;;)
1552  {
1553  ItemId lp;
1554 
1555  /* check for bogus TID */
1556  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
1557  break;
1558 
1559  lp = PageGetItemId(dp, offnum);
1560 
1561  /* check for unused, dead, or redirected items */
1562  if (!ItemIdIsNormal(lp))
1563  {
1564  /* We should only see a redirect at start of chain */
1565  if (ItemIdIsRedirected(lp) && at_chain_start)
1566  {
1567  /* Follow the redirect */
1568  offnum = ItemIdGetRedirect(lp);
1569  at_chain_start = false;
1570  continue;
1571  }
1572  /* else must be end of chain */
1573  break;
1574  }
1575 
1576  /*
1577  * Update heapTuple to point to the element of the HOT chain we're
1578  * currently investigating. Having t_self set correctly is important
1579  * because the SSI checks and the *Satisfies routine for historical
1580  * MVCC snapshots need the correct tid to decide about the visibility.
1581  */
1582  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
1583  heapTuple->t_len = ItemIdGetLength(lp);
1584  heapTuple->t_tableOid = RelationGetRelid(relation);
1585  ItemPointerSet(&heapTuple->t_self, blkno, offnum);
1586 
1587  /*
1588  * Shouldn't see a HEAP_ONLY tuple at chain start.
1589  */
1590  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
1591  break;
1592 
1593  /*
1594  * The xmin should match the previous xmax value, else chain is
1595  * broken.
1596  */
1597  if (TransactionIdIsValid(prev_xmax) &&
1598  !TransactionIdEquals(prev_xmax,
1599  HeapTupleHeaderGetXmin(heapTuple->t_data)))
1600  break;
1601 
1602  /*
1603  * When first_call is true (and thus, skip is initially false) we'll
1604  * return the first tuple we find. But on later passes, heapTuple
1605  * will initially be pointing to the tuple we returned last time.
1606  * Returning it again would be incorrect (and would loop forever), so
1607  * we skip it and return the next match we find.
1608  */
1609  if (!skip)
1610  {
1611  /* If it's visible per the snapshot, we must return it */
1612  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
1613  CheckForSerializableConflictOut(valid, relation, heapTuple,
1614  buffer, snapshot);
1615 
1616  if (valid)
1617  {
1618  ItemPointerSetOffsetNumber(tid, offnum);
1619  PredicateLockTuple(relation, heapTuple, snapshot);
1620  if (all_dead)
1621  *all_dead = false;
1622  return true;
1623  }
1624  }
1625  skip = false;
1626 
1627  /*
1628  * If we can't see it, maybe no one else can either. At caller
1629  * request, check whether all chain members are dead to all
1630  * transactions.
1631  *
1632  * Note: if you change the criterion here for what is "dead", fix the
1633  * planner's get_actual_variable_range() function to match.
1634  */
1635  if (all_dead && *all_dead &&
1636  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
1637  *all_dead = false;
1638 
1639  /*
1640  * Check to see if HOT chain continues past this tuple; if so fetch
1641  * the next offnum and loop around.
1642  */
1643  if (HeapTupleIsHotUpdated(heapTuple))
1644  {
1645  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
1646  blkno);
1647  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
1648  at_chain_start = false;
1649  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
1650  }
1651  else
1652  break; /* end of chain */
1653  }
1654 
1655  return false;
1656 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:365
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:106
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:514
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:78
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:4041
static const char *const skip[]
Definition: pg_checksums.c:99
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:676
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
ItemPointerData t_ctid
Definition: htup_details.h:160
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
TransactionId RecentGlobalXmin
Definition: snapmgr.c:168
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:685
#define Assert(condition)
Definition: c.h:739
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:313
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2548
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:148
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2613
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:422
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:127
bool HeapTupleSatisfiesVisibility(HeapTuple tup, Snapshot snapshot, Buffer buffer)

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 5707 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

5708 {
5709  Buffer buffer;
5710  Page page;
5711  OffsetNumber offnum;
5712  ItemId lp = NULL;
5713  HeapTupleHeader htup;
5714  uint32 oldlen;
5715  uint32 newlen;
5716 
5717  /*
5718  * For now, parallel operations are required to be strictly read-only.
5719  * Unlike a regular update, this should never create a combo CID, so it
5720  * might be possible to relax this restriction, but not without more
5721  * thought and testing. It's not clear that it would be useful, anyway.
5722  */
5723  if (IsInParallelMode())
5724  ereport(ERROR,
5725  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
5726  errmsg("cannot update tuples during a parallel operation")));
5727 
5728  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
5729  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
5730  page = (Page) BufferGetPage(buffer);
5731 
5732  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
5733  if (PageGetMaxOffsetNumber(page) >= offnum)
5734  lp = PageGetItemId(page, offnum);
5735 
5736  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
5737  elog(ERROR, "invalid lp");
5738 
5739  htup = (HeapTupleHeader) PageGetItem(page, lp);
5740 
5741  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
5742  newlen = tuple->t_len - tuple->t_data->t_hoff;
5743  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
5744  elog(ERROR, "wrong tuple length");
5745 
5746  /* NO EREPORT(ERROR) from here till changes are logged */
5747  START_CRIT_SECTION();
5748 
5749  memcpy((char *) htup + htup->t_hoff,
5750  (char *) tuple->t_data + tuple->t_data->t_hoff,
5751  newlen);
5752 
5753  MarkBufferDirty(buffer);
5754 
5755  /* XLOG stuff */
5756  if (RelationNeedsWAL(relation))
5757  {
5758  xl_heap_inplace xlrec;
5759  XLogRecPtr recptr;
5760 
5761  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5762 
5763  XLogBeginInsert();
5764  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
5765 
5766  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
5767  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
5768 
5769  /* inplace updates aren't decoded atm, don't log the origin */
5770 
5771  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
5772 
5773  PageSetLSN(page, recptr);
5774  }
5775 
5776  END_CRIT_SECTION();
5777 
5778  UnlockReleaseBuffer(buffer);
5779 
5780  /*
5781  * Send out shared cache inval if necessary. Note that because we only
5782  * pass the new version of the tuple, this mustn't be used for any
5783  * operations that could change catcache lookup keys. But we aren't
5784  * bothering with index updates either, so that's true a fortiori.
5785  */
5786  if (!IsBootstrapProcessingMode())
5787  CacheInvalidateHeapTuple(relation, tuple, NULL);
5788 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
#define SizeOfHeapInplace
Definition: heapam_xlog.h:308
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
int errcode(int sqlerrcode)
Definition: elog.c:608
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:88
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:59
bool IsInParallelMode(void)
Definition: xact.c:996
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:35
unsigned int uint32
Definition: c.h:359
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
#define ereport(elevel, rest)
Definition: elog.h:141
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:235
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
OffsetNumber offnum
Definition: heapam_xlog.h:304
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3602
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:99
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:596
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:524
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:374
int errmsg(const char *fmt,...)
Definition: elog.c:822
#define elog(elevel,...)
Definition: elog.h:228
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:340
Pointer Page
Definition: bufpage.h:78

◆ heap_insert()

void heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 1868 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_NO_LOGICAL, HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by heapam_tuple_insert(), heapam_tuple_insert_speculative(), simple_heap_insert(), and toast_save_datum().

1870 {
1871  TransactionId xid = GetCurrentTransactionId();
1872  HeapTuple heaptup;
1873  Buffer buffer;
1874  Buffer vmbuffer = InvalidBuffer;
1875  bool all_visible_cleared = false;
1876 
1877  /*
1878  * Fill in tuple header fields and toast the tuple if necessary.
1879  *
1880  * Note: below this point, heaptup is the data we actually intend to store
1881  * into the relation; tup is the caller's original untoasted data.
1882  */
1883  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
1884 
1885  /*
1886  * Find buffer to insert this tuple into. If the page is all visible,
1887  * this will also pin the requisite visibility map page.
1888  */
1889  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
1890  InvalidBuffer, options, bistate,
1891  &vmbuffer, NULL);
1892 
1893  /*
1894  * We're about to do the actual insert -- but check for conflict first, to
1895  * avoid possibly having to roll back work we've just done.
1896  *
1897  * This is safe without a recheck as long as there is no possibility of
1898  * another process scanning the page between this check and the insert
1899  * being visible to the scan (i.e., an exclusive buffer content lock is
1900  * continuously held from this point until the tuple insert is visible).
1901  *
1902  * For a heap insert, we only need to check for table-level SSI locks. Our
1903  * new tuple can't possibly conflict with existing tuple locks, and heap
1904  * page locks are only consolidated versions of tuple locks; they do not
1905  * lock "gaps" as index page locks do. So we don't need to specify a
1906  * buffer when making the call, which makes for a faster check.
1907  */
1908  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
1909 
1910  /* NO EREPORT(ERROR) from here till changes are logged */
1911  START_CRIT_SECTION();
1912 
1913  RelationPutHeapTuple(relation, buffer, heaptup,
1914  (options & HEAP_INSERT_SPECULATIVE) != 0);
1915 
1916  if (PageIsAllVisible(BufferGetPage(buffer)))
1917  {
1918  all_visible_cleared = true;
1919  PageClearAllVisible(BufferGetPage(buffer));
1920  visibilitymap_clear(relation,
1921  ItemPointerGetBlockNumber(&(heaptup->t_self)),
1922  vmbuffer, VISIBILITYMAP_VALID_BITS);
1923  }
1924 
1925  /*
1926  * XXX Should we set PageSetPrunable on this page ?
1927  *
1928  * The inserting transaction may eventually abort thus making this tuple
1929  * DEAD and hence available for pruning. Though we don't want to optimize
1930  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
1931  * aborted tuple will never be pruned until next vacuum is triggered.
1932  *
1933  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
1934  */
1935 
1936  MarkBufferDirty(buffer);
1937 
1938  /* XLOG stuff */
1939  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
1940  {
1941  xl_heap_insert xlrec;
1942  xl_heap_header xlhdr;
1943  XLogRecPtr recptr;
1944  Page page = BufferGetPage(buffer);
1945  uint8 info = XLOG_HEAP_INSERT;
1946  int bufflags = 0;
1947 
1948  /*
1949  * If this is a catalog, we need to transmit combocids to properly
1950  * decode, so log that as well.
1951  */
1952  if (RelationIsAccessibleInLogicalDecoding(relation))
1953  log_heap_new_cid(relation, heaptup);
1954 
1955  /*
1956  * If this is the single and first tuple on page, we can reinit the
1957  * page instead of restoring the whole thing. Set flag, and hide
1958  * buffer references from XLogInsert.
1959  */
1960  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
1961  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
1962  {
1963  info |= XLOG_HEAP_INIT_PAGE;
1964  bufflags |= REGBUF_WILL_INIT;
1965  }
1966 
1967  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
1968  xlrec.flags = 0;
1969  if (all_visible_cleared)
1970  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
1971  if (options & HEAP_INSERT_SPECULATIVE)
1972  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
1973  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
1974 
1975  /*
1976  * For logical decoding, we need the tuple even if we're doing a full
1977  * page write, so make sure it's included even if we take a full-page
1978  * image. (XXX We could alternatively store a pointer into the FPW).
1979  */
1980  if (RelationIsLogicallyLogged(relation) &&
1981  !(options & HEAP_INSERT_NO_LOGICAL))
1982  {
1983  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
1984  bufflags |= REGBUF_KEEP_DATA;
1985  }
1986 
1987  XLogBeginInsert();
1988  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
1989 
1990  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
1991  xlhdr.t_infomask = heaptup->t_data->t_infomask;
1992  xlhdr.t_hoff = heaptup->t_data->t_hoff;
1993 
1994  /*
1995  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
1996  * write the whole page to the xlog, we don't need to store
1997  * xl_heap_header in the xlog.
1998  */
1999  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2000  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2001  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2002  XLogRegisterBufData(0,
2003  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2004  heaptup->t_len - SizeofHeapTupleHeader);
2005 
2006  /* filtering by origin on a row level is much more efficient */
2007  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2008 
2009  recptr = XLogInsert(RM_HEAP_ID, info);
2010 
2011  PageSetLSN(page, recptr);
2012  }
2013 
2014  END_CRIT_SECTION();
2015 
2016  UnlockReleaseBuffer(buffer);
2017  if (vmbuffer != InvalidBuffer)
2018  ReleaseBuffer(vmbuffer);
2019 
2020  /*
2021  * If tuple is cachable, mark it for invalidation from the caches in case
2022  * we abort. Note it is OK to do this after releasing the buffer, because
2023  * the heaptup data structure is all in local memory, not in the shared
2024  * buffer.
2025  */
2026  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2027 
2028  /* Note: speculative insertions are counted too, even if aborted later */
2029  pgstat_count_heap_insert(relation, 1);
2030 
2031  /*
2032  * If heaptup is a private copy, release it. Don't forget to copy t_self
2033  * back to the caller's image, too.
2034  */
2035  if (heaptup != tup)
2036  {
2037  tup->t_self = heaptup->t_self;
2038  heap_freetuple(heaptup);
2039  }
2040 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
#define SizeofHeapTupleHeader
Definition: htup_details.h:184
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7523
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1114
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2049
#define PageIsAllVisible(page)
Definition: bufpage.h:385
uint32 TransactionId
Definition: c.h:514
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1458
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define END_CRIT_SECTION()
Definition: miscadmin.h:134
unsigned char uint8
Definition: c.h:357
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:33
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define START_CRIT_SECTION()
Definition: miscadmin.h:132
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:228
#define HEAP_INSERT_SKIP_WAL
Definition: heapam.h:32
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3365
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:594
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1338
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:357
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4426
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:36
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3388
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:422
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:35
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:320
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:159
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:578
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:38
#define PageClearAllVisible(page)
Definition: bufpage.h:389
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:739
uint16 t_infomask
Definition: heapam_xlog.h:145
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:117
#define RelationNeedsWAL(relation)
Definition: rel.h:524
#define SizeOfHeapInsert
Definition: heapam_xlog.h:160
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2613
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:1946
#define HEAP_INSERT_NO_LOGICAL
Definition: heapam.h:35
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:98
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:368
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:154
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:78

◆ heap_lock_tuple()

TM_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
TM_FailureData tmfd 
)

Definition at line 3973 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, TM_FailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), TM_FailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleSatisfiesUpdate(), i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TM_BeingModified, TM_Deleted, TM_Invisible, TM_Ok, TM_SelfModified, TM_Updated, TM_WouldBlock, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, 
UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, TM_FailureData::xmax, and xmax_infomask_changed().

Referenced by heapam_tuple_lock().

3977 {
3978  TM_Result result;
3979  ItemPointer tid = &(tuple->t_self);
3980  ItemId lp;
3981  Page page;
3982  Buffer vmbuffer = InvalidBuffer;
3983  BlockNumber block;
3984  TransactionId xid,
3985  xmax;
3986  uint16 old_infomask,
3987  new_infomask,
3988  new_infomask2;
3989  bool first_time = true;
3990  bool skip_tuple_lock = false;
3991  bool have_tuple_lock = false;
3992  bool cleared_all_frozen = false;
3993 
3994  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
3995  block = ItemPointerGetBlockNumber(tid);
3996 
3997  /*
3998  * Before locking the buffer, pin the visibility map page if it appears to
3999  * be necessary. Since we haven't got the lock yet, someone else might be
4000  * in the middle of changing this, so we'll need to recheck after we have
4001  * the lock.
4002  */
4003  if (PageIsAllVisible(BufferGetPage(*buffer)))
4004  visibilitymap_pin(relation, block, &vmbuffer);
4005 
4007 
4008  page = BufferGetPage(*buffer);
4009  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4010  Assert(ItemIdIsNormal(lp));
4011 
4012  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4013  tuple->t_len = ItemIdGetLength(lp);
4014  tuple->t_tableOid = RelationGetRelid(relation);
4015 
4016 l3:
4017  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4018 
4019  if (result == TM_Invisible)
4020  {
4021  /*
4022  * This is possible, but only when locking a tuple for ON CONFLICT
4023  * UPDATE. We return this value here rather than throwing an error in
4024  * order to give that case the opportunity to throw a more specific
4025  * error.
4026  */
4027  result = TM_Invisible;
4028  goto out_locked;
4029  }
4030  else if (result == TM_BeingModified ||
4031  result == TM_Updated ||
4032  result == TM_Deleted)
4033  {
4034  TransactionId xwait;
4035  uint16 infomask;
4036  uint16 infomask2;
4037  bool require_sleep;
4038  ItemPointerData t_ctid;
4039 
4040  /* must copy state data before unlocking buffer */
4041  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4042  infomask = tuple->t_data->t_infomask;
4043  infomask2 = tuple->t_data->t_infomask2;
4044  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4045 
4046  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4047 
4048  /*
4049  * If any subtransaction of the current top transaction already holds
4050  * a lock as strong as or stronger than what we're requesting, we
4051  * effectively hold the desired lock already. We *must* succeed
4052  * without trying to take the tuple lock, else we will deadlock
4053  * against anyone wanting to acquire a stronger lock.
4054  *
4055  * Note we only do this the first time we loop on the HTSU result;
4056  * there is no point in testing in subsequent passes, because
4057  * evidently our own transaction cannot have acquired a new lock after
4058  * the first time we checked.
4059  */
4060  if (first_time)
4061  {
4062  first_time = false;
4063 
4064  if (infomask & HEAP_XMAX_IS_MULTI)
4065  {
4066  int i;
4067  int nmembers;
4068  MultiXactMember *members;
4069 
4070  /*
4071  * We don't need to allow old multixacts here; if that had
4072  * been the case, HeapTupleSatisfiesUpdate would have returned
4073  * MayBeUpdated and we wouldn't be here.
4074  */
4075  nmembers =
4076  GetMultiXactIdMembers(xwait, &members, false,
4077  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4078 
4079  for (i = 0; i < nmembers; i++)
4080  {
4081  /* only consider members of our own transaction */
4082  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4083  continue;
4084 
4085  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4086  {
4087  pfree(members);
4088  result = TM_Ok;
4089  goto out_unlocked;
4090  }
4091  else
4092  {
4093  /*
4094  * Disable acquisition of the heavyweight tuple lock.
4095  * Otherwise, when promoting a weaker lock, we might
4096  * deadlock with another locker that has acquired the
4097  * heavyweight tuple lock and is waiting for our
4098  * transaction to finish.
4099  *
4100  * Note that in this case we still need to wait for
4101  * the multixact if required, to avoid acquiring
4102  * conflicting locks.
4103  */
4104  skip_tuple_lock = true;
4105  }
4106  }
4107 
4108  if (members)
4109  pfree(members);
4110  }
4111  else if (TransactionIdIsCurrentTransactionId(xwait))
4112  {
4113  switch (mode)
4114  {
4115  case LockTupleKeyShare:
4116  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4117  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4118  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4119  result = TM_Ok;
4120  goto out_unlocked;
4121  case LockTupleShare:
4122  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4123  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4124  {
4125  result = TM_Ok;
4126  goto out_unlocked;
4127  }
4128  break;
4130  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4131  {
4132  result = TM_Ok;
4133  goto out_unlocked;
4134  }
4135  break;
4136  case LockTupleExclusive:
4137  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4138  infomask2 & HEAP_KEYS_UPDATED)
4139  {
4140  result = TM_Ok;
4141  goto out_unlocked;
4142  }
4143  break;
4144  }
4145  }
4146  }
4147 
4148  /*
4149  * Initially assume that we will have to wait for the locking
4150  * transaction(s) to finish. We check various cases below in which
4151  * this can be turned off.
4152  */
4153  require_sleep = true;
4154  if (mode == LockTupleKeyShare)
4155  {
4156  /*
4157  * If we're requesting KeyShare, and there's no update present, we
4158  * don't need to wait. Even if there is an update, we can still
4159  * continue if the key hasn't been modified.
4160  *
4161  * However, if there are updates, we need to walk the update chain
4162  * to mark future versions of the row as locked, too. That way,
4163  * if somebody deletes that future version, we're protected
4164  * against the key going away. This locking of future versions
4165  * could block momentarily, if a concurrent transaction is
4166  * deleting a key; or it could return a value to the effect that
4167  * the transaction deleting the key has already committed. So we
4168  * do this before re-locking the buffer; otherwise this would be
4169  * prone to deadlocks.
4170  *
4171  * Note that the TID we're locking was grabbed before we unlocked
4172  * the buffer. For it to change while we're not looking, the
4173  * other properties we're testing for below after re-locking the
4174  * buffer would also change, in which case we would restart this
4175  * loop above.
4176  */
4177  if (!(infomask2 & HEAP_KEYS_UPDATED))
4178  {
4179  bool updated;
4180 
4181  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4182 
4183  /*
4184  * If there are updates, follow the update chain; bail out if
4185  * that cannot be done.
4186  */
4187  if (follow_updates && updated)
4188  {
4189  TM_Result res;
4190 
4191  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4193  mode);
4194  if (res != TM_Ok)
4195  {
4196  result = res;
4197  /* recovery code expects to have buffer lock held */
4199  goto failed;
4200  }
4201  }
4202 
4204 
4205  /*
4206  * Make sure it's still an appropriate lock, else start over.
4207  * Also, if it wasn't updated before we released the lock, but
4208  * is updated now, we start over too; the reason is that we
4209  * now need to follow the update chain to lock the new
4210  * versions.
4211  */
4212  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4213  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4214  !updated))
4215  goto l3;
4216 
4217  /* Things look okay, so we can skip sleeping */
4218  require_sleep = false;
4219 
4220  /*
4221  * Note we allow Xmax to change here; other updaters/lockers
4222  * could have modified it before we grabbed the buffer lock.
4223  * However, this is not a problem, because with the recheck we
4224  * just did we ensure that they still don't conflict with the
4225  * lock we want.
4226  */
4227  }
4228  }
4229  else if (mode == LockTupleShare)
4230  {
4231  /*
4232  * If we're requesting Share, we can similarly avoid sleeping if
4233  * there's no update and no exclusive lock present.
4234  */
4235  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4236  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4237  {
4239 
4240  /*
4241  * Make sure it's still an appropriate lock, else start over.
4242  * See above about allowing xmax to change.
4243  */
4244  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4246  goto l3;
4247  require_sleep = false;
4248  }
4249  }
4250  else if (mode == LockTupleNoKeyExclusive)
4251  {
4252  /*
4253  * If we're requesting NoKeyExclusive, we might also be able to
4254  * avoid sleeping; just ensure that there no conflicting lock
4255  * already acquired.
4256  */
4257  if (infomask & HEAP_XMAX_IS_MULTI)
4258  {
4259  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4260  mode, NULL))
4261  {
4262  /*
4263  * No conflict, but if the xmax changed under us in the
4264  * meantime, start over.
4265  */
4267  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4269  xwait))
4270  goto l3;
4271 
4272  /* otherwise, we're good */
4273  require_sleep = false;
4274  }
4275  }
4276  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4277  {
4279 
4280  /* if the xmax changed in the meantime, start over */
4281  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4284  xwait))
4285  goto l3;
4286  /* otherwise, we're good */
4287  require_sleep = false;
4288  }
4289  }
4290 
4291  /*
4292  * As a check independent from those above, we can also avoid sleeping
4293  * if the current transaction is the sole locker of the tuple. Note
4294  * that the strength of the lock already held is irrelevant; this is
4295  * not about recording the lock in Xmax (which will be done regardless
4296  * of this optimization, below). Also, note that the cases where we
4297  * hold a lock stronger than we are requesting are already handled
4298  * above by not doing anything.
4299  *
4300  * Note we only deal with the non-multixact case here; MultiXactIdWait
4301  * is well equipped to deal with this situation on its own.
4302  */
4303  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4305  {
4306  /* ... but if the xmax changed in the meantime, start over */
4308  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4310  xwait))
4311  goto l3;
4313  require_sleep = false;
4314  }
4315 
4316  /*
4317  * Time to sleep on the other transaction/multixact, if necessary.
4318  *
4319  * If the other transaction is an update/delete that's already
4320  * committed, then sleeping cannot possibly do any good: if we're
4321  * required to sleep, get out to raise an error instead.
4322  *
4323  * By here, we either have already acquired the buffer exclusive lock,
4324  * or we must wait for the locking transaction or multixact; so below
4325  * we ensure that we grab buffer lock after the sleep.
4326  */
4327  if (require_sleep && (result == TM_Updated || result == TM_Deleted))
4328  {
4330  goto failed;
4331  }
4332  else if (require_sleep)
4333  {
4334  /*
4335  * Acquire tuple lock to establish our priority for the tuple, or
4336  * die trying. LockTuple will release us when we are next-in-line
4337  * for the tuple. We must do this even if we are share-locking,
4338  * but not if we already have a weaker lock on the tuple.
4339  *
4340  * If we are forced to "start over" below, we keep the tuple lock;
4341  * this arranges that we stay at the head of the line while
4342  * rechecking tuple state.
4343  */
4344  if (!skip_tuple_lock &&
4345  !heap_acquire_tuplock(relation, tid, mode, wait_policy,
4346  &have_tuple_lock))
4347  {
4348  /*
4349  * This can only happen if wait_policy is Skip and the lock
4350  * couldn't be obtained.
4351  */
4352  result = TM_WouldBlock;
4353  /* recovery code expects to have buffer lock held */
4355  goto failed;
4356  }
4357 
4358  if (infomask & HEAP_XMAX_IS_MULTI)
4359  {
4361 
4362  /* We only ever lock tuples, never update them */
4363  if (status >= MultiXactStatusNoKeyUpdate)
4364  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4365 
4366  /* wait for multixact to end, or die trying */
4367  switch (wait_policy)
4368  {
4369  case LockWaitBlock:
4370  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4371  relation, &tuple->t_self, XLTW_Lock, NULL);
4372  break;
4373  case LockWaitSkip:
4375  status, infomask, relation,
4376  NULL))
4377  {
4378  result = TM_WouldBlock;
4379  /* recovery code expects to have buffer lock held */
4381  goto failed;
4382  }
4383  break;
4384  case LockWaitError:
4386  status, infomask, relation,
4387  NULL))
4388  ereport(ERROR,
4389  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4390  errmsg("could not obtain lock on row in relation \"%s\"",
4391  RelationGetRelationName(relation))));
4392 
4393  break;
4394  }
4395 
4396  /*
4397  * Of course, the multixact might not be done here: if we're
4398  * requesting a light lock mode, other transactions with light
4399  * locks could still be alive, as well as locks owned by our
4400  * own xact or other subxacts of this backend. We need to
4401  * preserve the surviving MultiXact members. Note that it
4402  * isn't absolutely necessary in the latter case, but doing so
4403  * is simpler.
4404  */
4405  }
4406  else
4407  {
4408  /* wait for regular transaction to end, or die trying */
4409  switch (wait_policy)
4410  {
4411  case LockWaitBlock:
4412  XactLockTableWait(xwait, relation, &tuple->t_self,
4413  XLTW_Lock);
4414  break;
4415  case LockWaitSkip:
4416  if (!ConditionalXactLockTableWait(xwait))
4417  {
4418  result = TM_WouldBlock;
4419  /* recovery code expects to have buffer lock held */
4421  goto failed;
4422  }
4423  break;
4424  case LockWaitError:
4425  if (!ConditionalXactLockTableWait(xwait))
4426  ereport(ERROR,
4427  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4428  errmsg("could not obtain lock on row in relation \"%s\"",
4429  RelationGetRelationName(relation))));
4430  break;
4431  }
4432  }
4433 
4434  /* if there are updates, follow the update chain */
4435  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
4436  {
4437  TM_Result res;
4438 
4439  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4441  mode);
4442  if (res != TM_Ok)
4443  {
4444  result = res;
4445  /* recovery code expects to have buffer lock held */
4447  goto failed;
4448  }
4449  }
4450 
4452 
4453  /*
4454  * xwait is done, but if xwait had just locked the tuple then some
4455  * other xact could update this tuple before we get to this point.
4456  * Check for xmax change, and start over if so.
4457  */
4458  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4460  xwait))
4461  goto l3;
4462 
4463  if (!(infomask & HEAP_XMAX_IS_MULTI))
4464  {
4465  /*
4466  * Otherwise check if it committed or aborted. Note we cannot
4467  * be here if the tuple was only locked by somebody who didn't
4468  * conflict with us; that would have been handled above. So
4469  * that transaction must necessarily be gone by now. But
4470  * don't check for this in the multixact case, because some
4471  * locker transactions might still be running.
4472  */
4473  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);