PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "catalog/index.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
#include "utils/memutils.h"
#include "nodes/execnodes.h"
#include "executor/executor.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static void heap_parallelscan_startblock_init (HeapScanDesc scan)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static bool ProjIndexIsUnchanged (Relation relation, HeapTuple oldtup, HeapTuple newtup)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
void heap_parallelscan_reinitialize (ParallelHeapScanDesc parallel_scan)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, bool changingPart)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

◆ ConditionalLockTupleTuplock

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 189 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ FRM_INVALIDATE_XMAX

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6471 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_MARK_COMMITTED

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6474 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_NOOP

#define FRM_NOOP   0x0001

Definition at line 6470 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_MULTI

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6473 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ FRM_RETURN_IS_XID

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6472 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

◆ HEAPDEBUG_1

#define HEAPDEBUG_1

Definition at line 1828 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_2

#define HEAPDEBUG_2

Definition at line 1829 of file heapam.c.

Referenced by heap_getnext().

◆ HEAPDEBUG_3

#define HEAPDEBUG_3

Definition at line 1830 of file heapam.c.

Referenced by heap_getnext().

◆ LOCKMODE_from_mxstatus

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

◆ LockTupleTuplock

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 185 of file heapam.c.

Referenced by heap_acquire_tuplock().

◆ TUPLOCK_from_mxstatus

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 207 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

◆ UnlockTupleTuplock

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 187 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

Function Documentation

◆ compute_infobits()

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2999 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

3000 {
3001  return
3002  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
3003  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
3004  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
3005  /* note we ignore HEAP_XMAX_SHR_LOCK here */
3006  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
3007  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
3008  XLHL_KEYS_UPDATED : 0);
3009 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:190
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:193
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:192
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ compute_new_xmax_infomask()

static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId * result_xmax,
uint16 * result_infomask,
uint16 * result_infomask2 
)
static

Definition at line 5400 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5405 {
5406  TransactionId new_xmax;
5407  uint16 new_infomask,
5408  new_infomask2;
5409 
5410  Assert(TransactionIdIsValid(add_to_xmax));
5411 
5412 l5:
5413  new_infomask = 0;
5414  new_infomask2 = 0;
5415  if (old_infomask & HEAP_XMAX_INVALID)
5416  {
5417  /*
5418  * No previous locker; we just insert our own TransactionId.
5419  *
5420  * Note that it's critical that this case be the first one checked,
5421  * because there are several blocks below that come back to this one
5422  * to implement certain optimizations; old_infomask might contain
5423  * other dirty bits in those cases, but we don't really care.
5424  */
5425  if (is_update)
5426  {
5427  new_xmax = add_to_xmax;
5428  if (mode == LockTupleExclusive)
5429  new_infomask2 |= HEAP_KEYS_UPDATED;
5430  }
5431  else
5432  {
5433  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5434  switch (mode)
5435  {
5436  case LockTupleKeyShare:
5437  new_xmax = add_to_xmax;
5438  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5439  break;
5440  case LockTupleShare:
5441  new_xmax = add_to_xmax;
5442  new_infomask |= HEAP_XMAX_SHR_LOCK;
5443  break;
5444  case LockTupleNoKeyExclusive:
5445  new_xmax = add_to_xmax;
5446  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5447  break;
5448  case LockTupleExclusive:
5449  new_xmax = add_to_xmax;
5450  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5451  new_infomask2 |= HEAP_KEYS_UPDATED;
5452  break;
5453  default:
5454  new_xmax = InvalidTransactionId; /* silence compiler */
5455  elog(ERROR, "invalid lock mode");
5456  }
5457  }
5458  }
5459  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5460  {
5461  MultiXactStatus new_status;
5462 
5463  /*
5464  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5465  * cross-check.
5466  */
5467  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5468 
5469  /*
5470  * A multixact together with LOCK_ONLY set but neither lock bit set
5471  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5472  * anymore. This check is critical for databases upgraded by
5473  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5474  * that such multis are never passed.
5475  */
5476  if (HEAP_LOCKED_UPGRADED(old_infomask))
5477  {
5478  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5479  old_infomask |= HEAP_XMAX_INVALID;
5480  goto l5;
5481  }
5482 
5483  /*
5484  * If the XMAX is already a MultiXactId, then we need to expand it to
5485  * include add_to_xmax; but if all the members were lockers and are
5486  * all gone, we can do away with the IS_MULTI bit and just set
5487  * add_to_xmax as the only locker/updater. If all lockers are gone
5488  * and we have an updater that aborted, we can also do without a
5489  * multi.
5490  *
5491  * The cost of doing GetMultiXactIdMembers would be paid by
5492  * MultiXactIdExpand if we weren't to do this, so this check is not
5493  * incurring extra work anyhow.
5494  */
5495  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5496  {
5497  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5498  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5499  old_infomask)))
5500  {
5501  /*
5502  * Reset these bits and restart; otherwise fall through to
5503  * create a new multi below.
5504  */
5505  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5506  old_infomask |= HEAP_XMAX_INVALID;
5507  goto l5;
5508  }
5509  }
5510 
5511  new_status = get_mxact_status_for_lock(mode, is_update);
5512 
5513  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5514  new_status);
5515  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5516  }
5517  else if (old_infomask & HEAP_XMAX_COMMITTED)
5518  {
5519  /*
5520  * It's a committed update, so we need to preserve him as updater of
5521  * the tuple.
5522  */
5523  MultiXactStatus status;
5524  MultiXactStatus new_status;
5525 
5526  if (old_infomask2 & HEAP_KEYS_UPDATED)
5527  status = MultiXactStatusUpdate;
5528  else
5529  status = MultiXactStatusNoKeyUpdate;
5530 
5531  new_status = get_mxact_status_for_lock(mode, is_update);
5532 
5533  /*
5534  * since it's not running, it's obviously impossible for the old
5535  * updater to be identical to the current one, so we need not check
5536  * for that case as we do in the block above.
5537  */
5538  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5539  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5540  }
5541  else if (TransactionIdIsInProgress(xmax))
5542  {
5543  /*
5544  * If the XMAX is a valid, in-progress TransactionId, then we need to
5545  * create a new MultiXactId that includes both the old locker or
5546  * updater and our own TransactionId.
5547  */
5548  MultiXactStatus new_status;
5549  MultiXactStatus old_status;
5550  LockTupleMode old_mode;
5551 
5552  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5553  {
5554  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5555  old_status = MultiXactStatusForKeyShare;
5556  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5557  old_status = MultiXactStatusForShare;
5558  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5559  {
5560  if (old_infomask2 & HEAP_KEYS_UPDATED)
5561  old_status = MultiXactStatusForUpdate;
5562  else
5563  old_status = MultiXactStatusForNoKeyUpdate;
5564  }
5565  else
5566  {
5567  /*
5568  * LOCK_ONLY can be present alone only when a page has been
5569  * upgraded by pg_upgrade. But in that case,
5570  * TransactionIdIsInProgress() should have returned false. We
5571  * assume it's no longer locked in this case.
5572  */
5573  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5574  old_infomask |= HEAP_XMAX_INVALID;
5575  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5576  goto l5;
5577  }
5578  }
5579  else
5580  {
5581  /* it's an update, but which kind? */
5582  if (old_infomask2 & HEAP_KEYS_UPDATED)
5583  old_status = MultiXactStatusUpdate;
5584  else
5585  old_status = MultiXactStatusNoKeyUpdate;
5586  }
5587 
5588  old_mode = TUPLOCK_from_mxstatus(old_status);
5589 
5590  /*
5591  * If the lock to be acquired is for the same TransactionId as the
5592  * existing lock, there's an optimization possible: consider only the
5593  * strongest of both locks as the only one present, and restart.
5594  */
5595  if (xmax == add_to_xmax)
5596  {
5597  /*
5598  * Note that it's not possible for the original tuple to be
5599  * updated: we wouldn't be here because the tuple would have been
5600  * invisible and we wouldn't try to update it. As a subtlety,
5601  * this code can also run when traversing an update chain to lock
5602  * future versions of a tuple. But we wouldn't be here either,
5603  * because the add_to_xmax would be different from the original
5604  * updater.
5605  */
5606  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5607 
5608  /* acquire the strongest of both */
5609  if (mode < old_mode)
5610  mode = old_mode;
5611  /* mustn't touch is_update */
5612 
5613  old_infomask |= HEAP_XMAX_INVALID;
5614  goto l5;
5615  }
5616 
5617  /* otherwise, just fall back to creating a new multixact */
5618  new_status = get_mxact_status_for_lock(mode, is_update);
5619  new_xmax = MultiXactIdCreate(xmax, old_status,
5620  add_to_xmax, new_status);
5621  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5622  }
5623  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5624  TransactionIdDidCommit(xmax))
5625  {
5626  /*
5627  * It's a committed update, so we gotta preserve him as updater of the
5628  * tuple.
5629  */
5630  MultiXactStatus status;
5631  MultiXactStatus new_status;
5632 
5633  if (old_infomask2 & HEAP_KEYS_UPDATED)
5634  status = MultiXactStatusUpdate;
5635  else
5636  status = MultiXactStatusNoKeyUpdate;
5637 
5638  new_status = get_mxact_status_for_lock(mode, is_update);
5639 
5640  /*
5641  * since it's not running, it's obviously impossible for the old
5642  * updater to be identical to the current one, so we need not check
5643  * for that case as we do in the block above.
5644  */
5645  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5646  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5647  }
5648  else
5649  {
5650  /*
5651  * Can get here iff the locking/updating transaction was running when
5652  * the infomask was extracted from the tuple, but finished before
5653  * TransactionIdIsInProgress got to run. Deal with it as if there was
5654  * no locker at all in the first place.
5655  */
5656  old_infomask |= HEAP_XMAX_INVALID;
5657  goto l5;
5658  }
5659 
5660  *result_infomask = new_infomask;
5661  *result_infomask2 = new_infomask2;
5662  *result_xmax = new_xmax;
5663 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:7061
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:190
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:193
uint32 TransactionId
Definition: c.h:474
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:765
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:249
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:203
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:196
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:259
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:7142
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:324
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:204
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:192
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:227
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
TransactionId MultiXactId
Definition: c.h:484
#define Assert(condition)
Definition: c.h:699
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:207
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4632
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:261
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:263
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437

◆ ConditionalMultiXactIdWait()

static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7396 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7398 {
7399  return Do_MultiXactIdWait(multi, status, infomask, true,
7400  rel, NULL, XLTW_None, remaining);
7401 }
int remaining
Definition: informix.c:692
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7296
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ Do_MultiXactIdWait()

static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 7296 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7300 {
7301  bool result = true;
7302  MultiXactMember *members;
7303  int nmembers;
7304  int remain = 0;
7305 
7306  /* for pre-pg_upgrade tuples, no need to sleep at all */
7307  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7308  GetMultiXactIdMembers(multi, &members, false,
7309  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7310 
7311  if (nmembers >= 0)
7312  {
7313  int i;
7314 
7315  for (i = 0; i < nmembers; i++)
7316  {
7317  TransactionId memxid = members[i].xid;
7318  MultiXactStatus memstatus = members[i].status;
7319 
7320  if (TransactionIdIsCurrentTransactionId(memxid))
7321  {
7322  remain++;
7323  continue;
7324  }
7325 
7326  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7327  LOCKMODE_from_mxstatus(status)))
7328  {
7329  if (remaining && TransactionIdIsInProgress(memxid))
7330  remain++;
7331  continue;
7332  }
7333 
7334  /*
7335  * This member conflicts with our multi, so we have to sleep (or
7336  * return failure, if asked to avoid waiting.)
7337  *
7338  * Note that we don't set up an error context callback ourselves,
7339  * but instead we pass the info down to XactLockTableWait. This
7340  * might seem a bit wasteful because the context is set up and
7341  * tore down for each member of the multixact, but in reality it
7342  * should be barely noticeable, and it avoids duplicate code.
7343  */
7344  if (nowait)
7345  {
7346  result = ConditionalXactLockTableWait(memxid);
7347  if (!result)
7348  break;
7349  }
7350  else
7351  XactLockTableWait(memxid, rel, ctid, oper);
7352  }
7353 
7354  pfree(members);
7355  }
7356 
7357  if (remaining)
7358  *remaining = remain;
7359 
7360  return result;
7361 }
int remaining
Definition: informix.c:692
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:474
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:765
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:249
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:177
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:627
void pfree(void *pointer)
Definition: mcxt.c:1031
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:227
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ DoesMultiXactIdConflict()

static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode 
)
static

Definition at line 7207 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

7209 {
7210  int nmembers;
7211  MultiXactMember *members;
7212  bool result = false;
7213  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7214 
7215  if (HEAP_LOCKED_UPGRADED(infomask))
7216  return false;
7217 
7218  nmembers = GetMultiXactIdMembers(multi, &members, false,
7219  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7220  if (nmembers >= 0)
7221  {
7222  int i;
7223 
7224  for (i = 0; i < nmembers; i++)
7225  {
7226  TransactionId memxid;
7227  LOCKMODE memlockmode;
7228 
7229  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7230 
7231  /* ignore members that don't conflict with the lock we want */
7232  if (!DoLockModesConflict(memlockmode, wanted))
7233  continue;
7234 
7235  /* ignore members from current xact */
7236  memxid = members[i].xid;
7238  continue;
7239 
7240  if (ISUPDATE_from_mxstatus(members[i].status))
7241  {
7242  /* ignore aborted updaters */
7243  if (TransactionIdDidAbort(memxid))
7244  continue;
7245  }
7246  else
7247  {
7248  /* ignore lockers-only that are no longer in progress */
7249  if (!TransactionIdIsInProgress(memxid))
7250  continue;
7251  }
7252 
7253  /*
7254  * Whatever remains are either live lockers that conflict with our
7255  * wanted lock, and updaters that are not aborted. Those conflict
7256  * with what we want, so return true.
7257  */
7258  result = true;
7259  break;
7260  }
7261  pfree(members);
7262  }
7263 
7264  return result;
7265 }
uint32 TransactionId
Definition: c.h:474
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:765
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:249
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:177
void pfree(void *pointer)
Definition: mcxt.c:1031
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:227
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ ExtractReplicaIdentity()

static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_modified,
bool copy 
)
static

Definition at line 8034 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, IndexRelationGetNumberOfKeyAttributes, MaxHeapAttributeNumber, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

8035 {
8036  TupleDesc desc = RelationGetDescr(relation);
8037  Oid replidindex;
8038  Relation idx_rel;
8039  char replident = relation->rd_rel->relreplident;
8040  HeapTuple key_tuple = NULL;
8041  bool nulls[MaxHeapAttributeNumber];
8043  int natt;
8044 
8045  *copy = false;
8046 
8047  if (!RelationIsLogicallyLogged(relation))
8048  return NULL;
8049 
8050  if (replident == REPLICA_IDENTITY_NOTHING)
8051  return NULL;
8052 
8053  if (replident == REPLICA_IDENTITY_FULL)
8054  {
8055  /*
8056  * When logging the entire old tuple, it very well could contain
8057  * toasted columns. If so, force them to be inlined.
8058  */
8059  if (HeapTupleHasExternal(tp))
8060  {
8061  *copy = true;
8062  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
8063  }
8064  return tp;
8065  }
8066 
8067  /* if the key hasn't changed and we're only logging the key, we're done */
8068  if (!key_changed)
8069  return NULL;
8070 
8071  /* find the replica identity index */
8072  replidindex = RelationGetReplicaIndex(relation);
8073  if (!OidIsValid(replidindex))
8074  {
8075  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
8076  RelationGetRelationName(relation));
8077  return NULL;
8078  }
8079 
8080  idx_rel = RelationIdGetRelation(replidindex);
8081 
8082  /* deform tuple, so we have fast access to columns */
8083  heap_deform_tuple(tp, desc, values, nulls);
8084 
8085  /* set all columns to NULL, regardless of whether they actually are */
8086  memset(nulls, 1, sizeof(nulls));
8087 
8088  /*
8089  * Now set all columns contained in the index to NOT NULL, they cannot
8090  * currently be NULL.
8091  */
8092  for (natt = 0; natt < IndexRelationGetNumberOfKeyAttributes(idx_rel); natt++)
8093  {
8094  int attno = idx_rel->rd_index->indkey.values[natt];
8095 
8096  if (attno < 0)
8097  {
8098  /*
8099  * The OID column can appear in an index definition, but that's
8100  * OK, because we always copy the OID if present (see below).
8101  * Other system columns may not.
8102  */
8103  if (attno == ObjectIdAttributeNumber)
8104  continue;
8105  elog(ERROR, "system column in index");
8106  }
8107  nulls[attno - 1] = false;
8108  }
8109 
8110  key_tuple = heap_form_tuple(desc, values, nulls);
8111  *copy = true;
8112  RelationClose(idx_rel);
8113 
8114  /*
8115  * Always copy oids if the table has them, even if not included in the
8116  * index. The space in the logged tuple is used anyway, so there's little
8117  * point in not including the information.
8118  */
8119  if (relation->rd_rel->relhasoids)
8120  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
8121 
8122  /*
8123  * If the tuple, which by here only contains indexed columns, still has
8124  * toasted columns, force them to be inlined. This is somewhat unlikely
8125  * since there's limits on the size of indexed columns, so we don't
8126  * duplicate toast_flatten_tuple()s functionality in the above loop over
8127  * the indexed columns, even if it would be more efficient.
8128  */
8129  if (HeapTupleHasExternal(key_tuple))
8130  {
8131  HeapTuple oldtup = key_tuple;
8132 
8133  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
8134  heap_freetuple(oldtup);
8135  }
8136 
8137  return key_tuple;
8138 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1085
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4521
#define RelationGetDescr(relation)
Definition: rel.h:433
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:1074
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:580
Form_pg_class rd_rel
Definition: rel.h:84
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1773
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:605
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:715
Form_pg_index rd_index
Definition: rel.h:131
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:441
void RelationClose(Relation relation)
Definition: relcache.c:1996
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:426
uintptr_t Datum
Definition: postgres.h:365
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:1315
static Datum values[MAXATTR]
Definition: bootstrap.c:164
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:691
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:712
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:1906

◆ fix_infomask_from_infobits()

static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8428 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8429 {
8430  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8432  *infomask2 &= ~HEAP_KEYS_UPDATED;
8433 
8434  if (infobits & XLHL_XMAX_IS_MULTI)
8435  *infomask |= HEAP_XMAX_IS_MULTI;
8436  if (infobits & XLHL_XMAX_LOCK_ONLY)
8437  *infomask |= HEAP_XMAX_LOCK_ONLY;
8438  if (infobits & XLHL_XMAX_EXCL_LOCK)
8439  *infomask |= HEAP_XMAX_EXCL_LOCK;
8440  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8441  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8442  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8443 
8444  if (infobits & XLHL_KEYS_UPDATED)
8445  *infomask2 |= HEAP_KEYS_UPDATED;
8446 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:190
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:193
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:263
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:262
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:192
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:264
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:266
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:265

◆ FreeBulkInsertState()

void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2378 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2379 {
2380  if (bistate->current_buf != InvalidBuffer)
2381  ReleaseBuffer(bistate->current_buf);
2382  FreeAccessStrategy(bistate->strategy);
2383  pfree(bistate);
2384 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:1031
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34

◆ FreezeMultiXactId()

static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 6498 of file heapam.c.

References Assert, ereport, errcode(), ERRCODE_DATA_CORRUPTED, errmsg_internal(), ERROR, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6502 {
6504  int i;
6505  MultiXactMember *members;
6506  int nmembers;
6507  bool need_replace;
6508  int nnewmembers;
6509  MultiXactMember *newmembers;
6510  bool has_lockers;
6511  TransactionId update_xid;
6512  bool update_committed;
6513 
6514  *flags = 0;
6515 
6516  /* We should only be called in Multis */
6517  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6518 
6519  if (!MultiXactIdIsValid(multi) ||
6520  HEAP_LOCKED_UPGRADED(t_infomask))
6521  {
6522  /* Ensure infomask bits are appropriately set/reset */
6523  *flags |= FRM_INVALIDATE_XMAX;
6524  return InvalidTransactionId;
6525  }
6526  else if (MultiXactIdPrecedes(multi, relminmxid))
6527  ereport(ERROR,
6529  errmsg_internal("found multixact %u from before relminmxid %u",
6530  multi, relminmxid)));
6531  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6532  {
6533  /*
6534  * This old multi cannot possibly have members still running, but
6535  * verify just in case. If it was a locker only, it can be removed
6536  * without any further consideration; but if it contained an update, we
6537  * might need to preserve it.
6538  */
6539  if (MultiXactIdIsRunning(multi,
6540  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)))
6541  ereport(ERROR,
6543  errmsg_internal("multixact %u from before cutoff %u found to be still running",
6544  multi, cutoff_multi)));
6545 
6546  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6547  {
6548  *flags |= FRM_INVALIDATE_XMAX;
6549  xid = InvalidTransactionId; /* not strictly necessary */
6550  }
6551  else
6552  {
6553  /* replace multi by update xid */
6554  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6555 
6556  /* wasn't only a lock, xid needs to be valid */
6558 
6560  ereport(ERROR,
6562  errmsg_internal("found update xid %u from before relfrozenxid %u",
6563  xid, relfrozenxid)));
6564 
6565  /*
6566  * If the xid is older than the cutoff, it has to have aborted,
6567  * otherwise the tuple would have gotten pruned away.
6568  */
6569  if (TransactionIdPrecedes(xid, cutoff_xid))
6570  {
6571  if (TransactionIdDidCommit(xid))
6572  ereport(ERROR,
6574  errmsg_internal("cannot freeze committed update xid %u", xid)));
6575  *flags |= FRM_INVALIDATE_XMAX;
6576  xid = InvalidTransactionId; /* not strictly necessary */
6577  }
6578  else
6579  {
6580  *flags |= FRM_RETURN_IS_XID;
6581  }
6582  }
6583 
6584  return xid;
6585  }
6586 
6587  /*
6588  * This multixact might have or might not have members still running, but
6589  * we know it's valid and is newer than the cutoff point for multis.
6590  * However, some member(s) of it may be below the cutoff for Xids, so we
6591  * need to walk the whole members array to figure out what to do, if
6592  * anything.
6593  */
6594 
6595  nmembers =
6596  GetMultiXactIdMembers(multi, &members, false,
6597  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6598  if (nmembers <= 0)
6599  {
6600  /* Nothing worth keeping */
6601  *flags |= FRM_INVALIDATE_XMAX;
6602  return InvalidTransactionId;
6603  }
6604 
6605  /* is there anything older than the cutoff? */
6606  need_replace = false;
6607  for (i = 0; i < nmembers; i++)
6608  {
6609  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6610  {
6611  need_replace = true;
6612  break;
6613  }
6614  }
6615 
6616  /*
6617  * In the simplest case, there is no member older than the cutoff; we can
6618  * keep the existing MultiXactId as is.
6619  */
6620  if (!need_replace)
6621  {
6622  *flags |= FRM_NOOP;
6623  pfree(members);
6624  return InvalidTransactionId;
6625  }
6626 
6627  /*
6628  * If the multi needs to be updated, figure out which members do we need
6629  * to keep.
6630  */
6631  nnewmembers = 0;
6632  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6633  has_lockers = false;
6634  update_xid = InvalidTransactionId;
6635  update_committed = false;
6636 
6637  for (i = 0; i < nmembers; i++)
6638  {
6639  /*
6640  * Determine whether to keep this member or ignore it.
6641  */
6642  if (ISUPDATE_from_mxstatus(members[i].status))
6643  {
6644  TransactionId xid = members[i].xid;
6645 
6648  ereport(ERROR,
6650  errmsg_internal("found update xid %u from before relfrozenxid %u",
6651  xid, relfrozenxid)));
6652 
6653  /*
6654  * It's an update; should we keep it? If the transaction is known
6655  * aborted or crashed then it's okay to ignore it, otherwise not.
6656  * Note that an updater older than cutoff_xid cannot possibly be
6657  * committed, because HeapTupleSatisfiesVacuum would have returned
6658  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6659  *
6660  * As with all tuple visibility routines, it's critical to test
6661  * TransactionIdIsInProgress before TransactionIdDidCommit,
6662  * because of race conditions explained in detail in tqual.c.
6663  */
6666  {
6667  Assert(!TransactionIdIsValid(update_xid));
6668  update_xid = xid;
6669  }
6670  else if (TransactionIdDidCommit(xid))
6671  {
6672  /*
6673  * The transaction committed, so we can tell caller to set
6674  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6675  * the transaction is not running.)
6676  */
6677  Assert(!TransactionIdIsValid(update_xid));
6678  update_committed = true;
6679  update_xid = xid;
6680  }
6681  else
6682  {
6683  /*
6684  * Not in progress, not committed -- must be aborted or crashed;
6685  * we can ignore it.
6686  */
6687  }
6688 
6689  /*
6690  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6691  * update Xid cannot possibly be older than the xid cutoff. The
6692  * presence of such a tuple would cause corruption, so be paranoid
6693  * and check.
6694  */
6695  if (TransactionIdIsValid(update_xid) &&
6696  TransactionIdPrecedes(update_xid, cutoff_xid))
6697  ereport(ERROR,
6699  errmsg_internal("found update xid %u from before xid cutoff %u",
6700  update_xid, cutoff_xid)));
6701 
6702  /*
6703  * If we determined that it's an Xid corresponding to an update
6704  * that must be retained, additionally add it to the list of
6705  * members of the new Multi, in case we end up using that. (We
6706  * might still decide to use only an update Xid and not a multi,
6707  * but it's easier to maintain the list as we walk the old members
6708  * list.)
6709  */
6710  if (TransactionIdIsValid(update_xid))
6711  newmembers[nnewmembers++] = members[i];
6712  }
6713  else
6714  {
6715  /* We only keep lockers if they are still running */
6716  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6717  TransactionIdIsInProgress(members[i].xid))
6718  {
6719  /* running locker cannot possibly be older than the cutoff */
6720  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6721  newmembers[nnewmembers++] = members[i];
6722  has_lockers = true;
6723  }
6724  }
6725  }
6726 
6727  pfree(members);
6728 
6729  if (nnewmembers == 0)
6730  {
6731  /* nothing worth keeping!? Tell caller to remove the whole thing */
6732  *flags |= FRM_INVALIDATE_XMAX;
6733  xid = InvalidTransactionId;
6734  }
6735  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6736  {
6737  /*
6738  * If there's a single member and it's an update, pass it back alone
6739  * without creating a new Multi. (XXX we could do this when there's a
6740  * single remaining locker, too, but that would complicate the API too
6741  * much; moreover, the case with the single updater is more
6742  * interesting, because those are longer-lived.)
6743  */
6744  Assert(nnewmembers == 1);
6745  *flags |= FRM_RETURN_IS_XID;
6746  if (update_committed)
6747  *flags |= FRM_MARK_COMMITTED;
6748  xid = update_xid;
6749  }
6750  else
6751  {
6752  /*
6753  * Create a new multixact with the surviving members of the previous
6754  * one, to set as new Xmax in the tuple.
6755  */
6756  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6757  *flags |= FRM_RETURN_IS_MULTI;
6758  }
6759 
6760  pfree(newmembers);
6761 
6762  return xid;
6763 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6472
#define FRM_MARK_COMMITTED
Definition: heapam.c:6474
uint32 TransactionId
Definition: c.h:474
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:765
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:249
int errcode(int sqlerrcode)
Definition: elog.c:575
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:7142
void pfree(void *pointer)
Definition: mcxt.c:1031
#define ERROR
Definition: elog.h:43
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6471
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
#define ereport(elevel, rest)
Definition: elog.h:122
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define ERRCODE_DATA_CORRUPTED
Definition: pg_basebackup.c:43
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6473
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:227
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
TransactionId relminmxid
Definition: pg_class.h:72
int errmsg_internal(const char *fmt,...)
Definition: elog.c:827
#define Assert(condition)
Definition: c.h:699
#define FRM_NOOP
Definition: heapam.c:6470
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3140
void * palloc(Size size)
Definition: mcxt.c:924
TransactionId relfrozenxid
Definition: pg_class.h:71
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549

◆ get_mxact_status_for_lock()

static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4632 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4633 {
4634  int retval;
4635 
4636  if (is_update)
4637  retval = tupleLockExtraInfo[mode].updstatus;
4638  else
4639  retval = tupleLockExtraInfo[mode].lockstatus;
4640 
4641  if (retval == -1)
4642  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4643  is_update ? "true" : "false");
4644 
4645  return (MultiXactStatus) retval;
4646 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219

◆ GetBulkInsertState()

BulkInsertState GetBulkInsertState ( void  )

Definition at line 2364 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2365 {
2366  BulkInsertState bistate;
2367 
2368  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2370  bistate->current_buf = InvalidBuffer;
2371  return bistate;
2372 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:542
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:924
Buffer current_buf
Definition: hio.h:34

◆ GetMultiXactIdHintBits()

static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 7061 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

7063 {
7064  int nmembers;
7065  MultiXactMember *members;
7066  int i;
7067  uint16 bits = HEAP_XMAX_IS_MULTI;
7068  uint16 bits2 = 0;
7069  bool has_update = false;
7070  LockTupleMode strongest = LockTupleKeyShare;
7071 
7072  /*
7073  * We only use this in multis we just created, so they cannot be values
7074  * pre-pg_upgrade.
7075  */
7076  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
7077 
7078  for (i = 0; i < nmembers; i++)
7079  {
7080  LockTupleMode mode;
7081 
7082  /*
7083  * Remember the strongest lock mode held by any member of the
7084  * multixact.
7085  */
7086  mode = TUPLOCK_from_mxstatus(members[i].status);
7087  if (mode > strongest)
7088  strongest = mode;
7089 
7090  /* See what other bits we need */
7091  switch (members[i].status)
7092  {
7096  break;
7097 
7099  bits2 |= HEAP_KEYS_UPDATED;
7100  break;
7101 
7103  has_update = true;
7104  break;
7105 
7106  case MultiXactStatusUpdate:
7107  bits2 |= HEAP_KEYS_UPDATED;
7108  has_update = true;
7109  break;
7110  }
7111  }
7112 
7113  if (strongest == LockTupleExclusive ||
7114  strongest == LockTupleNoKeyExclusive)
7115  bits |= HEAP_XMAX_EXCL_LOCK;
7116  else if (strongest == LockTupleShare)
7117  bits |= HEAP_XMAX_SHR_LOCK;
7118  else if (strongest == LockTupleKeyShare)
7119  bits |= HEAP_XMAX_KEYSHR_LOCK;
7120 
7121  if (!has_update)
7122  bits |= HEAP_XMAX_LOCK_ONLY;
7123 
7124  if (nmembers > 0)
7125  pfree(members);
7126 
7127  *new_infomask = bits;
7128  *new_infomask2 = bits2;
7129 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:190
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:193
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:196
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:324
void pfree(void *pointer)
Definition: mcxt.c:1031
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:192
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:207
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:225

◆ heap2_redo()

void heap2_redo ( XLogReaderState record)

Definition at line 9302 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9303 {
9304  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9305 
9306  switch (info & XLOG_HEAP_OPMASK)
9307  {
9308  case XLOG_HEAP2_CLEAN:
9309  heap_xlog_clean(record);
9310  break;
9312  heap_xlog_freeze_page(record);
9313  break;
9315  heap_xlog_cleanup_info(record);
9316  break;
9317  case XLOG_HEAP2_VISIBLE:
9318  heap_xlog_visible(record);
9319  break;
9321  heap_xlog_multi_insert(record);
9322  break;
9324  heap_xlog_lock_updated(record);
9325  break;
9326  case XLOG_HEAP2_NEW_CID:
9327 
9328  /*
9329  * Nothing to do on a real replay, only used during logical
9330  * decoding.
9331  */
9332  break;
9333  case XLOG_HEAP2_REWRITE:
9334  heap_xlog_logical_rewrite(record);
9335  break;
9336  default:
9337  elog(PANIC, "heap2_redo: unknown op code %u", info);
9338  }
9339 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1121
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:323
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:9156
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8370
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8638
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:222
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:8144
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8255
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:8165

◆ heap_abort_speculative()

void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6246 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

/*
 * heap_abort_speculative -- kill a tuple this backend speculatively inserted
 * (the ON CONFLICT machinery), making it immediately dead for everyone.
 *
 * NOTE(review): this is a Doxygen-rendered listing; several statements that
 * were hyperlinks in the HTML (original lines 6248, 6262, 6286, 6294,
 * 6302-6308, 6314, 6333, 6335) were lost in text extraction.  The bare line
 * numbers below mark those gaps -- consult heapam.c itself for the elided
 * code before relying on this listing.
 */
6247 {
/* NOTE(review): line 6248 elided -- presumably
 * "TransactionId xid = GetCurrentTransactionId();" (GetCurrentTransactionId
 * appears in the References list and "xid" is used below); confirm. */
6249  ItemPointer tid = &(tuple->t_self);
6250  ItemId lp;
6251  HeapTupleData tp;
6252  Page page;
6253  BlockNumber block;
6254  Buffer buffer;
6255 
6256  Assert(ItemPointerIsValid(tid));
6257 
6258  block = ItemPointerGetBlockNumber(tid);
6259  buffer = ReadBuffer(relation, block);
6260  page = BufferGetPage(buffer);
6261 
6263 
/* NOTE(review): lines 6262-6263 elided -- presumably
 * LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); (both names appear in the
 * References list); confirm against heapam.c. */
6264  /*
6265  * Page can't be all visible, we just inserted into it, and are still
6266  * running.
6267  */
6268  Assert(!PageIsAllVisible(page));
6269 
6270  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6271  Assert(ItemIdIsNormal(lp));
6272 
6273  tp.t_tableOid = RelationGetRelid(relation);
6274  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6275  tp.t_len = ItemIdGetLength(lp);
6276  tp.t_self = *tid;
6277 
6278  /*
6279  * Sanity check that the tuple really is a speculatively inserted tuple,
6280  * inserted by us.
6281  */
6282  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6283  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6284  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6285  elog(ERROR, "attempted to kill a non-speculative tuple");
6287 
/* NOTE(review): line 6286 elided -- likely an assertion involving
 * HeapTupleHeaderIsHeapOnly (listed under References); confirm. */
6288  /*
6289  * No need to check for serializable conflicts here. There is never a
6290  * need for a combocid, either. No need to extract replica identity, or
6291  * do anything special with infomask bits.
6292  */
6293 
6295 
/* NOTE(review): line 6294 elided -- presumably START_CRIT_SECTION();
 * (START_CRIT_SECTION appears in the References list and is matched by the
 * END_CRIT_SECTION() at line 6349 below); confirm. */
6296  /*
6297  * The tuple will become DEAD immediately. Flag that this page
6298  * immediately is a candidate for pruning by setting xmin to
6299  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6300  * inventing a nicer API for this.
6301  */
6304 
/* NOTE(review): lines 6302-6304 elided -- per the comment above,
 * presumably asserts plus PageSetPrunable(page, RecentGlobalXmin);
 * confirm against heapam.c. */
6305  /* store transaction information of xact deleting the tuple */
6308 
/* NOTE(review): lines 6306-6308 elided -- xmax/infomask updates
 * (HEAP_XMAX_BITS, HEAP_MOVED, HEAP_KEYS_UPDATED appear in the References
 * list); confirm against heapam.c. */
6309  /*
6310  * Set the tuple header xmin to InvalidTransactionId. This makes the
6311  * tuple immediately invisible to everyone. (In particular, to any
6312  * transactions waiting on the speculative token, woken up later.)
6313  */
6315 
/* NOTE(review): line 6314 elided -- per the comment above, presumably
 * HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId); confirm. */
6316  /* Clear the speculative insertion token too */
6317  tp.t_data->t_ctid = tp.t_self;
6318 
6319  MarkBufferDirty(buffer);
6320 
6321  /*
6322  * XLOG stuff
6323  *
6324  * The WAL records generated here match heap_delete(). The same recovery
6325  * routines are used.
6326  */
6327  if (RelationNeedsWAL(relation))
6328  {
6329  xl_heap_delete xlrec;
6330  XLogRecPtr recptr;
6331 
6332  xlrec.flags = XLH_DELETE_IS_SUPER;
6334  tp.t_data->t_infomask2);
/* NOTE(review): line 6333 elided -- presumably the start of
 * "xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask," whose
 * continuation is the line above (compute_infobits and infobits_set appear
 * in the References list); confirm. */
6336  xlrec.xmax = xid;
/* NOTE(review): line 6335 elided -- presumably
 * "xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);" (offnum appears
 * in the References list); confirm. */
6337 
6338  XLogBeginInsert();
6339  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6340  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6341 
6342  /* No replica identity & replication origin logged */
6343 
6344  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6345 
6346  PageSetLSN(page, recptr);
6347  }
6348 
6349  END_CRIT_SECTION();
6350 
6351  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6352 
6353  if (HeapTupleHasExternal(&tp))
6354  {
6355  Assert(!IsToastRelation(relation));
6356  toast_delete(relation, &tp, true);
6357  }
6358 
6359  /*
6360  * Never need to mark tuple for invalidation, since catalogs don't support
6361  * speculative insertion
6362  */
6363 
6364  /* Now we can release the buffer */
6365  ReleaseBuffer(buffer);
6366 
6367  /* count deletion, as we counted the insertion too */
6368  pgstat_count_heap_delete(relation);
6369 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
#define HEAP_XMAX_BITS
Definition: htup_details.h:267
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2999
HeapTupleFields t_heap
Definition: htup_details.h:153
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:474
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:434
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:106
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:519
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:157
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:417
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:120
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_MOVED
Definition: htup_details.h:213
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:699
uint8 infobits_set
Definition: heapam_xlog.h:107
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:510
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:691
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:407
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:324

◆ heap_acquire_tuplock()

static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5351 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

/*
 * heap_acquire_tuplock -- take the heavyweight lock on a tuple, honoring
 * wait_policy.  *have_tuple_lock records whether this backend already holds
 * the lock, so repeated calls are cheap no-ops.  Returns false only under
 * LockWaitSkip when the lock cannot be taken immediately; LockWaitError
 * raises an ERROR instead of returning.
 */
5353 {
5354  if (*have_tuple_lock)
5355  return true;
5356 
5357  switch (wait_policy)
5358  {
5359  case LockWaitBlock:
5360  LockTupleTuplock(relation, tid, mode);
5361  break;
5362 
5363  case LockWaitSkip:
5364  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5365  return false;
5366  break;
5367 
5368  case LockWaitError:
5369  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5370  ereport(ERROR,
5371  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5372  errmsg("could not obtain lock on row in relation \"%s\"",
5373  RelationGetRelationName(relation))));
5374  break;
5375  }
5376  *have_tuple_lock = true;
5377 
5378  return true;
5379 }
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:185
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:189
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:441
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797

◆ heap_beginscan()

HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1404 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), check_default_allows_bound(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

/*
 * heap_beginscan -- begin a plain (non-parallel) sequential scan.
 *
 * Flag arguments to heap_beginscan_internal, in order: allow_strat=true,
 * allow_sync=true, allow_pagemode=true, is_bitmapscan=false,
 * is_samplescan=false, temp_snap=false.
 */
1406 {
1407  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1408  true, true, true, false, false, false);
1409 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450

◆ heap_beginscan_bm()

HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1432 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

/*
 * heap_beginscan_bm -- begin a heap scan to be driven by a bitmap.
 *
 * Flag arguments to heap_beginscan_internal, in order: allow_strat=false,
 * allow_sync=false, allow_pagemode=true, is_bitmapscan=true,
 * is_samplescan=false, temp_snap=false.
 */
1434 {
1435  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1436  false, false, true, true, false, false);
1437 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450

◆ heap_beginscan_catalog()

HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1412 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

/*
 * heap_beginscan_catalog -- begin a scan of a system catalog using a freshly
 * obtained catalog snapshot.  The snapshot is registered here, and the final
 * flag (temp_snap=true) marks it as belonging to the scan rather than to the
 * caller.
 */
1413 {
1414  Oid relid = RelationGetRelid(relation);
1415  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1416 
1417  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1418  true, true, true, false, false, true);
1419 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:440
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450
#define RelationGetRelid(relation)
Definition: rel.h:407

◆ heap_beginscan_internal()

static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1450 of file heapam.c.

References initscan(), IsMVCCSnapshot, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

/*
 * heap_beginscan_internal -- common guts of all heap_beginscan* variants.
 * Allocates and fills a HeapScanDesc, decides page-at-a-time mode, takes a
 * relation-level predicate lock for non-bitmap scans, and calls initscan().
 */
1459 {
1460  HeapScanDesc scan;
1461 
1462  /*
1463  * increment relation ref count while scanning relation
1464  *
1465  * This is just to make really sure the relcache entry won't go away while
1466  * the scan has a pointer to it. Caller should be holding the rel open
1467  * anyway, so this is redundant in all normal scenarios...
1468  */
1470 
/* NOTE(review): line 1469 was elided in extraction -- per the comment above
 * and the References list, presumably
 * RelationIncrementReferenceCount(relation); confirm against heapam.c. */
1471  /*
1472  * allocate and initialize scan descriptor
1473  */
1474  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1475 
1476  scan->rs_rd = relation;
1477  scan->rs_snapshot = snapshot;
1478  scan->rs_nkeys = nkeys;
1479  scan->rs_bitmapscan = is_bitmapscan;
1480  scan->rs_samplescan = is_samplescan;
1481  scan->rs_strategy = NULL; /* set in initscan */
1482  scan->rs_allow_strat = allow_strat;
1483  scan->rs_allow_sync = allow_sync;
1484  scan->rs_temp_snap = temp_snap;
1485  scan->rs_parallel = parallel_scan;
1486 
1487  /*
1488  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1489  */
1490  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1491 
1492  /*
1493  * For a seqscan in a serializable transaction, acquire a predicate lock
1494  * on the entire relation. This is required not only to lock all the
1495  * matching tuples, but also to conflict with new insertions into the
1496  * table. In an indexscan, we take page locks on the index pages covering
1497  * the range specified in the scan qual, but in a heap scan there is
1498  * nothing more fine-grained to lock. A bitmap scan is a different story,
1499  * there we have already scanned the index and locked the index pages
1500  * covering the predicate. But in that case we still have to lock any
1501  * matching heap tuples.
1502  */
1503  if (!is_bitmapscan)
1504  PredicateLockRelation(relation, snapshot);
1505 
1506  /* we only need to set this up once */
1507  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1508 
1509  /*
1510  * we do this here instead of in initscan() because heap_rescan also calls
1511  * initscan() and we don't want to allocate memory again
1512  */
1513  if (nkeys > 0)
1514  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1515  else
1516  scan->rs_key = NULL;
1517 
1518  initscan(scan, key, false);
1519 
1520  return scan;
1521 }
bool rs_allow_sync
Definition: relscan.h:57
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2452
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:70
bool rs_bitmapscan
Definition: relscan.h:53
bool rs_pageatatime
Definition: relscan.h:55
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:74
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:50
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:58
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:1963
BufferAccessStrategy rs_strategy
Definition: relscan.h:65
Relation rs_rd
Definition: relscan.h:49
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:924
bool rs_allow_strat
Definition: relscan.h:56
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:220
bool rs_samplescan
Definition: relscan.h:54
#define RelationGetRelid(relation)
Definition: rel.h:407
ScanKey rs_key
Definition: relscan.h:52

◆ heap_beginscan_parallel()

HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1666 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_any, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, RestoreSnapshot(), and SnapshotAny.

Referenced by _bt_parallel_scan_and_sort(), ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

/*
 * heap_beginscan_parallel -- join a parallel heap scan described by
 * parallel_scan.  Restores (and registers) the serialized snapshot unless
 * the leader passed SnapshotAny.  The final argument makes temp_snap true
 * exactly when a snapshot was restored here, i.e. when the scan owns it.
 */
1667 {
1668  Snapshot snapshot;
1669 
1670  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1671 
1672  if (!parallel_scan->phs_snapshot_any)
1673  {
1674  /* Snapshot was serialized -- restore it */
1675  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1676  RegisterSnapshot(snapshot);
1677  }
1678  else
1679  {
1680  /* SnapshotAny passed by caller (not serialized) */
1681  snapshot = SnapshotAny;
1682  }
1683 
1684  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1685  true, true, true, false, false,
1686  !parallel_scan->phs_snapshot_any);
1687 }
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:43
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2127
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450
#define SnapshotAny
Definition: tqual.h:28
#define Assert(condition)
Definition: c.h:699
#define RelationGetRelid(relation)
Definition: rel.h:407

◆ heap_beginscan_sampling()

HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1440 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

/*
 * heap_beginscan_sampling -- begin a scan for TABLESAMPLE.
 *
 * Strategy, syncscan, and pagemode behavior are controlled by the caller's
 * flags; the trailing constants set is_bitmapscan=false, is_samplescan=true,
 * temp_snap=false.
 */
1443 {
1444  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1445  allow_strat, allow_sync, allow_pagemode,
1446  false, true, false);
1447 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450

◆ heap_beginscan_strat()

HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1422 of file heapam.c.

References heap_beginscan_internal().

Referenced by bt_check_every_level(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

/*
 * heap_beginscan_strat -- begin a sequential scan with caller control over
 * buffer access strategy (allow_strat) and synchronized scanning
 * (allow_sync).  Pagemode is allowed; the trailing constants set
 * is_bitmapscan=false, is_samplescan=false, temp_snap=false.
 */
1425 {
1426  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1427  allow_strat, allow_sync, true,
1428  false, false, false);
1429 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1450

◆ heap_delete()

HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd,
bool  changingPart 
)

Definition at line 3060 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetMovedPartitions, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, 
HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLH_DELETE_IS_PARTITION_MOVE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3063 {
3064  HTSU_Result result;
3066  ItemId lp;
3067  HeapTupleData tp;
3068  Page page;
3069  BlockNumber block;
3070  Buffer buffer;
3071  Buffer vmbuffer = InvalidBuffer;
3072  TransactionId new_xmax;
3073  uint16 new_infomask,
3074  new_infomask2;
3075  bool have_tuple_lock = false;
3076  bool iscombo;
3077  bool all_visible_cleared = false;
3078  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3079  bool old_key_copied = false;
3080 
3081  Assert(ItemPointerIsValid(tid));
3082 
3083  /*
3084  * Forbid this during a parallel operation, lest it allocate a combocid.
3085  * Other workers might need that combocid for visibility checks, and we
3086  * have no provision for broadcasting it to them.
3087  */
3088  if (IsInParallelMode())
3089  ereport(ERROR,
3090  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3091  errmsg("cannot delete tuples during a parallel operation")));
3092 
3093  block = ItemPointerGetBlockNumber(tid);
3094  buffer = ReadBuffer(relation, block);
3095  page = BufferGetPage(buffer);
3096 
3097  /*
3098  * Before locking the buffer, pin the visibility map page if it appears to
3099  * be necessary. Since we haven't got the lock yet, someone else might be
3100  * in the middle of changing this, so we'll need to recheck after we have
3101  * the lock.
3102  */
3103  if (PageIsAllVisible(page))
3104  visibilitymap_pin(relation, block, &vmbuffer);
3105 
3107 
3108  /*
3109  * If we didn't pin the visibility map page and the page has become all
3110  * visible while we were busy locking the buffer, we'll have to unlock and
3111  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3112  * unfortunate, but hopefully shouldn't happen often.
3113  */
3114  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3115  {
3116  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3117  visibilitymap_pin(relation, block, &vmbuffer);
3119  }
3120 
3121  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3122  Assert(ItemIdIsNormal(lp));
3123 
3124  tp.t_tableOid = RelationGetRelid(relation);
3125  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3126  tp.t_len = ItemIdGetLength(lp);
3127  tp.t_self = *tid;
3128 
3129 l1:
3130  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3131 
3132  if (result == HeapTupleInvisible)
3133  {
3134  UnlockReleaseBuffer(buffer);
3135  ereport(ERROR,
3136  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3137  errmsg("attempted to delete invisible tuple")));
3138  }
3139  else if (result == HeapTupleBeingUpdated && wait)
3140  {
3141  TransactionId xwait;
3142  uint16 infomask;
3143 
3144  /* must copy state data before unlocking buffer */
3145  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3146  infomask = tp.t_data->t_infomask;
3147 
3148  /*
3149  * Sleep until concurrent transaction ends -- except when there's a
3150  * single locker and it's our own transaction. Note we don't care
3151  * which lock mode the locker has, because we need the strongest one.
3152  *
3153  * Before sleeping, we need to acquire tuple lock to establish our
3154  * priority for the tuple (see heap_lock_tuple). LockTuple will
3155  * release us when we are next-in-line for the tuple.
3156  *
3157  * If we are forced to "start over" below, we keep the tuple lock;
3158  * this arranges that we stay at the head of the line while rechecking
3159  * tuple state.
3160  */
3161  if (infomask & HEAP_XMAX_IS_MULTI)
3162  {
3163  /* wait for multixact */
3164  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3166  {
3167  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3168 
3169  /* acquire tuple lock, if necessary */
3171  LockWaitBlock, &have_tuple_lock);
3172 
3173  /* wait for multixact */
3175  relation, &(tp.t_self), XLTW_Delete,
3176  NULL);
3178 
3179  /*
3180  * If xwait had just locked the tuple then some other xact
3181  * could update this tuple before we get to this point. Check
3182  * for xmax change, and start over if so.
3183  */
3184  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3186  xwait))
3187  goto l1;
3188  }
3189 
3190  /*
3191  * You might think the multixact is necessarily done here, but not
3192  * so: it could have surviving members, namely our own xact or
3193  * other subxacts of this backend. It is legal for us to delete
3194  * the tuple in either case, however (the latter case is
3195  * essentially a situation of upgrading our former shared lock to
3196  * exclusive). We don't bother changing the on-disk hint bits
3197  * since we are about to overwrite the xmax altogether.
3198  */
3199  }
3200  else if (!TransactionIdIsCurrentTransactionId(xwait))
3201  {
3202  /*
3203  * Wait for regular transaction to end; but first, acquire tuple
3204  * lock.
3205  */
3206  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3208  LockWaitBlock, &have_tuple_lock);
3209  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3211 
3212  /*
3213  * xwait is done, but if xwait had just locked the tuple then some
3214  * other xact could update this tuple before we get to this point.
3215  * Check for xmax change, and start over if so.
3216  */
3217  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3219  xwait))
3220  goto l1;
3221 
3222  /* Otherwise check if it committed or aborted */
3223  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3224  }
3225 
3226  /*
3227  * We may overwrite if previous xmax aborted, or if it committed but
3228  * only locked the tuple without updating it.
3229  */
3230  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3233  result = HeapTupleMayBeUpdated;
3234  else
3235  result = HeapTupleUpdated;
3236  }
3237 
3238  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3239  {
3240  /* Perform additional check for transaction-snapshot mode RI updates */
3241  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3242  result = HeapTupleUpdated;
3243  }
3244 
3245  if (result != HeapTupleMayBeUpdated)
3246  {
3247  Assert(result == HeapTupleSelfUpdated ||
3248  result == HeapTupleUpdated ||
3249  result == HeapTupleBeingUpdated);
3251  hufd->ctid = tp.t_data->t_ctid;
3253  if (result == HeapTupleSelfUpdated)
3254  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3255  else
3256  hufd->cmax = InvalidCommandId;
3257  UnlockReleaseBuffer(buffer);
3258  if (have_tuple_lock)
3259  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3260  if (vmbuffer != InvalidBuffer)
3261  ReleaseBuffer(vmbuffer);
3262  return result;
3263  }
3264 
3265  /*
3266  * We're about to do the actual delete -- check for conflict first, to
3267  * avoid possibly having to roll back work we've just done.
3268  *
3269  * This is safe without a recheck as long as there is no possibility of
3270  * another process scanning the page between this check and the delete
3271  * being visible to the scan (i.e., an exclusive buffer content lock is
3272  * continuously held from this point until the tuple delete is visible).
3273  */
3274  CheckForSerializableConflictIn(relation, &tp, buffer);
3275 
3276  /* replace cid with a combo cid if necessary */
3277  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3278 
3279  /*
3280  * Compute replica identity tuple before entering the critical section so
3281  * we don't PANIC upon a memory allocation failure.
3282  */
3283  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3284 
3285  /*
3286  * If this is the first possibly-multixact-able operation in the current
3287  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3288  * certain that the transaction will never become a member of any older
3289  * MultiXactIds than that. (We have to do this even if we end up just
3290  * using our own TransactionId below, since some other backend could
3291  * incorporate our XID into a MultiXact immediately afterwards.)
3292  */
3294 
3297  xid, LockTupleExclusive, true,
3298  &new_xmax, &new_infomask, &new_infomask2);
3299 
3301 
3302  /*
3303  * If this transaction commits, the tuple will become DEAD sooner or
3304  * later. Set flag that this page is a candidate for pruning once our xid
3305  * falls below the OldestXmin horizon. If the transaction finally aborts,
3306  * the subsequent page pruning will be a no-op and the hint will be
3307  * cleared.
3308  */
3309  PageSetPrunable(page, xid);
3310 
3311  if (PageIsAllVisible(page))
3312  {
3313  all_visible_cleared = true;
3314  PageClearAllVisible(page);
3315  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3316  vmbuffer, VISIBILITYMAP_VALID_BITS);
3317  }
3318 
3319  /* store transaction information of xact deleting the tuple */
3322  tp.t_data->t_infomask |= new_infomask;
3323  tp.t_data->t_infomask2 |= new_infomask2;
3325  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3326  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3327  /* Make sure there is no forward chain link in t_ctid */
3328  tp.t_data->t_ctid = tp.t_self;
3329 
3330  /* Signal that this is actually a move into another partition */
3331  if (changingPart)
3333 
3334  MarkBufferDirty(buffer);
3335 
3336  /*
3337  * XLOG stuff
3338  *
3339  * NB: heap_abort_speculative() uses the same xlog record and replay
3340  * routines.
3341  */
3342  if (RelationNeedsWAL(relation))
3343  {
3344  xl_heap_delete xlrec;
3345  XLogRecPtr recptr;
3346 
3347  /* For logical decode we need combocids to properly decode the catalog */
3349  log_heap_new_cid(relation, &tp);
3350 
3351  xlrec.flags = 0;
3352  if (all_visible_cleared)
3354  if (changingPart)
3357  tp.t_data->t_infomask2);
3359  xlrec.xmax = new_xmax;
3360 
3361  if (old_key_tuple != NULL)
3362  {
3363  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3365  else
3367  }
3368 
3369  XLogBeginInsert();
3370  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3371 
3372  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3373 
3374  /*
3375  * Log replica identity of the deleted tuple if there is one
3376  */
3377  if (old_key_tuple != NULL)
3378  {
3379  xl_heap_header xlhdr;
3380 
3381  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3382  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3383  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3384 
3385  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3386  XLogRegisterData((char *) old_key_tuple->t_data
3388  old_key_tuple->t_len
3390  }
3391 
3392  /* filtering by origin on a row level is much more efficient */
3394 
3395  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3396 
3397  PageSetLSN(page, recptr);
3398  }
3399 
3400  END_CRIT_SECTION();
3401 
3402  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3403 
3404  if (vmbuffer != InvalidBuffer)
3405  ReleaseBuffer(vmbuffer);
3406 
3407  /*
3408  * If the tuple has toasted out-of-line attributes, we need to delete
3409  * those items too. We have to do this before releasing the buffer
3410  * because we need to look at the contents of the tuple, but it's OK to
3411  * release the content lock on the buffer first.
3412  */
3413  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3414  relation->rd_rel->relkind != RELKIND_MATVIEW)
3415  {
3416  /* toast table entries should never be recursively toasted */
3418  }
3419  else if (HeapTupleHasExternal(&tp))
3420  toast_delete(relation, &tp, false);
3421 
3422  /*
3423  * Mark tuple for invalidation from system caches at next command
3424  * boundary. We have to do this before releasing the buffer because we
3425  * need to look at the contents of the tuple.
3426  */
3427  CacheInvalidateHeapTuple(relation, &tp, NULL);
3428 
3429  /* Now we can release the buffer */
3430  ReleaseBuffer(buffer);
3431 
3432  /*
3433  * Release the lmgr tuple lock, if we had it.
3434  */
3435  if (have_tuple_lock)
3436  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3437 
3438  pgstat_count_heap_delete(relation);
3439 
3440  if (old_key_tuple != NULL && old_key_copied)
3441  heap_freetuple(old_key_tuple);
3442 
3443  return HeapTupleMayBeUpdated;
3444 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:370
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1596
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:181
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7958
#define HEAP_XMAX_BITS
Definition: htup_details.h:267
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2999
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:474
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:765
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:3021
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:514
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:84
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1773
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4280
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:187
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:106
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:380
unsigned short uint16
Definition: c.h:324
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:905
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:204
ItemPointerData t_ctid
Definition: htup_details.h:157
#define HeapTupleHeaderSetMovedPartitions(tup)
Definition: htup_details.h:450
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:105
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7374
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:417
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:111
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:385
HTSU_Result
Definition: snapshot.h:121
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:410
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5400
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define InvalidSnapshot
Definition: snapshot.h:25
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:564
#define InvalidCommandId
Definition: c.h:491
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:227
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:275
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:205
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2342
#define HEAP_MOVED
Definition: htup_details.h:213
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5351
TransactionId MultiXactId
Definition: c.h:484
#define PageClearAllVisible(page)
Definition: bufpage.h:385
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:699
uint8 infobits_set
Definition: heapam_xlog.h:107
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:8034
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:7207
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:145
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:510
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:691
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
#define XLH_DELETE_IS_PARTITION_MOVE
Definition: heapam_xlog.h:96
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:407
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93

◆ heap_endscan()

void heap_endscan ( HeapScanDesc  scan)

Definition at line 1572 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1573 {
1574  /* Note: no locking manipulations needed */
1575 
1576  /*
1577  * unpin scan buffers
1578  */
1579  if (BufferIsValid(scan->rs_cbuf))
1580  ReleaseBuffer(scan->rs_cbuf);
1581 
1582  /*
1583  * decrement relation reference count and free scan descriptor storage
1584  */
1586 
1587  if (scan->rs_key)
1588  pfree(scan->rs_key);
1589 
1590  if (scan->rs_strategy != NULL)
1592 
1593  if (scan->rs_temp_snap)
1595 
1596  pfree(scan);
1597 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:1031
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:1976
Snapshot rs_snapshot
Definition: relscan.h:50
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
bool rs_temp_snap
Definition: relscan.h:58
BufferAccessStrategy rs_strategy
Definition: relscan.h:65
Relation rs_rd
Definition: relscan.h:49
Buffer rs_cbuf
Definition: relscan.h:72
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:597
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:52

◆ heap_execute_freeze_tuple()

void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 7009 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

7010 {
7011  HeapTupleHeaderSetXmax(tuple, frz->xmax);
7012 
7013  if (frz->frzflags & XLH_FREEZE_XVAC)
7015 
7016  if (frz->frzflags & XLH_INVALID_XVAC)
7018 
7019  tuple->t_infomask = frz->t_infomask;
7020  tuple->t_infomask2 = frz->t_infomask2;
7021 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:428
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:385
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:320
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:316
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:315

◆ heap_fetch()

bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1903 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1909 {
1910  ItemPointer tid = &(tuple->t_self);
1911  ItemId lp;
1912  Buffer buffer;
1913  Page page;
1914  OffsetNumber offnum;
1915  bool valid;
1916 
1917  /*
1918  * Fetch and pin the appropriate page of the relation.
1919  */
1920  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1921 
1922  /*
1923  * Need share lock on buffer to examine tuple commit status.
1924  */
1925  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1926  page = BufferGetPage(buffer);
1927  TestForOldSnapshot(snapshot, relation, page);
1928 
1929  /*
1930  * We'd better check for out-of-range offnum in case of VACUUM since the
1931  * TID was obtained.
1932  */
1933  offnum = ItemPointerGetOffsetNumber(tid);
1934  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1935  {
1936  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1937  if (keep_buf)
1938  *userbuf = buffer;
1939  else
1940  {
1941  ReleaseBuffer(buffer);
1942  *userbuf = InvalidBuffer;
1943  }
1944  tuple->t_data = NULL;
1945  return false;
1946  }
1947 
1948  /*
1949  * get the item line pointer corresponding to the requested tid
1950  */
1951  lp = PageGetItemId(page, offnum);
1952 
1953  /*
1954  * Must check for deleted tuple.
1955  */
1956  if (!ItemIdIsNormal(lp))
1957  {
1958  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1959  if (keep_buf)
1960  *userbuf = buffer;
1961  else
1962  {
1963  ReleaseBuffer(buffer);
1964  *userbuf = InvalidBuffer;
1965  }
1966  tuple->t_data = NULL;
1967  return false;
1968  }
1969 
1970  /*
1971  * fill in *tuple fields
1972  */
1973  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1974  tuple->t_len = ItemIdGetLength(lp);
1975  tuple->t_tableOid = RelationGetRelid(relation);
1976 
1977  /*
1978  * check time qualification of tuple, then release lock
1979  */
1980  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1981 
1982  if (valid)
1983  PredicateLockTuple(relation, tuple, snapshot);
1984 
1985  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1986 
1987  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1988 
1989  if (valid)
1990  {
1991  /*
1992  * All checks passed, so return the tuple as valid. Caller is now
1993  * responsible for releasing the buffer.
1994  */
1995  *userbuf = buffer;
1996 
1997  /* Count the successful fetch against appropriate rel, if any */
1998  if (stats_relation != NULL)
1999  pgstat_count_heap_fetch(stats_relation);
2000 
2001  return true;
2002  }
2003 
2004  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
2005  if (keep_buf)
2006  *userbuf = buffer;
2007  else
2008  {
2009  ReleaseBuffer(buffer);
2010  *userbuf = InvalidBuffer;
2011  }
2012 
2013  return false;
2014 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3899
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1286
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2497
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:407
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74

◆ heap_finish_speculative()

void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6155 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6156 {
6157  Buffer buffer;
6158  Page page;
6159  OffsetNumber offnum;
6160  ItemId lp = NULL;
6161  HeapTupleHeader htup;
6162 
6163  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6165  page = (Page) BufferGetPage(buffer);
6166 
6167  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6168  if (PageGetMaxOffsetNumber(page) >= offnum)
6169  lp = PageGetItemId(page, offnum);
6170 
6171  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6172  elog(ERROR, "invalid lp");
6173 
6174  htup = (HeapTupleHeader) PageGetItem(page, lp);
6175 
6176  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6178  "invalid speculative token constant");
6179 
6180  /* NO EREPORT(ERROR) from here till changes are logged */
6182 
6184 
6185  MarkBufferDirty(buffer);
6186 
6187  /*
6188  * Replace the speculative insertion token with a real t_ctid, pointing to
6189  * itself like it does on regular tuples.
6190  */
6191  htup->t_ctid = tuple->t_self;
6192 
6193  /* XLOG stuff */
6194  if (RelationNeedsWAL(relation))
6195  {
6196  xl_heap_confirm xlrec;
6197  XLogRecPtr recptr;
6198 
6199  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6200 
6201  XLogBeginInsert();
6202 
6203  /* We want the same filtering on this as on a plain insert */
6205 
6206  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6207  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6208 
6209  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6210 
6211  PageSetLSN(page, recptr);
6212  }
6213 
6214  END_CRIT_SECTION();
6215 
6216  UnlockReleaseBuffer(buffer);
6217 }
OffsetNumber offnum
Definition: heapam_xlog.h:296
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define MaxOffsetNumber
Definition: off.h:28
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:434
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define StaticAssertStmt(condition, errmessage)
Definition: c.h:795
#define SpecTokenOffsetNumber
Definition: htup_details.h:296
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:157
ItemPointerData t_self
Definition: htup.h:65
#define REGBUF_STANDARD
Definition: xloginsert.h:34
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define SizeOfHeapConfirm
Definition: heapam_xlog.h:299
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:699
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:510
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define XLOG_HEAP_CONFIRM
Definition: heapam_xlog.h:37

◆ heap_freeze_tuple()

bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  relfrozenxid,
TransactionId  relminmxid,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 7030 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

7033 {
7035  bool do_freeze;
7036  bool tuple_totally_frozen;
7037 
7038  do_freeze = heap_prepare_freeze_tuple(tuple,
7040  cutoff_xid, cutoff_multi,
7041  &frz, &tuple_totally_frozen);
7042 
7043  /*
7044  * Note that because this is not a WAL-logged operation, we don't need to
7045  * fill in the offset in the freeze record.
7046  */
7047 
7048  if (do_freeze)
7049  heap_execute_freeze_tuple(tuple, &frz);
7050  return do_freeze;
7051 }
bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId relfrozenxid, TransactionId relminmxid, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
Definition: heapam.c:6797
void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
Definition: heapam.c:7009
TransactionId relminmxid
Definition: pg_class.h:72
TransactionId relfrozenxid
Definition: pg_class.h:71

◆ heap_get_latest_tid()

void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2211 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIndicatesMovedPartitions, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2214 {
2215  BlockNumber blk;
2216  ItemPointerData ctid;
2217  TransactionId priorXmax;
2218 
2219  /* this is to avoid Assert failures on bad input */
2220  if (!ItemPointerIsValid(tid))
2221  return;
2222 
2223  /*
2224  * Since this can be called with user-supplied TID, don't trust the input
2225  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2226  * don't check t_ctid links again this way. Note that it would not do to
2227  * call it just once and save the result, either.)
2228  */
2229  blk = ItemPointerGetBlockNumber(tid);
2230  if (blk >= RelationGetNumberOfBlocks(relation))
2231  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2232  blk, RelationGetRelationName(relation));
2233 
2234  /*
2235  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2236  * need to examine, and *tid is the TID we will return if ctid turns out
2237  * to be bogus.
2238  *
2239  * Note that we will loop until we reach the end of the t_ctid chain.
2240  * Depending on the snapshot passed, there might be at most one visible
2241  * version of the row, but we don't try to optimize for that.
2242  */
2243  ctid = *tid;
2244  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2245  for (;;)
2246  {
2247  Buffer buffer;
2248  Page page;
2249  OffsetNumber offnum;
2250  ItemId lp;
2251  HeapTupleData tp;
2252  bool valid;
2253 
2254  /*
2255  * Read, pin, and lock the page.
2256  */
2257  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2258  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2259  page = BufferGetPage(buffer);
2260  TestForOldSnapshot(snapshot, relation, page);
2261 
2262  /*
2263  * Check for bogus item number. This is not treated as an error
2264  * condition because it can happen while following a t_ctid link. We
2265  * just assume that the prior tid is OK and return it unchanged.
2266  */
2267  offnum = ItemPointerGetOffsetNumber(&ctid);
2268  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2269  {
2270  UnlockReleaseBuffer(buffer);
2271  break;
2272  }
2273  lp = PageGetItemId(page, offnum);
2274  if (!ItemIdIsNormal(lp))
2275  {
2276  UnlockReleaseBuffer(buffer);
2277  break;
2278  }
2279 
2280  /* OK to access the tuple */
2281  tp.t_self = ctid;
2282  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2283  tp.t_len = ItemIdGetLength(lp);
2284  tp.t_tableOid = RelationGetRelid(relation);
2285 
2286  /*
2287  * After following a t_ctid link, we might arrive at an unrelated
2288  * tuple. Check for XMIN match.
2289  */
2290  if (TransactionIdIsValid(priorXmax) &&
2292  {
2293  UnlockReleaseBuffer(buffer);
2294  break;
2295  }
2296 
2297  /*
2298  * Check time qualification of tuple; if visible, set it as the new
2299  * result candidate.
2300  */
2301  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2302  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2303  if (valid)
2304  *tid = ctid;
2305 
2306  /*
2307  * If there's a valid t_ctid link, follow it, else we're done.
2308  */
2309  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2313  {
2314  UnlockReleaseBuffer(buffer);
2315  break;
2316  }
2317 
2318  ctid = tp.t_data->t_ctid;
2319  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2320  UnlockReleaseBuffer(buffer);
2321  } /* end of loop */
2322 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:370
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1596
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:474
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define HeapTupleHeaderIndicatesMovedPartitions(tup)
Definition: htup_details.h:453
uint32 BlockNumber
Definition: block.h:31
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3899
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:204
ItemPointerData t_ctid
Definition: htup_details.h:157
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define InvalidTransactionId
Definition: transam.h:31
#define RelationGetRelationName(relation)
Definition: rel.h:441
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define RelationGetNumberOfBlocks(reln)
Definition: bufmgr.h:199
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:318
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
bool ItemPointerEquals(ItemPointer pointer1, ItemPointer pointer2)
Definition: itemptr.c:29
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:407
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74

◆ heap_getnext()

HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1835 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), check_default_allows_bound(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_all_vacuum_rels(), get_database_list(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

/*
 * heap_getnext - advance the given heap scan in the given direction and
 * return the next tuple, or NULL at end of scan.  Dispatches to
 * heapgettup_pagemode() when the scan runs in page-at-a-time mode
 * (rs_pageatatime), else to the buffer-lock-per-tuple heapgettup().
 * The returned HeapTuple points into scan-local state (scan->rs_ctup).
 */
1836 {
1837  /* Note: no locking manipulations needed */
1838 
1839  HEAPDEBUG_1; /* heap_getnext( info ) */
1840 
1841  if (scan->rs_pageatatime)
1842  heapgettup_pagemode(scan, direction,
1843  scan->rs_nkeys, scan->rs_key);
1844  else
1845  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1846 
1847  /* a NULL t_data is how the heapgettup routines signal end-of-scan */
1848  if (scan->rs_ctup.t_data == NULL)
1849  {
1850  HEAPDEBUG_2; /* heap_getnext returning EOS */
1851  return NULL;
1852  }
1853 
1854  /*
1855  * if we get here it means we have a new current scan tuple, so point to
1856  * the proper return buffer and return the tuple.
1857  */
1858  HEAPDEBUG_3; /* heap_getnext returning tuple */
1859 
/*
 * NOTE(review): source line 1859 was lost in page extraction here; per the
 * References list above it is pgstat_count_heap_getnext(scan->rs_rd) --
 * confirm against heapam.c before relying on this rendering.
 */
1861 
1862  return &(scan->rs_ctup);
1863 }
#define HEAPDEBUG_2
Definition: heapam.c:1829
HeapTupleData rs_ctup
Definition: relscan.h:70
HeapTupleHeader t_data
Definition: htup.h:68
bool rs_pageatatime
Definition: relscan.h:55
#define HEAPDEBUG_1
Definition: heapam.c:1828
static void heapgettup(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:485
Relation rs_rd
Definition: relscan.h:49
#define HEAPDEBUG_3
Definition: heapam.c:1830
#define pgstat_count_heap_getnext(rel)
Definition: pgstat.h:1281
static void heapgettup_pagemode(HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
Definition: heapam.c:789
ScanKey rs_key
Definition: relscan.h:52

◆ heap_hot_search()

bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool all_dead 
)

Definition at line 2183 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), and ReleaseBuffer().

Referenced by _bt_check_unique(), and unique_key_recheck().

/*
 * heap_hot_search - convenience wrapper around heap_hot_search_buffer().
 *
 * Pins and share-locks the page containing *tid, searches the HOT chain
 * rooted there for a tuple visible under the given snapshot (first_call =
 * true), then unlocks and releases the buffer.  On a visible match, *tid is
 * updated by the callee to point at the matching chain member; *all_dead
 * (if non-NULL) reports whether the whole chain is dead to everyone.
 */
2185 {
2186  bool result;
2187  Buffer buffer;
2188  HeapTupleData heapTuple;
2189 
2190  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2191  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2192  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2193  &heapTuple, all_dead, true);
2194  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2195  ReleaseBuffer(buffer);
2196  return result;
2197 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
bool heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
Definition: heapam.c:2038
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23

◆ heap_hot_search_buffer()

bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 2038 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

/*
 * heap_hot_search_buffer - walk the HOT chain starting at *tid in the
 * already-pinned-and-locked buffer, returning true when a chain member is
 * visible under the snapshot (and updating *tid to that member's offset).
 * On later calls (first_call = false) the previously-returned member is
 * skipped so the caller can iterate.  *all_dead, if requested, is set true
 * only while every chain member examined so far is dead to all transactions.
 */
2041 {
2042  Page dp = (Page) BufferGetPage(buffer);
2043  TransactionId prev_xmax = InvalidTransactionId;
2044  OffsetNumber offnum;
2045  bool at_chain_start;
2046  bool valid;
2047  bool skip;
2048 
2049  /* If this is not the first call, previous call returned a (live!) tuple */
2050  if (all_dead)
2051  *all_dead = first_call;
2052 
2054 
/*
 * NOTE(review): source lines 2053 and 2055 were lost in page extraction
 * above; the References list (Assert, RecentGlobalXmin) suggests at least
 * an Assert involving RecentGlobalXmin lived there -- verify in heapam.c.
 */
2056  offnum = ItemPointerGetOffsetNumber(tid);
2057  at_chain_start = first_call;
2058  skip = !first_call;
2059 
2060  heapTuple->t_self = *tid;
2061 
2062  /* Scan through possible multiple members of HOT-chain */
2063  for (;;)
2064  {
2065  ItemId lp;
2066 
2067  /* check for bogus TID */
2068  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2069  break;
2070 
2071  lp = PageGetItemId(dp, offnum);
2072 
2073  /* check for unused, dead, or redirected items */
2074  if (!ItemIdIsNormal(lp))
2075  {
2076  /* We should only see a redirect at start of chain */
2077  if (ItemIdIsRedirected(lp) && at_chain_start)
2078  {
2079  /* Follow the redirect */
2080  offnum = ItemIdGetRedirect(lp);
2081  at_chain_start = false;
2082  continue;
2083  }
2084  /* else must be end of chain */
2085  break;
2086  }
2087 
2088  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2089  heapTuple->t_len = ItemIdGetLength(lp);
2090  heapTuple->t_tableOid = RelationGetRelid(relation);
2091  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2092 
2093  /*
2094  * Shouldn't see a HEAP_ONLY tuple at chain start.
2095  */
2096  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2097  break;
2098 
2099  /*
2100  * The xmin should match the previous xmax value, else chain is
2101  * broken.
2102  */
2103  if (TransactionIdIsValid(prev_xmax) &&
2104  !TransactionIdEquals(prev_xmax,
2105  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2106  break;
2107 
2108  /*
2109  * When first_call is true (and thus, skip is initially false) we'll
2110  * return the first tuple we find. But on later passes, heapTuple
2111  * will initially be pointing to the tuple we returned last time.
2112  * Returning it again would be incorrect (and would loop forever), so
2113  * we skip it and return the next match we find.
2114  */
2115  if (!skip)
2116  {
2117  /*
2118  * For the benefit of logical decoding, have t_self point at the
2119  * element of the HOT chain we're currently investigating instead
2120  * of the root tuple of the HOT chain. This is important because
2121  * the *Satisfies routine for historical mvcc snapshots needs the
2122  * correct tid to decide about the visibility in some cases.
2123  */
2124  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2125 
2126  /* If it's visible per the snapshot, we must return it */
2127  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2128  CheckForSerializableConflictOut(valid, relation, heapTuple,
2129  buffer, snapshot);
2130  /* reset to original, non-redirected, tid */
2131  heapTuple->t_self = *tid;
2132 
2133  if (valid)
2134  {
2135  ItemPointerSetOffsetNumber(tid, offnum);
2136  PredicateLockTuple(relation, heapTuple, snapshot);
2137  if (all_dead)
2138  *all_dead = false;
2139  return true;
2140  }
2141  }
2142  skip = false;
2143 
2144  /*
2145  * If we can't see it, maybe no one else can either. At caller
2146  * request, check whether all chain members are dead to all
2147  * transactions.
2148  *
2149  * Note: if you change the criterion here for what is "dead", fix the
2150  * planner's get_actual_variable_range() function to match.
2151  */
2152  if (all_dead && *all_dead &&
/*
 * NOTE(review): source line 2153 was lost in page extraction; the
 * References list suggests the second conjunct calls
 * HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin) negated -- confirm
 * against heapam.c.
 */
2154  *all_dead = false;
2155 
2156  /*
2157  * Check to see if HOT chain continues past this tuple; if so fetch
2158  * the next offnum and loop around.
2159  */
2160  if (HeapTupleIsHotUpdated(heapTuple))
2161  {
/*
 * NOTE(review): source lines 2162-2163 were lost in page extraction;
 * presumably an Assert that t_ctid stays on the same block (HOT chains
 * never cross pages) -- verify in heapam.c.
 */
2164  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2165  at_chain_start = false;
2166  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2167  }
2168  else
2169  break; /* end of chain */
2170  }
2171 
2172  return false;
2173 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:370
#define ItemIdIsRedirected(itemId)
Definition: itemid.h:105
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
uint32 TransactionId
Definition: c.h:474
static const char * skip[]
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define ItemIdGetRedirect(itemId)
Definition: itemid.h:77
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3899
bool HeapTupleIsSurelyDead(HeapTuple htup, TransactionId OldestXmin)
Definition: tqual.c:1420
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define HeapTupleIsHotUpdated(tuple)
Definition: htup_details.h:694
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_ctid
Definition: htup_details.h:157
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define HeapTupleIsHeapOnly(tuple)
Definition: htup_details.h:703
#define Assert(condition)
Definition: c.h:699
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
#define HeapTupleHeaderGetXmin(tup)
Definition: htup_details.h:318
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2497
#define ItemPointerSetOffsetNumber(pointer, offsetNumber)
Definition: itemptr.h:126
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
#define RelationGetRelid(relation)
Definition: rel.h:407
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define ItemPointerSet(pointer, blockNumber, offNum)
Definition: itemptr.h:105

◆ heap_inplace_update()

void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6387 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

/*
 * heap_inplace_update - overwrite a tuple's data in place, without any MVCC
 * versioning, requiring the replacement to be exactly the same length and
 * header size as the existing tuple.  WAL-logs the overwrite (XLOG_HEAP_INPLACE)
 * when the relation needs WAL, then sends a cache invalidation for the tuple.
 * Disallowed inside a parallel operation.
 */
6388 {
6389  Buffer buffer;
6390  Page page;
6391  OffsetNumber offnum;
6392  ItemId lp = NULL;
6393  HeapTupleHeader htup;
6394  uint32 oldlen;
6395  uint32 newlen;
6396 
6397  /*
6398  * For now, parallel operations are required to be strictly read-only.
6399  * Unlike a regular update, this should never create a combo CID, so it
6400  * might be possible to relax this restriction, but not without more
6401  * thought and testing. It's not clear that it would be useful, anyway.
6402  */
6403  if (IsInParallelMode())
6404  ereport(ERROR,
6405  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6406  errmsg("cannot update tuples during a parallel operation")));
6407 
6408  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
/*
 * NOTE(review): source line 6409 was lost in page extraction; per the
 * References list it is LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE) --
 * confirm against heapam.c (the rest of the function assumes the buffer
 * content lock is held).
 */
6410  page = (Page) BufferGetPage(buffer);
6411 
6412  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6413  if (PageGetMaxOffsetNumber(page) >= offnum)
6414  lp = PageGetItemId(page, offnum);
6415 
6416  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6417  elog(ERROR, "invalid lp")
6418 
6419  htup = (HeapTupleHeader) PageGetItem(page, lp);
6420 
6421  /* in-place update requires identical data length and header offset */
6422  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6423  newlen = tuple->t_len - tuple->t_data->t_hoff;
6424  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6425  elog(ERROR, "wrong tuple length");
6426 
6427  /* NO EREPORT(ERROR) from here till changes are logged */
/*
 * NOTE(review): source line 6427 was lost in page extraction; per the
 * References list it is START_CRIT_SECTION() -- confirm against heapam.c.
 */
6428 
6429  memcpy((char *) htup + htup->t_hoff,
6430  (char *) tuple->t_data + tuple->t_data->t_hoff,
6431  newlen);
6432 
6433  MarkBufferDirty(buffer);
6434 
6435  /* XLOG stuff */
6436  if (RelationNeedsWAL(relation))
6437  {
6438  xl_heap_inplace xlrec;
6439  XLogRecPtr recptr;
6440 
6441  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6442 
6443  XLogBeginInsert();
6444  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6445 
6446  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6447  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6448 
6449  /* inplace updates aren't decoded atm, don't log the origin */
6450 
6451  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6452 
6453  PageSetLSN(page, recptr);
6454  }
6455 
6456  END_CRIT_SECTION();
6457 
6458  UnlockReleaseBuffer(buffer);
6459 
6460  /*
6461  * Send out shared cache inval if necessary. Note that because we only
6462  * pass the new version of the tuple, this mustn't be used for any
6463  * operations that could change catcache lookup keys. But we aren't
6464  * bothering with index updates either, so that's true a fortiori.
6465  */
/*
 * NOTE(review): source line 6466 was lost in page extraction; per the
 * References list (IsBootstrapProcessingMode) it is likely
 * "if (!IsBootstrapProcessingMode())" guarding the call below -- confirm.
 */
6467  CacheInvalidateHeapTuple(relation, tuple, NULL);
6468 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define SizeOfHeapInplace
Definition: heapam_xlog.h:308
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:68
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:905
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define ERROR
Definition: elog.h:43
ItemPointerData t_self
Definition: htup.h:65
uint32 t_len
Definition: htup.h:64
#define REGBUF_STANDARD
Definition: xloginsert.h:34
unsigned int uint32
Definition: c.h:325
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
OffsetNumber offnum
Definition: heapam_xlog.h:304
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define XLOG_HEAP_INPLACE
Definition: heapam_xlog.h:39
#define RelationNeedsWAL(relation)
Definition: rel.h:510
#define IsBootstrapProcessingMode()
Definition: miscadmin.h:372
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74

◆ heap_insert()

Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2441 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

/*
 * heap_insert - insert a tuple into the relation, returning its OID.
 *
 * Prepares the tuple (header fields, OID assignment, toasting) via
 * heap_prepare_insert(), finds a target buffer, places the tuple, clears
 * the page's all-visible bit if needed, WAL-logs the insert (unless
 * HEAP_INSERT_SKIP_WAL), sends cache invalidation, and bumps insert stats.
 * When heaptup is a toasted private copy, t_self is copied back into the
 * caller's tuple before the copy is freed.
 */
2443 {
/*
 * NOTE(review): source line 2444 was lost in page extraction; per the
 * References list it declares and initializes the inserting transaction id
 * (TransactionId xid = GetCurrentTransactionId()) used below -- confirm.
 */
2445  HeapTuple heaptup;
2446  Buffer buffer;
2447  Buffer vmbuffer = InvalidBuffer;
2448  bool all_visible_cleared = false;
2449 
2450  /*
2451  * Fill in tuple header fields, assign an OID, and toast the tuple if
2452  * necessary.
2453  *
2454  * Note: below this point, heaptup is the data we actually intend to store
2455  * into the relation; tup is the caller's original untoasted data.
2456  */
2457  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2458 
2459  /*
2460  * Find buffer to insert this tuple into. If the page is all visible,
2461  * this will also pin the requisite visibility map page.
2462  */
2463  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2464  InvalidBuffer, options, bistate,
2465  &vmbuffer, NULL);
2466 
2467  /*
2468  * We're about to do the actual insert -- but check for conflict first, to
2469  * avoid possibly having to roll back work we've just done.
2470  *
2471  * This is safe without a recheck as long as there is no possibility of
2472  * another process scanning the page between this check and the insert
2473  * being visible to the scan (i.e., an exclusive buffer content lock is
2474  * continuously held from this point until the tuple insert is visible).
2475  *
2476  * For a heap insert, we only need to check for table-level SSI locks. Our
2477  * new tuple can't possibly conflict with existing tuple locks, and heap
2478  * page locks are only consolidated versions of tuple locks; they do not
2479  * lock "gaps" as index page locks do. So we don't need to specify a
2480  * buffer when making the call, which makes for a faster check.
2481  */
/*
 * NOTE(review): source lines 2482 and 2485 were lost in page extraction;
 * per the References list they are the CheckForSerializableConflictIn()
 * call described above and START_CRIT_SECTION() -- confirm in heapam.c.
 */
2483 
2484  /* NO EREPORT(ERROR) from here till changes are logged */
2486 
2487  RelationPutHeapTuple(relation, buffer, heaptup,
2488  (options & HEAP_INSERT_SPECULATIVE) != 0);
2489 
2490  if (PageIsAllVisible(BufferGetPage(buffer)))
2491  {
2492  all_visible_cleared = true;
/*
 * NOTE(review): source line 2493 lost in extraction; the References list
 * (PageClearAllVisible) indicates it clears the page's all-visible flag.
 */
2494  visibilitymap_clear(relation,
2495  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2496  vmbuffer, VISIBILITYMAP_VALID_BITS);
2497  }
2498 
2499  /*
2500  * XXX Should we set PageSetPrunable on this page ?
2501  *
2502  * The inserting transaction may eventually abort thus making this tuple
2503  * DEAD and hence available for pruning. Though we don't want to optimize
2504  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2505  * aborted tuple will never be pruned until next vacuum is triggered.
2506  *
2507  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2508  */
2509 
2510  MarkBufferDirty(buffer);
2511 
2512  /* XLOG stuff */
2513  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2514  {
2515  xl_heap_insert xlrec;
2516  xl_heap_header xlhdr;
2517  XLogRecPtr recptr;
2518  Page page = BufferGetPage(buffer);
2519  uint8 info = XLOG_HEAP_INSERT;
2520  int bufflags = 0;
2521 
2522  /*
2523  * If this is a catalog, we need to transmit combocids to properly
2524  * decode, so log that as well.
2525  */
/*
 * NOTE(review): source line 2526 lost in extraction; presumably the
 * "if (RelationIsAccessibleInLogicalDecoding(relation))" guard for the
 * call below -- confirm against heapam.c.
 */
2527  log_heap_new_cid(relation, heaptup);
2528 
2529  /*
2530  * If this is the single and first tuple on page, we can reinit the
2531  * page instead of restoring the whole thing. Set flag, and hide
2532  * buffer references from XLogInsert.
2533  */
2534  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
/*
 * NOTE(review): source line 2535 (the second conjunct, per the References
 * list a PageGetMaxOffsetNumber(page) == FirstOffsetNumber test) was lost
 * in extraction -- confirm.
 */
2536  {
2537  info |= XLOG_HEAP_INIT_PAGE;
2538  bufflags |= REGBUF_WILL_INIT;
2539  }
2540 
2541  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2542  xlrec.flags = 0;
2543  if (all_visible_cleared)
/*
 * NOTE(review): source lines 2544-2547 lost in extraction; per the
 * References list they set XLH_INSERT_ALL_VISIBLE_CLEARED and, for
 * speculative inserts, XLH_INSERT_IS_SPECULATIVE in xlrec.flags -- confirm.
 */
2548 
2549  /*
2550  * For logical decoding, we need the tuple even if we're doing a full
2551  * page write, so make sure it's included even if we take a full-page
2552  * image. (XXX We could alternatively store a pointer into the FPW).
2553  */
2554  if (RelationIsLogicallyLogged(relation))
2555  {
/*
 * NOTE(review): source line 2556 lost in extraction; presumably sets
 * xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE (see References list).
 */
2557  bufflags |= REGBUF_KEEP_DATA;
2558  }
2559 
2560  XLogBeginInsert();
2561  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2562 
2563  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2564  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2565  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2566 
2567  /*
2568  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2569  * write the whole page to the xlog, we don't need to store
2570  * xl_heap_header in the xlog.
2571  */
2572  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2573  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2574  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
/*
 * NOTE(review): source line 2575, the XLogRegisterBufData(0, ...) call
 * whose continuation arguments follow, was lost in extraction -- confirm.
 */
2576  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2577  heaptup->t_len - SizeofHeapTupleHeader);
2578 
2579  /* filtering by origin on a row level is much more efficient */
/*
 * NOTE(review): source line 2580 lost in extraction; per the References
 * list it is XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN) -- confirm.
 */
2581 
2582  recptr = XLogInsert(RM_HEAP_ID, info);
2583 
2584  PageSetLSN(page, recptr);
2585  }
2586 
2587  END_CRIT_SECTION();
2588 
2589  UnlockReleaseBuffer(buffer);
2590  if (vmbuffer != InvalidBuffer)
2591  ReleaseBuffer(vmbuffer);
2592 
2593  /*
2594  * If tuple is cachable, mark it for invalidation from the caches in case
2595  * we abort. Note it is OK to do this after releasing the buffer, because
2596  * the heaptup data structure is all in local memory, not in the shared
2597  * buffer.
2598  */
2599  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2600 
2601  /* Note: speculative insertions are counted too, even if aborted later */
2602  pgstat_count_heap_insert(relation, 1);
2603 
2604  /*
2605  * If heaptup is a private copy, release it. Don't forget to copy t_self
2606  * back to the caller's image, too.
2607  */
2608  if (heaptup != tup)
2609  {
2610  tup->t_self = heaptup->t_self;
2611  heap_freetuple(heaptup);
2612  }
2613 
2614  return HeapTupleGetOid(tup);
2615 }
void XLogRegisterBufData(uint8 block_id, char *data, int len)
Definition: xloginsert.c:361
#define SizeofHeapTupleHeader
Definition: htup_details.h:181
#define XLOG_HEAP_INSERT
Definition: heapam_xlog.h:32
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7958
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
Definition: heapam.c:2625
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:474
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
unsigned char uint8
Definition: c.h:323
#define XLH_INSERT_IS_SPECULATIVE
Definition: heapam_xlog.h:68
#define InvalidBuffer
Definition: buf.h:25
#define REGBUF_WILL_INIT
Definition: xloginsert.h:32
uint16 t_infomask2
Definition: heapam_xlog.h:144
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
#define HEAP_INSERT_SKIP_WAL
Definition: heapam.h:28
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:580
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1773
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void RelationPutHeapTuple(Relation relation, Buffer buffer, HeapTuple tuple, bool token)
Definition: hio.c:36
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4280
#define XLOG_HEAP_INIT_PAGE
Definition: heapam_xlog.h:46
#define HEAP_INSERT_SPECULATIVE
Definition: heapam.h:31
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:68
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define XLH_INSERT_CONTAINS_NEW_TUPLE
Definition: heapam_xlog.h:69
ItemPointerData t_self
Definition: htup.h:65
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:417
uint32 t_len
Definition: htup.h:64
#define FirstOffsetNumber
Definition: off.h:27
#define REGBUF_STANDARD
Definition: xloginsert.h:34
Buffer RelationGetBufferForTuple(Relation relation, Size len, Buffer otherBuffer, int options, BulkInsertState bistate, Buffer *vmbuffer, Buffer *vmbuffer_other)
Definition: hio.c:313
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:564
#define REGBUF_KEEP_DATA
Definition: xloginsert.h:37
#define PageClearAllVisible(page)
Definition: bufpage.h:385
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:699
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:215
uint16 t_infomask
Definition: heapam_xlog.h:145
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:510
#define SizeOfHeapInsert
Definition: heapam_xlog.h:160
#define XLH_INSERT_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:66
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
void pgstat_count_heap_insert(Relation rel, PgStat_Counter n)
Definition: pgstat.c:1907
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:712
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
OffsetNumber offnum
Definition: heapam_xlog.h:154
#define SizeOfHeapHeader
Definition: heapam_xlog.h:149
Pointer Page
Definition: bufpage.h:74

◆ heap_lock_tuple()

HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
HeapUpdateFailureData hufd 
)

Definition at line 4685 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), 
VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().

4689 {
4690  HTSU_Result result;
4691  ItemPointer tid = &(tuple->t_self);
4692  ItemId lp;
4693  Page page;
4694  Buffer vmbuffer = InvalidBuffer;
4695  BlockNumber block;
4696  TransactionId xid,
4697  xmax;
4698  uint16 old_infomask,
4699  new_infomask,
4700  new_infomask2;
4701  bool first_time = true;
4702  bool have_tuple_lock = false;
4703  bool cleared_all_frozen = false;
4704 
4705  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4706  block = ItemPointerGetBlockNumber(tid);
4707 
4708  /*
4709  * Before locking the buffer, pin the visibility map page if it appears to
4710  * be necessary. Since we haven't got the lock yet, someone else might be
4711  * in the middle of changing this, so we'll need to recheck after we have
4712  * the lock.
4713  */
4715  visibilitymap_pin(relation, block, &vmbuffer);
4716 
4718 
4719  page = BufferGetPage(*buffer);
4720  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4721  Assert(ItemIdIsNormal(lp));
4722 
4723  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4724  tuple->t_len = ItemIdGetLength(lp);
4725  tuple->t_tableOid = RelationGetRelid(relation);
4726 
4727 l3:
4728  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4729 
4730  if (result == HeapTupleInvisible)
4731  {
4732  /*
4733  * This is possible, but only when locking a tuple for ON CONFLICT
4734  * UPDATE. We return this value here rather than throwing an error in
4735  * order to give that case the opportunity to throw a more specific
4736  * error.
4737  */
4738  result = HeapTupleInvisible;
4739  goto out_locked;
4740  }
4741  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4742  {
4743  TransactionId xwait;
4744  uint16 infomask;
4745  uint16 infomask2;
4746  bool require_sleep;
4747  ItemPointerData t_ctid;
4748 
4749  /* must copy state data before unlocking buffer */
4750  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4751  infomask = tuple->t_data->t_infomask;
4752  infomask2 = tuple->t_data->t_infomask2;
4753  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4754 
4756 
4757  /*
4758  * If any subtransaction of the current top transaction already holds
4759  * a lock as strong as or stronger than what we're requesting, we
4760  * effectively hold the desired lock already. We *must* succeed
4761  * without trying to take the tuple lock, else we will deadlock
4762  * against anyone wanting to acquire a stronger lock.
4763  *
4764  * Note we only do this the first time we loop on the HTSU result;
4765  * there is no point in testing in subsequent passes, because
4766  * evidently our own transaction cannot have acquired a new lock after
4767  * the first time we checked.
4768  */
4769  if (first_time)
4770  {
4771  first_time = false;
4772 
4773  if (infomask & HEAP_XMAX_IS_MULTI)
4774  {
4775  int i;
4776  int nmembers;
4777  MultiXactMember *members;
4778 
4779  /*
4780  * We don't need to allow old multixacts here; if that had
4781  * been the case, HeapTupleSatisfiesUpdate would have returned
4782  * MayBeUpdated and we wouldn't be here.
4783  */
4784  nmembers =
4785  GetMultiXactIdMembers(xwait, &members, false,
4786  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4787 
4788  for (i = 0; i < nmembers; i++)
4789  {
4790  /* only consider members of our own transaction */
4791  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4792  continue;
4793 
4794  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4795  {
4796  pfree(members);
4797  result = HeapTupleMayBeUpdated;
4798  goto out_unlocked;
4799  }
4800  }
4801 
4802  if (members)
4803  pfree(members);
4804  }
4805  else if (TransactionIdIsCurrentTransactionId(xwait))
4806  {
4807  switch (mode)
4808  {
4809  case LockTupleKeyShare:
4810  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4811  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4812  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4813  result = HeapTupleMayBeUpdated;
4814  goto out_unlocked;
4815  case LockTupleShare:
4816  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4817  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4818  {
4819  result = HeapTupleMayBeUpdated;
4820  goto out_unlocked;
4821  }
4822  break;
4824  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4825  {
4826  result = HeapTupleMayBeUpdated;
4827  goto out_unlocked;
4828  }
4829  break;
4830  case LockTupleExclusive:
4831  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4832  infomask2 & HEAP_KEYS_UPDATED)
4833  {
4834  result = HeapTupleMayBeUpdated;
4835  goto out_unlocked;
4836  }
4837  break;
4838  }
4839  }
4840  }
4841 
4842  /*
4843  * Initially assume that we will have to wait for the locking
4844  * transaction(s) to finish. We check various cases below in which
4845  * this can be turned off.
4846  */
4847  require_sleep = true;
4848  if (mode == LockTupleKeyShare)
4849  {
4850  /*
4851  * If we're requesting KeyShare, and there's no update present, we
4852  * don't need to wait. Even if there is an update, we can still
4853  * continue if the key hasn't been modified.
4854  *
4855  * However, if there are updates, we need to walk the update chain
4856  * to mark future versions of the row as locked, too. That way,
4857  * if somebody deletes that future version, we're protected
4858  * against the key going away. This locking of future versions
4859  * could block momentarily, if a concurrent transaction is
4860  * deleting a key; or it could return a value to the effect that
4861  * the transaction deleting the key has already committed. So we
4862  * do this before re-locking the buffer; otherwise this would be
4863  * prone to deadlocks.
4864  *
4865  * Note that the TID we're locking was grabbed before we unlocked
4866  * the buffer. For it to change while we're not looking, the
4867  * other properties we're testing for below after re-locking the
4868  * buffer would also change, in which case we would restart this
4869  * loop above.
4870  */
4871  if (!(infomask2 & HEAP_KEYS_UPDATED))
4872  {
4873  bool updated;
4874 
4875  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4876 
4877  /*
4878  * If there are updates, follow the update chain; bail out if
4879  * that cannot be done.
4880  */
4881  if (follow_updates && updated)
4882  {
4883  HTSU_Result res;
4884 
4885  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4887  mode);
4888  if (res != HeapTupleMayBeUpdated)
4889  {
4890  result = res;
4891  /* recovery code expects to have buffer lock held */
4893  goto failed;
4894  }
4895  }
4896 
4898 
4899  /*
4900  * Make sure it's still an appropriate lock, else start over.
4901  * Also, if it wasn't updated before we released the lock, but
4902  * is updated now, we start over too; the reason is that we
4903  * now need to follow the update chain to lock the new
4904  * versions.
4905  */
4906  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4907  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4908  !updated))
4909  goto l3;
4910 
4911  /* Things look okay, so we can skip sleeping */
4912  require_sleep = false;
4913 
4914  /*
4915  * Note we allow Xmax to change here; other updaters/lockers
4916  * could have modified it before we grabbed the buffer lock.
4917  * However, this is not a problem, because with the recheck we
4918  * just did we ensure that they still don't conflict with the
4919  * lock we want.
4920  */
4921  }
4922  }
4923  else if (mode == LockTupleShare)
4924  {
4925  /*
4926  * If we're requesting Share, we can similarly avoid sleeping if
4927  * there's no update and no exclusive lock present.
4928  */
4929  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4930  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4931  {
4933 
4934  /*
4935  * Make sure it's still an appropriate lock, else start over.
4936  * See above about allowing xmax to change.
4937  */
4938  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4940  goto l3;
4941  require_sleep = false;
4942  }
4943  }
4944  else if (mode == LockTupleNoKeyExclusive)
4945  {
4946  /*
4947  * If we're requesting NoKeyExclusive, we might also be able to
4948  * avoid sleeping; just ensure that there no conflicting lock
4949  * already acquired.
4950  */
4951  if (infomask & HEAP_XMAX_IS_MULTI)
4952  {
4953  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4954  mode))
4955  {
4956  /*
4957  * No conflict, but if the xmax changed under us in the
4958  * meantime, start over.
4959  */
4961  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4963  xwait))
4964  goto l3;
4965 
4966  /* otherwise, we're good */
4967  require_sleep = false;
4968  }
4969  }
4970  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4971  {
4973 
4974  /* if the xmax changed in the meantime, start over */
4975  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4978  xwait))
4979  goto l3;
4980  /* otherwise, we're good */
4981  require_sleep = false;
4982  }
4983  }
4984 
4985  /*
4986  * As a check independent from those above, we can also avoid sleeping
4987  * if the current transaction is the sole locker of the tuple. Note
4988  * that the strength of the lock already held is irrelevant; this is
4989  * not about recording the lock in Xmax (which will be done regardless
4990  * of this optimization, below). Also, note that the cases where we
4991  * hold a lock stronger than we are requesting are already handled
4992  * above by not doing anything.
4993  *
4994  * Note we only deal with the non-multixact case here; MultiXactIdWait
4995  * is well equipped to deal with this situation on its own.
4996  */
4997  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4999  {
5000  /* ... but if the xmax changed in the meantime, start over */
5002  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5004  xwait))
5005  goto l3;
5007  require_sleep = false;
5008  }
5009 
5010  /*
5011  * Time to sleep on the other transaction/multixact, if necessary.
5012  *
5013  * If the other transaction is an update that's already committed,
5014  * then sleeping cannot possibly do any good: if we're required to
5015  * sleep, get out to raise an error instead.
5016  *
5017  * By here, we either have already acquired the buffer exclusive lock,
5018  * or we must wait for the locking transaction or multixact; so below
5019  * we ensure that we grab buffer lock after the sleep.
5020  */
5021  if (require_sleep && result == HeapTupleUpdated)
5022  {
5024  goto failed;
5025  }
5026  else if (require_sleep)
5027  {
5028  /*
5029  * Acquire tuple lock to establish our priority for the tuple, or
5030  * die trying. LockTuple will release us when we are next-in-line
5031  * for the tuple. We must do this even if we are share-locking.
5032  *
5033  * If we are forced to "start over" below, we keep the tuple lock;
5034  * this arranges that we stay at the head of the line while
5035  * rechecking tuple state.
5036  */
5037  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
5038  &have_tuple_lock))
5039  {
5040  /*
5041  * This can only happen if wait_policy is Skip and the lock
5042  * couldn't be obtained.
5043  */
5044  result = HeapTupleWouldBlock;
5045  /* recovery code expects to have buffer lock held */
5047  goto failed;
5048  }
5049 
5050  if (infomask & HEAP_XMAX_IS_MULTI)
5051  {
5053 
5054  /* We only ever lock tuples, never update them */
5055  if (status >= MultiXactStatusNoKeyUpdate)
5056  elog(ERROR, "invalid lock mode in heap_lock_tuple");
5057 
5058  /* wait for multixact to end, or die trying */
5059  switch (wait_policy)
5060  {
5061  case LockWaitBlock:
5062  MultiXactIdWait((MultiXactId) xwait, status, infomask,
5063  relation, &tuple->t_self, XLTW_Lock, NULL);
5064  break;
5065  case LockWaitSkip:
5067  status, infomask, relation,
5068  NULL))
5069  {
5070  result = HeapTupleWouldBlock;
5071  /* recovery code expects to have buffer lock held */
5073  goto failed;
5074  }
5075  break;
5076  case LockWaitError:
5078  status, infomask, relation,
5079  NULL))
5080  ereport(ERROR,
5081  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5082  errmsg("could not obtain lock on row in relation \"%s\"",
5083  RelationGetRelationName(relation))));
5084 
5085  break;
5086  }
5087 
5088  /*
5089  * Of course, the multixact might not be done here: if we're
5090  * requesting a light lock mode, other transactions with light
5091  * locks could still be alive, as well as locks owned by our
5092  * own xact or other subxacts of this backend. We need to
5093  * preserve the surviving MultiXact members. Note that it
5094  * isn't absolutely necessary in the latter case, but doing so
5095  * is simpler.
5096  */
5097  }
5098  else
5099  {
5100  /* wait for regular transaction to end, or die trying */
5101  switch (wait_policy)
5102  {
5103  case LockWaitBlock:
5104  XactLockTableWait(xwait, relation, &tuple->t_self,
5105  XLTW_Lock);
5106  break;
5107  case LockWaitSkip:
5108  if (!ConditionalXactLockTableWait(xwait))
5109  {
5110  result = HeapTupleWouldBlock;
5111  /* recovery code expects to have buffer lock held */
5113  goto failed;
5114  }
5115  break;
5116  case LockWaitError:
5117  if (!ConditionalXactLockTableWait(xwait))
5118  ereport(ERROR,
5119  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5120  errmsg("could not obtain lock on row in relation \"%s\"",
5121  RelationGetRelationName(relation))));
5122  break;
5123  }
5124  }
5125 
5126  /* if there are updates, follow the update chain */
5127  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5128  {
5129  HTSU_Result res;
5130 
5131  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5133  mode);
5134  if (res != HeapTupleMayBeUpdated)
5135  {
5136  result = res;
5137  /* recovery code expects to have buffer lock held */
5139  goto failed;
5140  }
5141  }
5142 
5144 
5145  /*
5146  * xwait is done, but if xwait had just locked the tuple then some
5147  * other xact could update this tuple before we get to this point.
5148  * Check for xmax change, and start over if so.
5149  */
5150  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5152  xwait))
5153  goto l3;
5154 
5155  if (!(infomask & HEAP_XMAX_IS_MULTI))
5156  {
5157  /*
5158  * Otherwise check if it committed or aborted. Note we cannot
5159  * be here if the tuple was only locked by somebody who didn't
5160  * conflict with us; that would have been handled above. So
5161  * that transaction must necessarily be gone by now. But
5162  * don't check for this in the multixact case, because some
5163  * locker transactions might still be running.
5164  */
5165  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5166  }
5167  }
5168 
5169  /* By here, we'r