PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Include dependency graph for heapam.c (graph not shown).

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static void HeapSatisfiesHOTandKeyUpdate (Relation relation, Bitmapset *hot_attrs, Bitmapset *key_attrs, Bitmapset *id_attrs, bool *satisfies_hot, bool *satisfies_key, bool *satisfies_id, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
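
The scan functions listed above (heap_open, heap_beginscan, heap_getnext, heap_endscan) are normally used together. A minimal caller-side sketch, assuming a valid relation OID and an active snapshot (some_relid is a hypothetical placeholder, not something defined in heapam.c):

    Relation     rel;
    HeapScanDesc scan;
    HeapTuple    tuple;

    rel = heap_open(some_relid, AccessShareLock);     /* some_relid: hypothetical OID */
    scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* process the tuple; it is only valid until the next heap_getnext() call */
    }

    heap_endscan(scan);
    heap_close(rel, AccessShareLock);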
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
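
Each tupleLockExtraInfo entry maps a LockTupleMode to the heavyweight lock taken on the tuple and to the MultiXactStatus values used when locking or updating; an updstatus of -1 marks a mode that cannot be used for updates. An illustrative sketch of one entry (the authoritative initializer is in heapam.c):

    {   /* LockTupleKeyShare */
        AccessShareLock,              /* hwlock: heavyweight lock to take on the tuple */
        MultiXactStatusForKeyShare,   /* lockstatus */
        -1                            /* updstatus: key-share locks never update */
    },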
 

Macro Definition Documentation

#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 186 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6317 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6320 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_NOOP   0x0001

Definition at line 6316 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6319 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6318 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().
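
Together, the FRM_* flags are the out-of-band result of FreezeMultiXactId(): they tell the caller whether the old xmax can be dropped, replaced by a plain XID, or replaced by a new MultiXactId. A hedged sketch of how heap_prepare_freeze_tuple() might act on them, with multi, tuple, the cutoffs and frz (an xl_heap_freeze_tuple, simplified here) assumed from the surrounding context:

    uint16        flags;
    TransactionId newxmax;

    newxmax = FreezeMultiXactId(multi, tuple->t_infomask,
                                cutoff_xid, cutoff_multi, &flags);

    if (flags & FRM_INVALIDATE_XMAX)
    {
        /* the old xmax can be thrown away entirely */
        frz->xmax = InvalidTransactionId;
        frz->t_infomask |= HEAP_XMAX_INVALID;
    }
    else if (flags & FRM_RETURN_IS_XID)
    {
        /* a single surviving updater XID replaces the multixact */
        frz->xmax = newxmax;
        if (flags & FRM_MARK_COMMITTED)
            frz->t_infomask |= HEAP_XMAX_COMMITTED;
    }
    else if (flags & FRM_RETURN_IS_MULTI)
    {
        /* a newly created, smaller multixact replaces the old one */
        frz->xmax = newxmax;
    }
    /* FRM_NOOP: the existing multixact is kept unchanged */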

#define HEAPDEBUG_1

Definition at line 1774 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_2

Definition at line 1775 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_3

Definition at line 1776 of file heapam.c.

Referenced by heap_getnext().

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

Definition at line 174 of file heapam.c.

Referenced by DoesMultiXactIdConflict(), and Do_MultiXactIdWait().

#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 182 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])

Definition at line 204 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 184 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().
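
These Tuplock macros wrap the heavyweight tuple-lock primitives from lmgr; heap_acquire_tuplock(), their main caller, chooses between the blocking and the conditional form depending on the wait policy. A simplified sketch of that choice (not the verbatim body of heap_acquire_tuplock):

    switch (wait_policy)
    {
        case LockWaitBlock:
            LockTupleTuplock(relation, tid, mode);            /* sleep until acquired */
            break;

        case LockWaitSkip:
            if (!ConditionalLockTupleTuplock(relation, tid, mode))
                return false;                                 /* caller skips the row */
            break;

        case LockWaitError:
            if (!ConditionalLockTupleTuplock(relation, tid, mode))
                ereport(ERROR,
                        (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                         errmsg("could not obtain lock on row in relation \"%s\"",
                                RelationGetRelationName(relation))));
            break;
    }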

Function Documentation

static uint8 compute_infobits (uint16 infomask, uint16 infomask2)

Definition at line 2939 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2940 {
2941  return
2942  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2943  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2944  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2945  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2946  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2947  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2948  XLHL_KEYS_UPDATED : 0);
2949 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
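
compute_infobits() condenses the xmax-related infomask bits into a single byte carried in WAL records such as xl_heap_delete; at redo time fix_infomask_from_infobits() (documented below) reverses the mapping. A small sketch of the round trip, with oldtup and the xlrec record assumed from the surrounding context:

    /* WAL-insert side (e.g. heap_delete): pack the bits into the record */
    xlrec.infobits_set = compute_infobits(oldtup->t_infomask,
                                          oldtup->t_infomask2);

    /* redo side (e.g. heap_xlog_delete): unpack them into a fresh infomask */
    uint16 infomask  = 0;
    uint16 infomask2 = 0;

    fix_infomask_from_infobits(xlrec.infobits_set, &infomask, &infomask2);
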
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)

Definition at line 5293 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5298 {
5299  TransactionId new_xmax;
5300  uint16 new_infomask,
5301  new_infomask2;
5302 
5303  Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5304 
5305 l5:
5306  new_infomask = 0;
5307  new_infomask2 = 0;
5308  if (old_infomask & HEAP_XMAX_INVALID)
5309  {
5310  /*
5311  * No previous locker; we just insert our own TransactionId.
5312  *
5313  * Note that it's critical that this case be the first one checked,
5314  * because there are several blocks below that come back to this one
5315  * to implement certain optimizations; old_infomask might contain
5316  * other dirty bits in those cases, but we don't really care.
5317  */
5318  if (is_update)
5319  {
5320  new_xmax = add_to_xmax;
5321  if (mode == LockTupleExclusive)
5322  new_infomask2 |= HEAP_KEYS_UPDATED;
5323  }
5324  else
5325  {
5326  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5327  switch (mode)
5328  {
5329  case LockTupleKeyShare:
5330  new_xmax = add_to_xmax;
5331  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5332  break;
5333  case LockTupleShare:
5334  new_xmax = add_to_xmax;
5335  new_infomask |= HEAP_XMAX_SHR_LOCK;
5336  break;
5337  case LockTupleNoKeyExclusive:
5338  new_xmax = add_to_xmax;
5339  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5340  break;
5341  case LockTupleExclusive:
5342  new_xmax = add_to_xmax;
5343  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5344  new_infomask2 |= HEAP_KEYS_UPDATED;
5345  break;
5346  default:
5347  new_xmax = InvalidTransactionId; /* silence compiler */
5348  elog(ERROR, "invalid lock mode");
5349  }
5350  }
5351  }
5352  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5353  {
5354  MultiXactStatus new_status;
5355 
5356  /*
5357  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5358  * cross-check.
5359  */
5360  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5361 
5362  /*
5363  * A multixact together with LOCK_ONLY set but neither lock bit set
5364  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5365  * anymore. This check is critical for databases upgraded by
5366  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5367  * that such multis are never passed.
5368  */
5369  if (HEAP_LOCKED_UPGRADED(old_infomask))
5370  {
5371  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5372  old_infomask |= HEAP_XMAX_INVALID;
5373  goto l5;
5374  }
5375 
5376  /*
5377  * If the XMAX is already a MultiXactId, then we need to expand it to
5378  * include add_to_xmax; but if all the members were lockers and are
5379  * all gone, we can do away with the IS_MULTI bit and just set
5380  * add_to_xmax as the only locker/updater. If all lockers are gone
5381  * and we have an updater that aborted, we can also do without a
5382  * multi.
5383  *
5384  * The cost of doing GetMultiXactIdMembers would be paid by
5385  * MultiXactIdExpand if we weren't to do this, so this check is not
5386  * incurring extra work anyhow.
5387  */
5388  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5389  {
5390  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5391  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5392  old_infomask)))
5393  {
5394  /*
5395  * Reset these bits and restart; otherwise fall through to
5396  * create a new multi below.
5397  */
5398  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5399  old_infomask |= HEAP_XMAX_INVALID;
5400  goto l5;
5401  }
5402  }
5403 
5404  new_status = get_mxact_status_for_lock(mode, is_update);
5405 
5406  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5407  new_status);
5408  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5409  }
5410  else if (old_infomask & HEAP_XMAX_COMMITTED)
5411  {
5412  /*
5413  * It's a committed update, so we need to preserve him as updater of
5414  * the tuple.
5415  */
5416  MultiXactStatus status;
5417  MultiXactStatus new_status;
5418 
5419  if (old_infomask2 & HEAP_KEYS_UPDATED)
5420  status = MultiXactStatusUpdate;
5421  else
5422  status = MultiXactStatusNoKeyUpdate;
5423 
5424  new_status = get_mxact_status_for_lock(mode, is_update);
5425 
5426  /*
5427  * since it's not running, it's obviously impossible for the old
5428  * updater to be identical to the current one, so we need not check
5429  * for that case as we do in the block above.
5430  */
5431  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5432  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5433  }
5434  else if (TransactionIdIsInProgress(xmax))
5435  {
5436  /*
5437  * If the XMAX is a valid, in-progress TransactionId, then we need to
5438  * create a new MultiXactId that includes both the old locker or
5439  * updater and our own TransactionId.
5440  */
5441  MultiXactStatus new_status;
5442  MultiXactStatus old_status;
5443  LockTupleMode old_mode;
5444 
5445  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5446  {
5447  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5448  old_status = MultiXactStatusForKeyShare;
5449  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5450  old_status = MultiXactStatusForShare;
5451  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5452  {
5453  if (old_infomask2 & HEAP_KEYS_UPDATED)
5454  old_status = MultiXactStatusForUpdate;
5455  else
5456  old_status = MultiXactStatusForNoKeyUpdate;
5457  }
5458  else
5459  {
5460  /*
5461  * LOCK_ONLY can be present alone only when a page has been
5462  * upgraded by pg_upgrade. But in that case,
5463  * TransactionIdIsInProgress() should have returned false. We
5464  * assume it's no longer locked in this case.
5465  */
5466  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5467  old_infomask |= HEAP_XMAX_INVALID;
5468  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5469  goto l5;
5470  }
5471  }
5472  else
5473  {
5474  /* it's an update, but which kind? */
5475  if (old_infomask2 & HEAP_KEYS_UPDATED)
5476  old_status = MultiXactStatusUpdate;
5477  else
5478  old_status = MultiXactStatusNoKeyUpdate;
5479  }
5480 
5481  old_mode = TUPLOCK_from_mxstatus(old_status);
5482 
5483  /*
5484  * If the lock to be acquired is for the same TransactionId as the
5485  * existing lock, there's an optimization possible: consider only the
5486  * strongest of both locks as the only one present, and restart.
5487  */
5488  if (xmax == add_to_xmax)
5489  {
5490  /*
5491  * Note that it's not possible for the original tuple to be
5492  * updated: we wouldn't be here because the tuple would have been
5493  * invisible and we wouldn't try to update it. As a subtlety,
5494  * this code can also run when traversing an update chain to lock
5495  * future versions of a tuple. But we wouldn't be here either,
5496  * because the add_to_xmax would be different from the original
5497  * updater.
5498  */
5499  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5500 
5501  /* acquire the strongest of both */
5502  if (mode < old_mode)
5503  mode = old_mode;
5504  /* mustn't touch is_update */
5505 
5506  old_infomask |= HEAP_XMAX_INVALID;
5507  goto l5;
5508  }
5509 
5510  /* otherwise, just fall back to creating a new multixact */
5511  new_status = get_mxact_status_for_lock(mode, is_update);
5512  new_xmax = MultiXactIdCreate(xmax, old_status,
5513  add_to_xmax, new_status);
5514  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5515  }
5516  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5517  TransactionIdDidCommit(xmax))
5518  {
5519  /*
5520  * It's a committed update, so we gotta preserve him as updater of the
5521  * tuple.
5522  */
5523  MultiXactStatus status;
5524  MultiXactStatus new_status;
5525 
5526  if (old_infomask2 & HEAP_KEYS_UPDATED)
5527  status = MultiXactStatusUpdate;
5528  else
5529  status = MultiXactStatusNoKeyUpdate;
5530 
5531  new_status = get_mxact_status_for_lock(mode, is_update);
5532 
5533  /*
5534  * since it's not running, it's obviously impossible for the old
5535  * updater to be identical to the current one, so we need not check
5536  * for that case as we do in the block above.
5537  */
5538  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5539  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5540  }
5541  else
5542  {
5543  /*
5544  * Can get here iff the locking/updating transaction was running when
5545  * the infomask was extracted from the tuple, but finished before
5546  * TransactionIdIsInProgress got to run. Deal with it as if there was
5547  * no locker at all in the first place.
5548  */
5549  old_infomask |= HEAP_XMAX_INVALID;
5550  goto l5;
5551  }
5552 
5553  *result_infomask = new_infomask;
5554  *result_infomask2 = new_infomask2;
5555  *result_xmax = new_xmax;
5556 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6835
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
uint32 TransactionId
Definition: c.h:393
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:772
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:192
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:248
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6916
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:264
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
TransactionId MultiXactId
Definition: c.h:403
#define Assert(condition)
Definition: c.h:670
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:204
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4525
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:250
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:252
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)

Definition at line 7170 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7172 {
7173  return Do_MultiXactIdWait(multi, status, infomask, true,
7174  rel, NULL, XLTW_None, remaining);
7175 }
int remaining
Definition: informix.c:692
XLTW_None
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7070
#define NULL
Definition: c.h:226
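
ConditionalMultiXactIdWait() is the no-wait variant; its blocking sibling MultiXactIdWait() (declared above, body not shown in this excerpt) is essentially the same call with nowait passed as false. A sketch of that wrapper:

    static void
    MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask,
                    Relation rel, ItemPointer ctid, XLTW_Oper oper,
                    int *remaining)
    {
        /* block until every conflicting member has finished */
        (void) Do_MultiXactIdWait(multi, status, infomask, false,
                                  rel, ctid, oper, remaining);
    }
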
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)

Definition at line 7070 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7074 {
7075  bool result = true;
7076  MultiXactMember *members;
7077  int nmembers;
7078  int remain = 0;
7079 
7080  /* for pre-pg_upgrade tuples, no need to sleep at all */
7081  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7082  GetMultiXactIdMembers(multi, &members, false,
7083  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7084 
7085  if (nmembers >= 0)
7086  {
7087  int i;
7088 
7089  for (i = 0; i < nmembers; i++)
7090  {
7091  TransactionId memxid = members[i].xid;
7092  MultiXactStatus memstatus = members[i].status;
7093 
7094  if (TransactionIdIsCurrentTransactionId(memxid))
7095  {
7096  remain++;
7097  continue;
7098  }
7099 
7100  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7101  LOCKMODE_from_mxstatus(status)))
7102  {
7103  if (remaining && TransactionIdIsInProgress(memxid))
7104  remain++;
7105  continue;
7106  }
7107 
7108  /*
7109  * This member conflicts with our multi, so we have to sleep (or
7110  * return failure, if asked to avoid waiting.)
7111  *
7112  * Note that we don't set up an error context callback ourselves,
7113  * but instead we pass the info down to XactLockTableWait. This
7114  * might seem a bit wasteful because the context is set up and
7115  * tore down for each member of the multixact, but in reality it
7116  * should be barely noticeable, and it avoids duplicate code.
7117  */
7118  if (nowait)
7119  {
7120  result = ConditionalXactLockTableWait(memxid);
7121  if (!result)
7122  break;
7123  }
7124  else
7125  XactLockTableWait(memxid, rel, ctid, oper);
7126  }
7127 
7128  pfree(members);
7129  }
7130 
7131  if (remaining)
7132  *remaining = remain;
7133 
7134  return result;
7135 }
int remaining
Definition: informix.c:692
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:393
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:772
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:174
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
void pfree(void *pointer)
Definition: mcxt.c:992
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:375
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)

Definition at line 6981 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6983 {
6984  int nmembers;
6985  MultiXactMember *members;
6986  bool result = false;
6987  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
6988 
6989  if (HEAP_LOCKED_UPGRADED(infomask))
6990  return false;
6991 
6992  nmembers = GetMultiXactIdMembers(multi, &members, false,
6993  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
6994  if (nmembers >= 0)
6995  {
6996  int i;
6997 
6998  for (i = 0; i < nmembers; i++)
6999  {
7000  TransactionId memxid;
7001  LOCKMODE memlockmode;
7002 
7003  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7004 
7005  /* ignore members that don't conflict with the lock we want */
7006  if (!DoLockModesConflict(memlockmode, wanted))
7007  continue;
7008 
7009  /* ignore members from current xact */
7010  memxid = members[i].xid;
7011  if (TransactionIdIsCurrentTransactionId(memxid))
7012  continue;
7013 
7014  if (ISUPDATE_from_mxstatus(members[i].status))
7015  {
7016  /* ignore aborted updaters */
7017  if (TransactionIdDidAbort(memxid))
7018  continue;
7019  }
7020  else
7021  {
7022  /* ignore lockers-only that are no longer in progress */
7023  if (!TransactionIdIsInProgress(memxid))
7024  continue;
7025  }
7026 
7027  /*
7028  * Whatever remains are either live lockers that conflict with our
7029  * wanted lock, and updaters that are not aborted. Those conflict
7030  * with what we want, so return true.
7031  */
7032  result = true;
7033  break;
7034  }
7035  pfree(members);
7036  }
7037 
7038  return result;
7039 }
uint32 TransactionId
Definition: c.h:393
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:772
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:174
void pfree(void *pointer)
Definition: mcxt.c:992
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)

Definition at line 7808 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, NULL, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7809 {
7810  TupleDesc desc = RelationGetDescr(relation);
7811  Oid replidindex;
7812  Relation idx_rel;
7813  TupleDesc idx_desc;
7814  char replident = relation->rd_rel->relreplident;
7815  HeapTuple key_tuple = NULL;
7816  bool nulls[MaxHeapAttributeNumber];
7817  Datum values[MaxHeapAttributeNumber];
7818  int natt;
7819 
7820  *copy = false;
7821 
7822  if (!RelationIsLogicallyLogged(relation))
7823  return NULL;
7824 
7825  if (replident == REPLICA_IDENTITY_NOTHING)
7826  return NULL;
7827 
7828  if (replident == REPLICA_IDENTITY_FULL)
7829  {
7830  /*
7831  * When logging the entire old tuple, it very well could contain
7832  * toasted columns. If so, force them to be inlined.
7833  */
7834  if (HeapTupleHasExternal(tp))
7835  {
7836  *copy = true;
7837  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7838  }
7839  return tp;
7840  }
7841 
7842  /* if the key hasn't changed and we're only logging the key, we're done */
7843  if (!key_changed)
7844  return NULL;
7845 
7846  /* find the replica identity index */
7847  replidindex = RelationGetReplicaIndex(relation);
7848  if (!OidIsValid(replidindex))
7849  {
7850  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7851  RelationGetRelationName(relation));
7852  return NULL;
7853  }
7854 
7855  idx_rel = RelationIdGetRelation(replidindex);
7856  idx_desc = RelationGetDescr(idx_rel);
7857 
7858  /* deform tuple, so we have fast access to columns */
7859  heap_deform_tuple(tp, desc, values, nulls);
7860 
7861  /* set all columns to NULL, regardless of whether they actually are */
7862  memset(nulls, 1, sizeof(nulls));
7863 
7864  /*
7865  * Now set all columns contained in the index to NOT NULL, they cannot
7866  * currently be NULL.
7867  */
7868  for (natt = 0; natt < idx_desc->natts; natt++)
7869  {
7870  int attno = idx_rel->rd_index->indkey.values[natt];
7871 
7872  if (attno < 0)
7873  {
7874  /*
7875  * The OID column can appear in an index definition, but that's
7876  * OK, because we always copy the OID if present (see below).
7877  * Other system columns may not.
7878  */
7879  if (attno == ObjectIdAttributeNumber)
7880  continue;
7881  elog(ERROR, "system column in index");
7882  }
7883  nulls[attno - 1] = false;
7884  }
7885 
7886  key_tuple = heap_form_tuple(desc, values, nulls);
7887  *copy = true;
7888  RelationClose(idx_rel);
7889 
7890  /*
7891  * Always copy oids if the table has them, even if not included in the
7892  * index. The space in the logged tuple is used anyway, so there's little
7893  * point in not including the information.
7894  */
7895  if (relation->rd_rel->relhasoids)
7896  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7897 
7898  /*
7899  * If the tuple, which by here only contains indexed columns, still has
7900  * toasted columns, force them to be inlined. This is somewhat unlikely
7901  * since there's limits on the size of indexed columns, so we don't
7902  * duplicate toast_flatten_tuple()s functionality in the above loop over
7903  * the indexed columns, even if it would be more efficient.
7904  */
7905  if (HeapTupleHasExternal(key_tuple))
7906  {
7907  HeapTuple oldtup = key_tuple;
7908 
7909  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7910  heap_freetuple(oldtup);
7911  }
7912 
7913  return key_tuple;
7914 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1084
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4588
#define RelationGetDescr(relation)
Definition: rel.h:425
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
#define REPLICA_IDENTITY_NOTHING
Definition: pg_class.h:177
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:692
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:572
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1374
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:533
int natts
Definition: tupdesc.h:73
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
Form_pg_index rd_index
Definition: rel.h:155
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:433
void RelationClose(Relation relation)
Definition: relcache.c:2155
uintptr_t Datum
Definition: postgres.h:374
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
#define NULL
Definition: c.h:226
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:935
static Datum values[MAXATTR]
Definition: bootstrap.c:162
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2066
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)

Definition at line 8204 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8205 {
8206  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8207  HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
8208  *infomask2 &= ~HEAP_KEYS_UPDATED;
8209 
8210  if (infobits & XLHL_XMAX_IS_MULTI)
8211  *infomask |= HEAP_XMAX_IS_MULTI;
8212  if (infobits & XLHL_XMAX_LOCK_ONLY)
8213  *infomask |= HEAP_XMAX_LOCK_ONLY;
8214  if (infobits & XLHL_XMAX_EXCL_LOCK)
8215  *infomask |= HEAP_XMAX_EXCL_LOCK;
8216  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8217  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8218  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8219 
8220  if (infobits & XLHL_KEYS_UPDATED)
8221  *infomask2 |= HEAP_KEYS_UPDATED;
8222 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2320 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2321 {
2322  if (bistate->current_buf != InvalidBuffer)
2323  ReleaseBuffer(bistate->current_buf);
2324  FreeAccessStrategy(bistate->strategy);
2325  pfree(bistate);
2326 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
void pfree(void *pointer)
Definition: mcxt.c:992
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:580
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)

Definition at line 6344 of file heapam.c.

References Assert, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6347 {
6348  TransactionId xid = InvalidTransactionId;
6349  int i;
6350  MultiXactMember *members;
6351  int nmembers;
6352  bool need_replace;
6353  int nnewmembers;
6354  MultiXactMember *newmembers;
6355  bool has_lockers;
6356  TransactionId update_xid;
6357  bool update_committed;
6358 
6359  *flags = 0;
6360 
6361  /* We should only be called in Multis */
6362  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6363 
6364  if (!MultiXactIdIsValid(multi) ||
6365  HEAP_LOCKED_UPGRADED(t_infomask))
6366  {
6367  /* Ensure infomask bits are appropriately set/reset */
6368  *flags |= FRM_INVALIDATE_XMAX;
6369  return InvalidTransactionId;
6370  }
6371  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6372  {
6373  /*
6374  * This old multi cannot possibly have members still running. If it
6375  * was a locker only, it can be removed without any further
6376  * consideration; but if it contained an update, we might need to
6377  * preserve it.
6378  */
6379  Assert(!MultiXactIdIsRunning(multi,
6380  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6381  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6382  {
6383  *flags |= FRM_INVALIDATE_XMAX;
6384  xid = InvalidTransactionId; /* not strictly necessary */
6385  }
6386  else
6387  {
6388  /* replace multi by update xid */
6389  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6390 
6391  /* wasn't only a lock, xid needs to be valid */
6392  Assert(TransactionIdIsValid(xid));
6393 
6394  /*
6395  * If the xid is older than the cutoff, it has to have aborted,
6396  * otherwise the tuple would have gotten pruned away.
6397  */
6398  if (TransactionIdPrecedes(xid, cutoff_xid))
6399  {
6400  Assert(!TransactionIdDidCommit(xid));
6401  *flags |= FRM_INVALIDATE_XMAX;
6402  xid = InvalidTransactionId; /* not strictly necessary */
6403  }
6404  else
6405  {
6406  *flags |= FRM_RETURN_IS_XID;
6407  }
6408  }
6409 
6410  return xid;
6411  }
6412 
6413  /*
6414  * This multixact might have or might not have members still running, but
6415  * we know it's valid and is newer than the cutoff point for multis.
6416  * However, some member(s) of it may be below the cutoff for Xids, so we
6417  * need to walk the whole members array to figure out what to do, if
6418  * anything.
6419  */
6420 
6421  nmembers =
6422  GetMultiXactIdMembers(multi, &members, false,
6423  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6424  if (nmembers <= 0)
6425  {
6426  /* Nothing worth keeping */
6427  *flags |= FRM_INVALIDATE_XMAX;
6428  return InvalidTransactionId;
6429  }
6430 
6431  /* is there anything older than the cutoff? */
6432  need_replace = false;
6433  for (i = 0; i < nmembers; i++)
6434  {
6435  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6436  {
6437  need_replace = true;
6438  break;
6439  }
6440  }
6441 
6442  /*
6443  * In the simplest case, there is no member older than the cutoff; we can
6444  * keep the existing MultiXactId as is.
6445  */
6446  if (!need_replace)
6447  {
6448  *flags |= FRM_NOOP;
6449  pfree(members);
6450  return InvalidTransactionId;
6451  }
6452 
6453  /*
6454  * If the multi needs to be updated, figure out which members do we need
6455  * to keep.
6456  */
6457  nnewmembers = 0;
6458  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6459  has_lockers = false;
6460  update_xid = InvalidTransactionId;
6461  update_committed = false;
6462 
6463  for (i = 0; i < nmembers; i++)
6464  {
6465  /*
6466  * Determine whether to keep this member or ignore it.
6467  */
6468  if (ISUPDATE_from_mxstatus(members[i].status))
6469  {
6470  TransactionId xid = members[i].xid;
6471 
6472  /*
6473  * It's an update; should we keep it? If the transaction is known
6474  * aborted or crashed then it's okay to ignore it, otherwise not.
6475  * Note that an updater older than cutoff_xid cannot possibly be
6476  * committed, because HeapTupleSatisfiesVacuum would have returned
6477  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6478  *
6479  * As with all tuple visibility routines, it's critical to test
6480  * TransactionIdIsInProgress before TransactionIdDidCommit,
6481  * because of race conditions explained in detail in tqual.c.
6482  */
6483  if (TransactionIdIsCurrentTransactionId(xid) ||
6484  TransactionIdIsInProgress(xid))
6485  {
6486  Assert(!TransactionIdIsValid(update_xid));
6487  update_xid = xid;
6488  }
6489  else if (TransactionIdDidCommit(xid))
6490  {
6491  /*
6492  * The transaction committed, so we can tell caller to set
6493  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6494  * the transaction is not running.)
6495  */
6496  Assert(!TransactionIdIsValid(update_xid));
6497  update_committed = true;
6498  update_xid = xid;
6499  }
6500 
6501  /*
6502  * Not in progress, not committed -- must be aborted or crashed;
6503  * we can ignore it.
6504  */
6505 
6506  /*
6507  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6508  * update Xid cannot possibly be older than the xid cutoff.
6509  */
6510  Assert(!TransactionIdIsValid(update_xid) ||
6511  !TransactionIdPrecedes(update_xid, cutoff_xid));
6512 
6513  /*
6514  * If we determined that it's an Xid corresponding to an update
6515  * that must be retained, additionally add it to the list of
6516  * members of the new Multi, in case we end up using that. (We
6517  * might still decide to use only an update Xid and not a multi,
6518  * but it's easier to maintain the list as we walk the old members
6519  * list.)
6520  */
6521  if (TransactionIdIsValid(update_xid))
6522  newmembers[nnewmembers++] = members[i];
6523  }
6524  else
6525  {
6526  /* We only keep lockers if they are still running */
6527  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6528  TransactionIdIsInProgress(members[i].xid))
6529  {
6530  /* running locker cannot possibly be older than the cutoff */
6531  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6532  newmembers[nnewmembers++] = members[i];
6533  has_lockers = true;
6534  }
6535  }
6536  }
6537 
6538  pfree(members);
6539 
6540  if (nnewmembers == 0)
6541  {
6542  /* nothing worth keeping!? Tell caller to remove the whole thing */
6543  *flags |= FRM_INVALIDATE_XMAX;
6544  xid = InvalidTransactionId;
6545  }
6546  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6547  {
6548  /*
6549  * If there's a single member and it's an update, pass it back alone
6550  * without creating a new Multi. (XXX we could do this when there's a
6551  * single remaining locker, too, but that would complicate the API too
6552  * much; moreover, the case with the single updater is more
6553  * interesting, because those are longer-lived.)
6554  */
6555  Assert(nnewmembers == 1);
6556  *flags |= FRM_RETURN_IS_XID;
6557  if (update_committed)
6558  *flags |= FRM_MARK_COMMITTED;
6559  xid = update_xid;
6560  }
6561  else
6562  {
6563  /*
6564  * Create a new multixact with the surviving members of the previous
6565  * one, to set as new Xmax in the tuple.
6566  */
6567  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6568  *flags |= FRM_RETURN_IS_MULTI;
6569  }
6570 
6571  pfree(newmembers);
6572 
6573  return xid;
6574 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6318
#define FRM_MARK_COMMITTED
Definition: heapam.c:6320
uint32 TransactionId
Definition: c.h:393
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:772
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6916
void pfree(void *pointer)
Definition: mcxt.c:992
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6317
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6319
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define Assert(condition)
Definition: c.h:670
#define FRM_NOOP
Definition: heapam.c:6316
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3135
void * palloc(Size size)
Definition: mcxt.c:891
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)

Definition at line 4525 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4526 {
4527  int retval;
4528 
4529  if (is_update)
4530  retval = tupleLockExtraInfo[mode].updstatus;
4531  else
4532  retval = tupleLockExtraInfo[mode].lockstatus;
4533 
4534  if (retval == -1)
4535  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4536  is_update ? "true" : "false");
4537 
4538  return (MultiXactStatus) retval;
4539 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219
BulkInsertState GetBulkInsertState ( void  )

Definition at line 2306 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2307 {
2308  BulkInsertState bistate;
2309 
2310  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2311  bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2312  bistate->current_buf = InvalidBuffer;
2313  return bistate;
2314 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:525
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:891
Buffer current_buf
Definition: hio.h:34
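
GetBulkInsertState() and FreeBulkInsertState() bracket a bulk load: the state keeps a pinned buffer and a BAS_BULKWRITE access strategy that heap_insert() reuses across rows. A hedged usage sketch, with relation, tuples and ntuples assumed to come from the caller (as in COPY-like code paths):

    BulkInsertState bistate = GetBulkInsertState();
    int             i;

    for (i = 0; i < ntuples; i++)
        heap_insert(relation, tuples[i], GetCurrentCommandId(true),
                    0 /* options such as HEAP_INSERT_SKIP_FSM may apply */,
                    bistate);

    FreeBulkInsertState(bistate);
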
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)

Definition at line 6835 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6837 {
6838  int nmembers;
6839  MultiXactMember *members;
6840  int i;
6841  uint16 bits = HEAP_XMAX_IS_MULTI;
6842  uint16 bits2 = 0;
6843  bool has_update = false;
6844  LockTupleMode strongest = LockTupleKeyShare;
6845 
6846  /*
6847  * We only use this in multis we just created, so they cannot be values
6848  * pre-pg_upgrade.
6849  */
6850  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6851 
6852  for (i = 0; i < nmembers; i++)
6853  {
6854  LockTupleMode mode;
6855 
6856  /*
6857  * Remember the strongest lock mode held by any member of the
6858  * multixact.
6859  */
6860  mode = TUPLOCK_from_mxstatus(members[i].status);
6861  if (mode > strongest)
6862  strongest = mode;
6863 
6864  /* See what other bits we need */
6865  switch (members[i].status)
6866  {
6867  case MultiXactStatusForKeyShare:
6868  case MultiXactStatusForShare:
6869  case MultiXactStatusForNoKeyUpdate:
6870  break;
6871 
6872  case MultiXactStatusForUpdate:
6873  bits2 |= HEAP_KEYS_UPDATED;
6874  break;
6875 
6876  case MultiXactStatusNoKeyUpdate:
6877  has_update = true;
6878  break;
6879 
6880  case MultiXactStatusUpdate:
6881  bits2 |= HEAP_KEYS_UPDATED;
6882  has_update = true;
6883  break;
6884  }
6885  }
6886 
6887  if (strongest == LockTupleExclusive ||
6888  strongest == LockTupleNoKeyExclusive)
6889  bits |= HEAP_XMAX_EXCL_LOCK;
6890  else if (strongest == LockTupleShare)
6891  bits |= HEAP_XMAX_SHR_LOCK;
6892  else if (strongest == LockTupleKeyShare)
6893  bits |= HEAP_XMAX_KEYSHR_LOCK;
6894 
6895  if (!has_update)
6896  bits |= HEAP_XMAX_LOCK_ONLY;
6897 
6898  if (nmembers > 0)
6899  pfree(members);
6900 
6901  *new_infomask = bits;
6902  *new_infomask2 = bits2;
6903 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:264
void pfree(void *pointer)
Definition: mcxt.c:992
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:204
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
void heap2_redo (XLogReaderState *record)

Definition at line 9068 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9069 {
9070  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9071 
9072  switch (info & XLOG_HEAP_OPMASK)
9073  {
9074  case XLOG_HEAP2_CLEAN:
9075  heap_xlog_clean(record);
9076  break;
9077  case XLOG_HEAP2_FREEZE_PAGE:
9078  heap_xlog_freeze_page(record);
9079  break;
9080  case XLOG_HEAP2_CLEANUP_INFO:
9081  heap_xlog_cleanup_info(record);
9082  break;
9083  case XLOG_HEAP2_VISIBLE:
9084  heap_xlog_visible(record);
9085  break;
9086  case XLOG_HEAP2_MULTI_INSERT:
9087  heap_xlog_multi_insert(record);
9088  break;
9089  case XLOG_HEAP2_LOCK_UPDATED:
9090  heap_xlog_lock_updated(record);
9091  break;
9092  case XLOG_HEAP2_NEW_CID:
9093 
9094  /*
9095  * Nothing to do on a real replay, only used during logical
9096  * decoding.
9097  */
9098  break;
9099  case XLOG_HEAP2_REWRITE:
9100  heap_xlog_logical_rewrite(record);
9101  break;
9102  default:
9103  elog(PANIC, "heap2_redo: unknown op code %u", info);
9104  }
9105 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1115
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:263
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8929
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8146
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8411
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:198
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7920
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8031
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7941
void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6092 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6093 {
6094  TransactionId xid = GetCurrentTransactionId();
6095  ItemPointer tid = &(tuple->t_self);
6096  ItemId lp;
6097  HeapTupleData tp;
6098  Page page;
6099  BlockNumber block;
6100  Buffer buffer;
6101 
6102  Assert(ItemPointerIsValid(tid));
6103 
6104  block = ItemPointerGetBlockNumber(tid);
6105  buffer = ReadBuffer(relation, block);
6106  page = BufferGetPage(buffer);
6107 
6108  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6109 
6110  /*
6111  * Page can't be all visible, we just inserted into it, and are still
6112  * running.
6113  */
6114  Assert(!PageIsAllVisible(page));
6115 
6116  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6117  Assert(ItemIdIsNormal(lp));
6118 
6119  tp.t_tableOid = RelationGetRelid(relation);
6120  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6121  tp.t_len = ItemIdGetLength(lp);
6122  tp.t_self = *tid;
6123 
6124  /*
6125  * Sanity check that the tuple really is a speculatively inserted tuple,
6126  * inserted by us.
6127  */
6128  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6129  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6130  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6131  elog(ERROR, "attempted to kill a non-speculative tuple");
6132  Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6133 
6134  /*
6135  * No need to check for serializable conflicts here. There is never a
6136  * need for a combocid, either. No need to extract replica identity, or
6137  * do anything special with infomask bits.
6138  */
6139 
6140  START_CRIT_SECTION();
6141 
6142  /*
6143  * The tuple will become DEAD immediately. Flag that this page
6144  * immediately is a candidate for pruning by setting xmin to
6145  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6146  * inventing a nicer API for this.
6147  */
6148  Assert(TransactionIdIsValid(RecentGlobalXmin));
6149  PageSetPrunable(page, RecentGlobalXmin);
6150 
6151  /* store transaction information of xact deleting the tuple */
6152  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6153  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6154 
6155  /*
6156  * Set the tuple header xmin to InvalidTransactionId. This makes the
6157  * tuple immediately invisible to everyone. (In particular, to any
6158  * transactions waiting on the speculative token, woken up later.)
6159  */
6160  HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6161 
6162  /* Clear the speculative insertion token too */
6163  tp.t_data->t_ctid = tp.t_self;
6164 
6165  MarkBufferDirty(buffer);
6166 
6167  /*
6168  * XLOG stuff
6169  *
6170  * The WAL records generated here match heap_delete(). The same recovery
6171  * routines are used.
6172  */
6173  if (RelationNeedsWAL(relation))
6174  {
6175  xl_heap_delete xlrec;
6176  XLogRecPtr recptr;
6177 
6178  xlrec.flags = XLH_DELETE_IS_SUPER;
6179  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6180  tp.t_data->t_infomask2);
6181  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6182  xlrec.xmax = xid;
6183 
6184  XLogBeginInsert();
6185  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6186  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6187 
6188  /* No replica identity & replication origin logged */
6189 
6190  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6191 
6192  PageSetLSN(page, recptr);
6193  }
6194 
6195  END_CRIT_SECTION();
6196 
6197  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6198 
6199  if (HeapTupleHasExternal(&tp))
6200  {
6201  Assert(!IsToastRelation(relation));
6202  toast_delete(relation, &tp, true);
6203  }
6204 
6205  /*
6206  * Never need to mark tuple for invalidation, since catalogs don't support
6207  * speculative insertion
6208  */
6209 
6210  /* Now we can release the buffer */
6211  ReleaseBuffer(buffer);
6212 
6213  /* count deletion, as we counted the insertion too */
6214  pgstat_count_heap_delete(relation);
6215 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:59
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:135
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2939
HeapTupleFields t_heap
Definition: htup_details.h:146
#define PageIsAllVisible(page)
Definition: bufpage.h:382
uint32 TransactionId
Definition: c.h:393
union HeapTupleHeaderData::@38 t_choice
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1445
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:132
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:423
#define PageSetPrunable(page, xid)
Definition: bufpage.h:395
#define START_CRIT_SECTION()
Definition: miscadmin.h:130
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:105
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:502
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:416
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:118
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_MOVED
Definition: htup_details.h:202
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
uint8 infobits_set
Definition: heapam_xlog.h:106
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:76
#define RelationNeedsWAL(relation)
Definition: rel.h:502
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1851
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:66
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:365
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:413
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:313
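For context, a hedged sketch of the speculative-insertion protocol this function belongs to (simplified from ExecInsert(); the conflict recheck and the conflict_found flag are illustrative placeholders, not code from heapam.c):

uint32 specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());

HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);
heap_insert(relation, tuple, GetCurrentCommandId(true),
            HEAP_INSERT_SPECULATIVE, NULL);

if (conflict_found)
    heap_abort_speculative(relation, tuple);    /* kill our own, still-invisible tuple */
else
    heap_finish_speculative(relation, tuple);   /* replace the token with a real t_ctid */

SpeculativeInsertionLockRelease(GetCurrentTransactionId());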
static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5244 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5246 {
5247  if (*have_tuple_lock)
5248  return true;
5249 
5250  switch (wait_policy)
5251  {
5252  case LockWaitBlock:
5253  LockTupleTuplock(relation, tid, mode);
5254  break;
5255 
5256  case LockWaitSkip:
5257  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5258  return false;
5259  break;
5260 
5261  case LockWaitError:
5262  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5263  ereport(ERROR,
5264  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5265  errmsg("could not obtain lock on row in relation \"%s\"",
5266  RelationGetRelationName(relation))));
5267  break;
5268  }
5269  *have_tuple_lock = true;
5270 
5271  return true;
5272 }
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:182
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:186
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:433
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797
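A hedged sketch of the caller pattern (compare heap_delete()): the lmgr tuple lock is acquired at most once per operation, tracked through have_tuple_lock, and released with UnlockTupleTuplock() once the operation is finished. The surrounding variables (tp, xwait) are illustrative.

bool have_tuple_lock = false;

/* get in line for the tuple before sleeping on a concurrent locker/updater */
heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
                     LockWaitBlock, &have_tuple_lock);
XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);

/* ... recheck the tuple state and perform the delete/update/lock ... */

if (have_tuple_lock)
    UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);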
HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1394 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1396 {
1397  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1398  true, true, true, false, false, false);
1399 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
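Typical usage in the callers listed above is a begin/getnext/end loop; a minimal sketch (error handling omitted, and the snapshot choice depends on the caller):

HeapScanDesc scan;
HeapTuple    tuple;

scan = heap_beginscan(relation, GetActiveSnapshot(), 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
    /* tuple points into the scan's current buffer and is only valid
     * until the next heap_getnext() or heap_endscan() call */
}
heap_endscan(scan);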
HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1422 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

1424 {
1425  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1426  false, false, true, true, false, false);
1427 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1402 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_rewrite_oid_without_relid(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1403 {
1404  Oid relid = RelationGetRelid(relation);
1405  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1406 
1407  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1408  true, true, true, false, false, true);
1409 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:858
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:436
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
#define RelationGetRelid(relation)
Definition: rel.h:413
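A hedged sketch of a typical catalog scan, modelled on do_autovacuum(); the classRel variable and the scan key shown are illustrative:

ScanKeyData  key;
HeapScanDesc scan;
HeapTuple    tup;

ScanKeyInit(&key,
            Anum_pg_class_relkind,
            BTEqualStrategyNumber, F_CHAREQ,
            CharGetDatum(RELKIND_RELATION));

scan = heap_beginscan_catalog(classRel, 1, &key);   /* classRel: pg_class opened by the caller */
while ((tup = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
    Form_pg_class classForm = (Form_pg_class) GETSTRUCT(tup);
    /* ... */
}
heap_endscan(scan);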
static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1440 of file heapam.c.

References initscan(), IsMVCCSnapshot, NULL, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

1449 {
1450  HeapScanDesc scan;
1451 
1452  /*
1453  * increment relation ref count while scanning relation
1454  *
1455  * This is just to make really sure the relcache entry won't go away while
1456  * the scan has a pointer to it. Caller should be holding the rel open
1457  * anyway, so this is redundant in all normal scenarios...
1458  */
1459  RelationIncrementReferenceCount(relation);
1460 
1461  /*
1462  * allocate and initialize scan descriptor
1463  */
1464  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1465 
1466  scan->rs_rd = relation;
1467  scan->rs_snapshot = snapshot;
1468  scan->rs_nkeys = nkeys;
1469  scan->rs_bitmapscan = is_bitmapscan;
1470  scan->rs_samplescan = is_samplescan;
1471  scan->rs_strategy = NULL; /* set in initscan */
1472  scan->rs_allow_strat = allow_strat;
1473  scan->rs_allow_sync = allow_sync;
1474  scan->rs_temp_snap = temp_snap;
1475  scan->rs_parallel = parallel_scan;
1476 
1477  /*
1478  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1479  */
1480  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1481 
1482  /*
1483  * For a seqscan in a serializable transaction, acquire a predicate lock
1484  * on the entire relation. This is required not only to lock all the
1485  * matching tuples, but also to conflict with new insertions into the
1486  * table. In an indexscan, we take page locks on the index pages covering
1487  * the range specified in the scan qual, but in a heap scan there is
1488  * nothing more fine-grained to lock. A bitmap scan is a different story,
1489  * there we have already scanned the index and locked the index pages
1490  * covering the predicate. But in that case we still have to lock any
1491  * matching heap tuples.
1492  */
1493  if (!is_bitmapscan)
1494  PredicateLockRelation(relation, snapshot);
1495 
1496  /* we only need to set this up once */
1497  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1498 
1499  /*
1500  * we do this here instead of in initscan() because heap_rescan also calls
1501  * initscan() and we don't want to allocate memory again
1502  */
1503  if (nkeys > 0)
1504  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1505  else
1506  scan->rs_key = NULL;
1507 
1508  initscan(scan, key, false);
1509 
1510  return scan;
1511 }
bool rs_allow_sync
Definition: relscan.h:55
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2415
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:68
bool rs_bitmapscan
Definition: relscan.h:51
bool rs_pageatatime
Definition: relscan.h:53
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:72
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:48
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:56
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2122
BufferAccessStrategy rs_strategy
Definition: relscan.h:63
Relation rs_rd
Definition: relscan.h:47
#define NULL
Definition: c.h:226
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:891
bool rs_allow_strat
Definition: relscan.h:54
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:217
bool rs_samplescan
Definition: relscan.h:52
#define RelationGetRelid(relation)
Definition: rel.h:413
ScanKey rs_key
Definition: relscan.h:50
HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1653 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, and RestoreSnapshot().

Referenced by ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

1654 {
1655  Snapshot snapshot;
1656 
1657  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1658  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1659  RegisterSnapshot(snapshot);
1660 
1661  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1662  true, true, true, false, false, true);
1663 }
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:41
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2069
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:858
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
#define Assert(condition)
Definition: c.h:670
#define RelationGetRelid(relation)
Definition: rel.h:413
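A hedged sketch of how the two callers use this: the leader sizes and fills a ParallelHeapScanDesc in dynamic shared memory, then leader and workers each attach to it with heap_beginscan_parallel(). Shared-memory plumbing is elided, and the toc and snapshot variables are illustrative.

/* leader, cf. ExecSeqScanInitializeDSM() */
Size                 size  = heap_parallelscan_estimate(snapshot);
ParallelHeapScanDesc pscan = (ParallelHeapScanDesc) shm_toc_allocate(toc, size);

heap_parallelscan_initialize(pscan, relation, snapshot);
scan = heap_beginscan_parallel(relation, pscan);

/* workers, cf. ExecSeqScanInitializeWorker(): look up the same pscan in the
 * toc and likewise call heap_beginscan_parallel(relation, pscan) */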
HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1430 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

1433 {
1434  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1435  allow_strat, allow_sync, allow_pagemode,
1436  false, true, false);
1437 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1412 of file heapam.c.

References heap_beginscan_internal().

Referenced by IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

1415 {
1416  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1417  allow_strat, allow_sync, true,
1418  false, false, false);
1419 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1440
#define NULL
Definition: c.h:226
HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 2998 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, NULL, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3001 {
3002  HTSU_Result result;
3003  TransactionId xid = GetCurrentTransactionId();
3004  ItemId lp;
3005  HeapTupleData tp;
3006  Page page;
3007  BlockNumber block;
3008  Buffer buffer;
3009  Buffer vmbuffer = InvalidBuffer;
3010  TransactionId new_xmax;
3011  uint16 new_infomask,
3012  new_infomask2;
3013  bool have_tuple_lock = false;
3014  bool iscombo;
3015  bool all_visible_cleared = false;
3016  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3017  bool old_key_copied = false;
3018 
3019  Assert(ItemPointerIsValid(tid));
3020 
3021  /*
3022  * Forbid this during a parallel operation, lest it allocate a combocid.
3023  * Other workers might need that combocid for visibility checks, and we
3024  * have no provision for broadcasting it to them.
3025  */
3026  if (IsInParallelMode())
3027  ereport(ERROR,
3028  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3029  errmsg("cannot delete tuples during a parallel operation")));
3030 
3031  block = ItemPointerGetBlockNumber(tid);
3032  buffer = ReadBuffer(relation, block);
3033  page = BufferGetPage(buffer);
3034 
3035  /*
3036  * Before locking the buffer, pin the visibility map page if it appears to
3037  * be necessary. Since we haven't got the lock yet, someone else might be
3038  * in the middle of changing this, so we'll need to recheck after we have
3039  * the lock.
3040  */
3041  if (PageIsAllVisible(page))
3042  visibilitymap_pin(relation, block, &vmbuffer);
3043 
3044  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3045 
3046  /*
3047  * If we didn't pin the visibility map page and the page has become all
3048  * visible while we were busy locking the buffer, we'll have to unlock and
3049  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3050  * unfortunate, but hopefully shouldn't happen often.
3051  */
3052  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3053  {
3054  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3055  visibilitymap_pin(relation, block, &vmbuffer);
3056  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3057  }
3058 
3059  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3060  Assert(ItemIdIsNormal(lp));
3061 
3062  tp.t_tableOid = RelationGetRelid(relation);
3063  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3064  tp.t_len = ItemIdGetLength(lp);
3065  tp.t_self = *tid;
3066 
3067 l1:
3068  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3069 
3070  if (result == HeapTupleInvisible)
3071  {
3072  UnlockReleaseBuffer(buffer);
3073  ereport(ERROR,
3074  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3075  errmsg("attempted to delete invisible tuple")));
3076  }
3077  else if (result == HeapTupleBeingUpdated && wait)
3078  {
3079  TransactionId xwait;
3080  uint16 infomask;
3081 
3082  /* must copy state data before unlocking buffer */
3083  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3084  infomask = tp.t_data->t_infomask;
3085 
3086  /*
3087  * Sleep until concurrent transaction ends -- except when there's a
3088  * single locker and it's our own transaction. Note we don't care
3089  * which lock mode the locker has, because we need the strongest one.
3090  *
3091  * Before sleeping, we need to acquire tuple lock to establish our
3092  * priority for the tuple (see heap_lock_tuple). LockTuple will
3093  * release us when we are next-in-line for the tuple.
3094  *
3095  * If we are forced to "start over" below, we keep the tuple lock;
3096  * this arranges that we stay at the head of the line while rechecking
3097  * tuple state.
3098  */
3099  if (infomask & HEAP_XMAX_IS_MULTI)
3100  {
3101  /* wait for multixact */
3102  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3103  LockTupleExclusive))
3104  {
3105  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3106 
3107  /* acquire tuple lock, if necessary */
3108  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3109  LockWaitBlock, &have_tuple_lock);
3110 
3111  /* wait for multixact */
3112  MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
3113  relation, &(tp.t_self), XLTW_Delete,
3114  NULL);
3115  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3116 
3117  /*
3118  * If xwait had just locked the tuple then some other xact
3119  * could update this tuple before we get to this point. Check
3120  * for xmax change, and start over if so.
3121  */
3122  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3123  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3124  xwait))
3125  goto l1;
3126  }
3127 
3128  /*
3129  * You might think the multixact is necessarily done here, but not
3130  * so: it could have surviving members, namely our own xact or
3131  * other subxacts of this backend. It is legal for us to delete
3132  * the tuple in either case, however (the latter case is
3133  * essentially a situation of upgrading our former shared lock to
3134  * exclusive). We don't bother changing the on-disk hint bits
3135  * since we are about to overwrite the xmax altogether.
3136  */
3137  }
3138  else if (!TransactionIdIsCurrentTransactionId(xwait))
3139  {
3140  /*
3141  * Wait for regular transaction to end; but first, acquire tuple
3142  * lock.
3143  */
3144  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3145  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3146  LockWaitBlock, &have_tuple_lock);
3147  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3148  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3149 
3150  /*
3151  * xwait is done, but if xwait had just locked the tuple then some
3152  * other xact could update this tuple before we get to this point.
3153  * Check for xmax change, and start over if so.
3154  */
3155  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3156  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3157  xwait))
3158  goto l1;
3159 
3160  /* Otherwise check if it committed or aborted */
3161  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3162  }
3163 
3164  /*
3165  * We may overwrite if previous xmax aborted, or if it committed but
3166  * only locked the tuple without updating it.
3167  */
3168  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3169  HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3170  HeapTupleHeaderIsOnlyLocked(tp.t_data))
3171  result = HeapTupleMayBeUpdated;
3172  else
3173  result = HeapTupleUpdated;
3174  }
3175 
3176  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3177  {
3178  /* Perform additional check for transaction-snapshot mode RI updates */
3179  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3180  result = HeapTupleUpdated;
3181  }
3182 
3183  if (result != HeapTupleMayBeUpdated)
3184  {
3185  Assert(result == HeapTupleSelfUpdated ||
3186  result == HeapTupleUpdated ||
3187  result == HeapTupleBeingUpdated);
3188  Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3189  hufd->ctid = tp.t_data->t_ctid;
3190  hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3191  if (result == HeapTupleSelfUpdated)
3192  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3193  else
3194  hufd->cmax = InvalidCommandId;
3195  UnlockReleaseBuffer(buffer);
3196  if (have_tuple_lock)
3197  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3198  if (vmbuffer != InvalidBuffer)
3199  ReleaseBuffer(vmbuffer);
3200  return result;
3201  }
3202 
3203  /*
3204  * We're about to do the actual delete -- check for conflict first, to
3205  * avoid possibly having to roll back work we've just done.
3206  *
3207  * This is safe without a recheck as long as there is no possibility of
3208  * another process scanning the page between this check and the delete
3209  * being visible to the scan (i.e., an exclusive buffer content lock is
3210  * continuously held from this point until the tuple delete is visible).
3211  */
3212  CheckForSerializableConflictIn(relation, &tp, buffer);
3213 
3214  /* replace cid with a combo cid if necessary */
3215  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3216 
3217  /*
3218  * Compute replica identity tuple before entering the critical section so
3219  * we don't PANIC upon a memory allocation failure.
3220  */
3221  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3222 
3223  /*
3224  * If this is the first possibly-multixact-able operation in the current
3225  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3226  * certain that the transaction will never become a member of any older
3227  * MultiXactIds than that. (We have to do this even if we end up just
3228  * using our own TransactionId below, since some other backend could
3229  * incorporate our XID into a MultiXact immediately afterwards.)
3230  */
3231  MultiXactIdSetOldestMember();
3232 
3233  compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3234  tp.t_data->t_infomask, tp.t_data->t_infomask2,
3235  xid, LockTupleExclusive, true,
3236  &new_xmax, &new_infomask, &new_infomask2);
3237 
3238  START_CRIT_SECTION();
3239 
3240  /*
3241  * If this transaction commits, the tuple will become DEAD sooner or
3242  * later. Set flag that this page is a candidate for pruning once our xid
3243  * falls below the OldestXmin horizon. If the transaction finally aborts,
3244  * the subsequent page pruning will be a no-op and the hint will be
3245  * cleared.
3246  */
3247  PageSetPrunable(page, xid);
3248 
3249  if (PageIsAllVisible(page))
3250  {
3251  all_visible_cleared = true;
3252  PageClearAllVisible(page);
3253  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3254  vmbuffer, VISIBILITYMAP_VALID_BITS);
3255  }
3256 
3257  /* store transaction information of xact deleting the tuple */
3258  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3259  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3260  tp.t_data->t_infomask |= new_infomask;
3261  tp.t_data->t_infomask2 |= new_infomask2;
3262  HeapTupleHeaderClearHotUpdated(tp.t_data);
3263  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3264  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3265  /* Make sure there is no forward chain link in t_ctid */
3266  tp.t_data->t_ctid = tp.t_self;
3267 
3268  MarkBufferDirty(buffer);
3269 
3270  /*
3271  * XLOG stuff
3272  *
3273  * NB: heap_abort_speculative() uses the same xlog record and replay
3274  * routines.
3275  */
3276  if (RelationNeedsWAL(relation))
3277  {
3278  xl_heap_delete xlrec;
3279  XLogRecPtr recptr;
3280 
3281  /* For logical decode we need combocids to properly decode the catalog */
3282  if (RelationIsAccessibleInLogicalDecoding(relation))
3283  log_heap_new_cid(relation, &tp);
3284 
3285  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3286  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3287  tp.t_data->t_infomask2);
3288  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3289  xlrec.xmax = new_xmax;
3290 
3291  if (old_key_tuple != NULL)
3292  {
3293  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3294  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3295  else
3296  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3297  }
3298 
3299  XLogBeginInsert();
3300  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3301 
3302  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3303 
3304  /*
3305  * Log replica identity of the deleted tuple if there is one
3306  */
3307  if (old_key_tuple != NULL)
3308  {
3309  xl_heap_header xlhdr;
3310 
3311  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3312  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3313  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3314 
3315  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3316  XLogRegisterData((char *) old_key_tuple->t_data
3317  + SizeofHeapTupleHeader,
3318  old_key_tuple->t_len
3319  - SizeofHeapTupleHeader);
3320  }
3321 
3322  /* filtering by origin on a row level is much more efficient */
3323  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3324 
3325  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3326 
3327  PageSetLSN(page, recptr);
3328  }
3329 
3330  END_CRIT_SECTION();
3331 
3332  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3333 
3334  if (vmbuffer != InvalidBuffer)
3335  ReleaseBuffer(vmbuffer);
3336 
3337  /*
3338  * If the tuple has toasted out-of-line attributes, we need to delete
3339  * those items too. We have to do this before releasing the buffer
3340  * because we need to look at the contents of the tuple, but it's OK to
3341  * release the content lock on the buffer first.
3342  */
3343  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3344  relation->rd_rel->relkind != RELKIND_MATVIEW)
3345  {
3346  /* toast table entries should never be recursively toasted */
3347  Assert(!HeapTupleHasExternal(&tp));
3348  }
3349  else if (HeapTupleHasExternal(&tp))
3350  toast_delete(relation, &tp, false);
3351 
3352  /*
3353  * Mark tuple for invalidation from system caches at next command
3354  * boundary. We have to do this before releasing the buffer because we
3355  * need to look at the contents of the tuple.
3356  */
3357  CacheInvalidateHeapTuple(relation, &tp, NULL);
3358 
3359  /* Now we can release the buffer */
3360  ReleaseBuffer(buffer);
3361 
3362  /*
3363  * Release the lmgr tuple lock, if we had it.
3364  */
3365  if (have_tuple_lock)
3366  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3367 
3368  pgstat_count_heap_delete(relation);
3369 
3370  if (old_key_tuple != NULL && old_key_copied)
3371  heap_freetuple(old_key_tuple);
3372 
3373  return HeapTupleMayBeUpdated;
3374 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1585
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:59
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:170
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7732
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2939
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1085
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:382
uint32 TransactionId
Definition: c.h:393
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:772
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1445
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2961
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:497
#define END_CRIT_SECTION()
Definition: miscadmin.h:132
#define RELKIND_MATVIEW
Definition: pg_class.h:167
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define PageSetPrunable(page, xid)
Definition: bufpage.h:395
#define START_CRIT_SECTION()
Definition: miscadmin.h:130
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:192
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:113
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1374
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4243
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:184
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:105
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:369
unsigned short uint16
Definition: c.h:264
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:912
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3315
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7148
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:416
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
#define REGBUF_STANDARD
Definition: xloginsert.h:35
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
HTSU_Result
Definition: snapshot.h:118
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:399
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5293
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
#define InvalidSnapshot
Definition: snapshot.h:24
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:556
#define InvalidCommandId
Definition: c.h:410
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2284
#define HEAP_MOVED
Definition: htup_details.h:202
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5244
TransactionId MultiXactId
Definition: c.h:403
#define PageClearAllVisible(page)
Definition: bufpage.h:386
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
#define NULL
Definition: c.h:226
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:670
uint8 infobits_set
Definition: heapam_xlog.h:106
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:7808
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:6981
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:76
#define RelationNeedsWAL(relation)
Definition: rel.h:502
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1851
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2588
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:66
#define RELKIND_RELATION
Definition: pg_class.h:160
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:365
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:413
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93
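A hedged sketch of a caller handling the result codes (simplified from simple_heap_delete() and ExecDelete()):

HTSU_Result           result;
HeapUpdateFailureData hufd;

result = heap_delete(relation, tid,
                     GetCurrentCommandId(true),
                     InvalidSnapshot,   /* no crosscheck snapshot */
                     true,              /* wait for concurrent updaters */
                     &hufd);
switch (result)
{
    case HeapTupleMayBeUpdated:
        break;          /* the tuple was deleted */
    case HeapTupleSelfUpdated:
        /* already deleted by the current command; usually treated as a no-op */
        break;
    case HeapTupleUpdated:
        /* concurrent update or delete; hufd.ctid points to the newer version */
        break;
    default:
        elog(ERROR, "unrecognized heap_delete status: %u", result);
}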
void heap_endscan ( HeapScanDesc  scan)

Definition at line 1581 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_rewrite_oid_without_relid(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1582 {
1583  /* Note: no locking manipulations needed */
1584 
1585  /*
1586  * unpin scan buffers
1587  */
1588  if (BufferIsValid(scan->rs_cbuf))
1589  ReleaseBuffer(scan->rs_cbuf);
1590 
1591  /*
1592  * decrement relation reference count and free scan descriptor storage
1593  */
1594  RelationDecrementReferenceCount(scan->rs_rd);
1595 
1596  if (scan->rs_key)
1597  pfree(scan->rs_key);
1598 
1599  if (scan->rs_strategy != NULL)
1600  FreeAccessStrategy(scan->rs_strategy);
1601 
1602  if (scan->rs_temp_snap)
1603  UnregisterSnapshot(scan->rs_snapshot);
1604 
1605  pfree(scan);
1606 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
void pfree(void *pointer)
Definition: mcxt.c:992
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2135
Snapshot rs_snapshot
Definition: relscan.h:48
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:900
bool rs_temp_snap
Definition: relscan.h:56
BufferAccessStrategy rs_strategy
Definition: relscan.h:63
Relation rs_rd
Definition: relscan.h:47
Buffer rs_cbuf
Definition: relscan.h:70
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:580
#define NULL
Definition: c.h:226
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:50
void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6786 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6787 {
6788  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6789 
6790  if (frz->frzflags & XLH_FREEZE_XVAC)
6791  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6792 
6793  if (frz->frzflags & XLH_INVALID_XVAC)
6794  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6795 
6796  tuple->t_infomask = frz->t_infomask;
6797  tuple->t_infomask2 = frz->t_infomask2;
6798 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:417
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:298
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:294
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:293
bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1849 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), NULL, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1855 {
1856  ItemPointer tid = &(tuple->t_self);
1857  ItemId lp;
1858  Buffer buffer;
1859  Page page;
1860  OffsetNumber offnum;
1861  bool valid;
1862 
1863  /*
1864  * Fetch and pin the appropriate page of the relation.
1865  */
1866  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1867 
1868  /*
1869  * Need share lock on buffer to examine tuple commit status.
1870  */
1871  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1872  page = BufferGetPage(buffer);
1873  TestForOldSnapshot(snapshot, relation, page);
1874 
1875  /*
1876  * We'd better check for out-of-range offnum in case of VACUUM since the
1877  * TID was obtained.
1878  */
1879  offnum = ItemPointerGetOffsetNumber(tid);
1880  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1881  {
1882  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1883  if (keep_buf)
1884  *userbuf = buffer;
1885  else
1886  {
1887  ReleaseBuffer(buffer);
1888  *userbuf = InvalidBuffer;
1889  }
1890  tuple->t_data = NULL;
1891  return false;
1892  }
1893 
1894  /*
1895  * get the item line pointer corresponding to the requested tid
1896  */
1897  lp = PageGetItemId(page, offnum);
1898 
1899  /*
1900  * Must check for deleted tuple.
1901  */
1902  if (!ItemIdIsNormal(lp))
1903  {
1904  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1905  if (keep_buf)
1906  *userbuf = buffer;
1907  else
1908  {
1909  ReleaseBuffer(buffer);
1910  *userbuf = InvalidBuffer;
1911  }
1912  tuple->t_data = NULL;
1913  return false;
1914  }
1915 
1916  /*
1917  * fill in *tuple fields
1918  */
1919  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1920  tuple->t_len = ItemIdGetLength(lp);
1921  tuple->t_tableOid = RelationGetRelid(relation);
1922 
1923  /*
1924  * check time qualification of tuple, then release lock
1925  */
1926  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1927 
1928  if (valid)
1929  PredicateLockTuple(relation, tuple, snapshot);
1930 
1931  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1932 
1933  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1934 
1935  if (valid)
1936  {
1937  /*
1938  * All checks passed, so return the tuple as valid. Caller is now
1939  * responsible for releasing the buffer.
1940  */
1941  *userbuf = buffer;
1942 
1943  /* Count the successful fetch against appropriate rel, if any */
1944  if (stats_relation != NULL)
1945  pgstat_count_heap_fetch(stats_relation);
1946 
1947  return true;
1948  }
1949 
1950  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1951  if (keep_buf)
1952  *userbuf = buffer;
1953  else
1954  {
1955  ReleaseBuffer(buffer);
1956  *userbuf = InvalidBuffer;
1957  }
1958 
1959  return false;
1960 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3292
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:354
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3862
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1150
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:232
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3529
#define NULL
Definition: c.h:226
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:76
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2460
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:66
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:413
#define PageGetItem(page, itemId)
Definition: bufpage.h:337
Pointer Page
Definition: bufpage.h:74
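A hedged sketch of the calling pattern: initialize t_self with the TID of interest; on success the caller owns the returned buffer pin. The tid, relation, and snapshot variables are illustrative.

HeapTupleData tuple;
Buffer        buffer;

tuple.t_self = *tid;        /* the TID to look up */
if (heap_fetch(relation, snapshot, &tuple, &buffer, false, NULL))
{
    /* tuple.t_data points into the pinned buffer; use it, then release */
    ReleaseBuffer(buffer);
}
/* on failure (with keep_buf = false) buffer is InvalidBuffer; nothing to release */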
void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6001 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, NULL, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6002 {
6003  Buffer buffer;
6004  Page page;
6005  OffsetNumber offnum;
6006  ItemId lp = NULL;
6007  HeapTupleHeader htup;
6008 
6009  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6010  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6011  page = (Page) BufferGetPage(buffer);
6012 
6013  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6014  if (PageGetMaxOffsetNumber(page) >= offnum)
6015  lp = PageGetItemId(page, offnum);
6016 
6017  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6018  elog(ERROR, "invalid lp");
6019 
6020  htup = (HeapTupleHeader) PageGetItem(page, lp);
6021 
6022  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6023  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
6024  "invalid speculative token constant");
6025 
6026  /* NO EREPORT(ERROR) from here till changes are logged */
6027  START_CRIT_SECTION();
6028 
6029  Assert(HeapTupleHeaderIsSpeculative(htup));
6030 
6031  MarkBufferDirty(buffer);
6032 
6033  /*
6034  * Replace the speculative insertion token with a real t_ctid, pointing to
6035  * itself like it does on regular tuples.
6036  */
6037  htup->t_ctid = tuple->t_self;
6038 
6039  /* XLOG stuff */
6040  if (RelationNeedsWAL(relation))
6041  {
6042  xl_heap_confirm xlrec;
6043  XLogRecPtr recptr;
6044 
6045  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6046 
6047  XLogBeginInsert();
6048 
6049  /* We want the same filtering on this as on a plain insert */
6050  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6051 
6052  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6053  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6054 
6055  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6056 
6057  PageSetLSN(page, recptr);
6058  }
6059 
6060  END_CRIT_SECTION();
6061 
6062  UnlockReleaseBuffer(buffer);
6063 }
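
A minimal usage sketch (not part of heapam.c): the speculative-insertion protocol that ends in heap_finish_speculative(), condensed from what ExecInsert() does for INSERT ... ON CONFLICT. The helper name insert_speculatively and the conflict_detected flag are hypothetical; the token and lock calls are quoted from memory of lmgr.h and htup_details.h and should be checked against the executor code.

/* Sketch only: confirm or abort a speculative heap insertion. */
#include "postgres.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "storage/lmgr.h"

static void
insert_speculatively(Relation rel, HeapTuple tup, CommandId cid,
                     bool conflict_detected)
{
    uint32      specToken;

    /* advertise the insertion as speculative until it is confirmed */
    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
    HeapTupleHeaderSetSpeculativeToken(tup->t_data, specToken);

    (void) heap_insert(rel, tup, cid, HEAP_INSERT_SPECULATIVE, NULL);

    /* ... the caller would insert index entries and look for conflicts ... */

    if (!conflict_detected)
        heap_finish_speculative(rel, tup);  /* replace token with real t_ctid */
    else
        heap_abort_speculative(rel, tup);   /* kill the provisional tuple */

    SpeculativeInsertionLockRelease(GetCurrentTransactionId());
}
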
bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6807 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6809 {
6810  xl_heap_freeze_tuple frz;
6811  bool do_freeze;
6812  bool tuple_totally_frozen;
6813 
6814  do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6815  &frz, &tuple_totally_frozen);
6816 
6817  /*
6818  * Note that because this is not a WAL-logged operation, we don't need to
6819  * fill in the offset in the freeze record.
6820  */
6821 
6822  if (do_freeze)
6823  heap_execute_freeze_tuple(tuple, &frz);
6824  return do_freeze;
6825 }
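
A hedged sketch of the typical caller pattern: rewrite_heap_tuple() freezes a private, materialized copy of a tuple, so no buffer lock or WAL is involved. The helper name and the cutoff parameters below are illustrative only; real cutoffs come from the rewrite state set up via vacuum_set_xid_limits().

/* Sketch: freeze one in-memory tuple copy if the cutoffs require it. */
#include "postgres.h"
#include "access/heapam.h"

static bool
freeze_copied_tuple(HeapTuple copied_tuple,
                    TransactionId freeze_xid,   /* caller-supplied cutoff */
                    MultiXactId cutoff_multi)   /* caller-supplied cutoff */
{
    /* edits the header in place; returns true if anything was changed */
    return heap_freeze_tuple(copied_tuple->t_data, freeze_xid, cutoff_multi);
}
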
void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2154 of file heapam.c.

References BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2157 {
2158  BlockNumber blk;
2159  ItemPointerData ctid;
2160  TransactionId priorXmax;
2161 
2162  /* this is to avoid Assert failures on bad input */
2163  if (!ItemPointerIsValid(tid))
2164  return;
2165 
2166  /*
2167  * Since this can be called with user-supplied TID, don't trust the input
2168  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2169  * don't check t_ctid links again this way. Note that it would not do to
2170  * call it just once and save the result, either.)
2171  */
2172  blk = ItemPointerGetBlockNumber(tid);
2173  if (blk >= RelationGetNumberOfBlocks(relation))
2174  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2175  blk, RelationGetRelationName(relation));
2176 
2177  /*
2178  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2179  * need to examine, and *tid is the TID we will return if ctid turns out
2180  * to be bogus.
2181  *
2182  * Note that we will loop until we reach the end of the t_ctid chain.
2183  * Depending on the snapshot passed, there might be at most one visible
2184  * version of the row, but we don't try to optimize for that.
2185  */
2186  ctid = *tid;
2187  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2188  for (;;)
2189  {
2190  Buffer buffer;
2191  Page page;
2192  OffsetNumber offnum;
2193  ItemId lp;
2194  HeapTupleData tp;
2195  bool valid;
2196 
2197  /*
2198  * Read, pin, and lock the page.
2199  */
2200  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2201  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2202  page = BufferGetPage(buffer);
2203  TestForOldSnapshot(snapshot, relation, page);
2204 
2205  /*
2206  * Check for bogus item number. This is not treated as an error
2207  * condition because it can happen while following a t_ctid link. We
2208  * just assume that the prior tid is OK and return it unchanged.
2209  */
2210  offnum = ItemPointerGetOffsetNumber(&ctid);
2211  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2212  {
2213  UnlockReleaseBuffer(buffer);
2214  break;
2215  }
2216  lp = PageGetItemId(page, offnum);
2217  if (!ItemIdIsNormal(lp))
2218  {
2219  UnlockReleaseBuffer(buffer);
2220  break;
2221  }
2222 
2223  /* OK to access the tuple */
2224  tp.t_self = ctid;
2225  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2226  tp.t_len = ItemIdGetLength(lp);
2227  tp.t_tableOid = RelationGetRelid(relation);
2228 
2229  /*
2230  * After following a t_ctid link, we might arrive at an unrelated
2231  * tuple. Check for XMIN match.
2232  */
2233  if (TransactionIdIsValid(priorXmax) &&
2234  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2235  {
2236  UnlockReleaseBuffer(buffer);
2237  break;
2238  }
2239 
2240  /*
2241  * Check time qualification of tuple; if visible, set it as the new
2242  * result candidate.
2243  */
2244  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2245  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2246  if (valid)
2247  *tid = ctid;
2248 
2249  /*
2250  * If there's a valid t_ctid link, follow it, else we're done.
2251  */
2252  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2253  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2254  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2255  {
2256  UnlockReleaseBuffer(buffer);
2257  break;
2258  }
2259 
2260  ctid = tp.t_data->t_ctid;
2261  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2262  UnlockReleaseBuffer(buffer);
2263  } /* end of loop */
2264 }
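
A short usage sketch (hypothetical helper, mirroring currtid_byreloid()): build a TID and let heap_get_latest_tid() chase the t_ctid chain to the newest version visible to the active snapshot. It assumes the relation is already open and a snapshot has been pushed.

#include "postgres.h"
#include "access/heapam.h"
#include "storage/itemptr.h"
#include "utils/snapmgr.h"

static ItemPointerData
latest_version_of(Relation rel, BlockNumber blkno, OffsetNumber offnum)
{
    ItemPointerData tid;

    ItemPointerSet(&tid, blkno, offnum);
    /* on return, tid points at the latest visible version (or is unchanged) */
    heap_get_latest_tid(rel, GetActiveSnapshot(), &tid);
    return tid;
}
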
HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1781 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), NULL, pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_rewrite_oid_without_relid(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1782 {
1783  /* Note: no locking manipulations needed */
1784 
1785  HEAPDEBUG_1; /* heap_getnext( info ) */
1786 
1787  if (scan->rs_pageatatime)
1788  heapgettup_pagemode(scan, direction,
1789  scan->rs_nkeys, scan->rs_key);
1790  else
1791  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1792 
1793  if (scan->rs_ctup.t_data == NULL)
1794  {
1795  HEAPDEBUG_2; /* heap_getnext returning EOS */
1796  return NULL;
1797  }
1798 
1799  /*
1800  * if we get here it means we have a new current scan tuple, so point to
1801  * the proper return buffer and return the tuple.
1802  */
1803  HEAPDEBUG_3; /* heap_getnext returning tuple */
1804 
1805  pgstat_count_heap_getnext(scan->rs_rd);
1806 
1807  return &(scan->rs_ctup);
1808 }
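
The canonical heap_getnext() loop, sketched here as a hypothetical helper in the style of CopyTo() or pgstat_heap(). It assumes it runs inside a transaction with a snapshot pushed; the returned tuple points into the scan descriptor and must not be freed by the caller.

#include "postgres.h"
#include "access/heapam.h"
#include "utils/snapmgr.h"

static uint64
count_tuples(Oid relid)
{
    Relation    rel = heap_open(relid, AccessShareLock);
    HeapScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
    HeapTuple   tuple;
    uint64      ntuples = 0;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        ntuples++;              /* tuple lives in scan->rs_ctup */

    heap_endscan(scan);
    heap_close(rel, AccessShareLock);
    return ntuples;
}
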
bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 2126 of file heapam.c.

References BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), and ReleaseBuffer().

Referenced by _bt_check_unique(), and unique_key_recheck().

2128 {
2129  bool result;
2130  Buffer buffer;
2131  HeapTupleData heapTuple;
2132 
2133  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2134  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2135  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2136  &heapTuple, all_dead, true);
2137  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2138  ReleaseBuffer(buffer);
2139  return result;
2140 }
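
A hedged sketch of the _bt_check_unique()-style use: probe whether any member of the HOT chain rooted at an index TID is live under a dirty snapshot. The helper name is hypothetical; htid is the TID taken from the index entry and may be updated to point at the visible member.

#include "postgres.h"
#include "access/heapam.h"
#include "storage/itemptr.h"
#include "utils/tqual.h"

static bool
root_tid_has_live_tuple(Relation heapRel, ItemPointer htid, bool *chain_all_dead)
{
    SnapshotData SnapshotDirty;

    InitDirtySnapshot(SnapshotDirty);
    return heap_hot_search(htid, heapRel, &SnapshotDirty, chain_all_dead);
}
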
bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool *  all_dead,
bool  first_call 
)

Definition at line 1984 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

1987 {
1988  Page dp = (Page) BufferGetPage(buffer);
1989  TransactionId prev_xmax = InvalidTransactionId;
1990  OffsetNumber offnum;
1991  bool at_chain_start;
1992  bool valid;
1993  bool skip;
1994 
1995  /* If this is not the first call, previous call returned a (live!) tuple */
1996  if (all_dead)
1997  *all_dead = first_call;
1998 
1999  Assert(TransactionIdIsValid(RecentGlobalXmin));
2000 
2001  Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
2002  offnum = ItemPointerGetOffsetNumber(tid);
2003  at_chain_start = first_call;
2004  skip = !first_call;
2005 
2006  heapTuple->t_self = *tid;
2007 
2008  /* Scan through possible multiple members of HOT-chain */
2009  for (;;)
2010  {
2011  ItemId lp;
2012 
2013  /* check for bogus TID */
2014  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2015  break;
2016 
2017  lp = PageGetItemId(dp, offnum);
2018 
2019  /* check for unused, dead, or redirected items */
2020  if (!ItemIdIsNormal(lp))
2021  {
2022  /* We should only see a redirect at start of chain */
2023  if (ItemIdIsRedirected(lp) && at_chain_start)
2024  {
2025  /* Follow the redirect */
2026  offnum = ItemIdGetRedirect(lp);
2027  at_chain_start = false;
2028  continue;
2029  }
2030  /* else must be end of chain */
2031  break;
2032  }
2033 
2034  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2035  heapTuple->t_len = ItemIdGetLength(lp);
2036  heapTuple->t_tableOid = RelationGetRelid(relation);
2037  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2038 
2039  /*
2040  * Shouldn't see a HEAP_ONLY tuple at chain start.
2041  */
2042  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2043  break;
2044 
2045  /*
2046  * The xmin should match the previous xmax value, else chain is
2047  * broken.
2048  */
2049  if (TransactionIdIsValid(prev_xmax) &&
2050  !TransactionIdEquals(prev_xmax,
2051  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2052  break;
2053 
2054  /*
2055  * When first_call is true (and thus, skip is initially false) we'll
2056  * return the first tuple we find. But on later passes, heapTuple
2057  * will initially be pointing to the tuple we returned last time.
2058  * Returning it again would be incorrect (and would loop forever), so
2059  * we skip it and return the next match we find.
2060  */
2061  if (!skip)
2062  {
2063  /*
2064  * For the benefit of logical decoding, have t_self point at the
2065  * element of the HOT chain we're currently investigating instead
2066  * of the root tuple of the HOT chain. This is important because
2067  * the *Satisfies routine for historical mvcc snapshots needs the
2068  * correct tid to decide about the visibility in some cases.
2069  */
2070  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2071 
2072  /* If it's visible per the snapshot, we must return it */
2073  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2074  CheckForSerializableConflictOut(valid, relation, heapTuple,
2075  buffer, snapshot);
2076  /* reset to original, non-redirected, tid */
2077  heapTuple->t_self = *tid;
2078 
2079  if (valid)
2080  {
2081  ItemPointerSetOffsetNumber(tid, offnum);
2082  PredicateLockTuple(relation, heapTuple, snapshot);
2083  if (all_dead)
2084  *all_dead = false;
2085  return true;
2086  }
2087  }
2088  skip = false;
2089 
2090  /*
2091  * If we can't see it, maybe no one else can either. At caller
2092  * request, check whether all chain members are dead to all
2093  * transactions.
2094  */
2095  if (all_dead && *all_dead &&
2096  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2097  *all_dead = false;
2098 
2099  /*
2100  * Check to see if HOT chain continues past this tuple; if so fetch
2101  * the next offnum and loop around.
2102  */
2103  if (HeapTupleIsHotUpdated(heapTuple))
2104  {
2105  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
2106  ItemPointerGetBlockNumber(tid));
2107  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2108  at_chain_start = false;
2109  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2110  }
2111  else
2112  break; /* end of chain */
2113  }
2114 
2115  return false;
2116 }
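
A sketch of the repeated-call pattern used by index_fetch_heap(): the caller already holds a pin on the buffer, takes the content lock, and passes first_call = true only on the first probe so that later calls resume past the tuple returned previously. With an MVCC snapshot at most one chain member can be visible, so a counting loop like this one only makes sense for non-MVCC snapshots such as SnapshotAny. The helper is hypothetical.

#include "postgres.h"
#include "access/heapam.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"

static int
count_visible_chain_members(Relation rel, Buffer buf, ItemPointer tid,
                            Snapshot snapshot)
{
    HeapTupleData htup;
    bool        all_dead;
    bool        first_call = true;
    int         nvisible = 0;

    LockBuffer(buf, BUFFER_LOCK_SHARE);
    while (heap_hot_search_buffer(tid, rel, buf, snapshot, &htup,
                                  &all_dead, first_call))
    {
        nvisible++;
        first_call = false;     /* skip the tuple just returned next time */
    }
    LockBuffer(buf, BUFFER_LOCK_UNLOCK);
    return nvisible;
}
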
void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6233 of file heapam.c.

References BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), NULL, xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6234 {
6235  Buffer buffer;
6236  Page page;
6237  OffsetNumber offnum;
6238  ItemId lp = NULL;
6239  HeapTupleHeader htup;
6240  uint32 oldlen;
6241  uint32 newlen;
6242 
6243  /*
6244  * For now, parallel operations are required to be strictly read-only.
6245  * Unlike a regular update, this should never create a combo CID, so it
6246  * might be possible to relax this restriction, but not without more
6247  * thought and testing. It's not clear that it would be useful, anyway.
6248  */
6249  if (IsInParallelMode())
6250  ereport(ERROR,
6251  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6252  errmsg("cannot update tuples during a parallel operation")));
6253 
6254  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6255  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6256  page = (Page) BufferGetPage(buffer);
6257 
6258  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6259  if (PageGetMaxOffsetNumber(page) >= offnum)
6260  lp = PageGetItemId(page, offnum);
6261 
6262  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6263  elog(ERROR, "invalid lp");
6264 
6265  htup = (HeapTupleHeader) PageGetItem(page, lp);
6266 
6267  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6268  newlen = tuple->t_len - tuple->t_data->t_hoff;
6269  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6270  elog(ERROR, "wrong tuple length");
6271 
6272  /* NO EREPORT(ERROR) from here till changes are logged */
6273  START_CRIT_SECTION();
6274 
6275  memcpy((char *) htup + htup->t_hoff,
6276  (char *) tuple->t_data + tuple->t_data->t_hoff,
6277  newlen);
6278 
6279  MarkBufferDirty(buffer);
6280 
6281  /* XLOG stuff */
6282  if (RelationNeedsWAL(relation))
6283  {
6284  xl_heap_inplace xlrec;
6285  XLogRecPtr recptr;
6286 
6287  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6288 
6289  XLogBeginInsert();
6290  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6291 
6292  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6293  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6294 
6295  /* inplace updates aren't decoded atm, don't log the origin */
6296 
6297  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6298 
6299  PageSetLSN(page, recptr);
6300  }
6301 
6302  END_CRIT_SECTION();
6303 
6304  UnlockReleaseBuffer(buffer);
6305 
6306  /*
6307  * Send out shared cache inval if necessary. Note that because we only
6308  * pass the new version of the tuple, this mustn't be used for any
6309  * operations that could change catcache lookup keys. But we aren't
6310  * bothering with index updates either, so that's true a fortiori.
6311  */
6312  if (!IsBootstrapProcessingMode())
6313  CacheInvalidateHeapTuple(relation, tuple, NULL);
6314 }
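
A hedged sketch in the spirit of vac_update_relstats(): overwrite a fixed-width pg_class field in place. Only same-length, non-key changes are legal here, and values written this way are non-transactional. The helper name is hypothetical.

#include "postgres.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "catalog/pg_class.h"
#include "utils/syscache.h"

static void
set_relpages_inplace(Oid relid, int32 new_relpages)
{
    Relation    pg_class = heap_open(RelationRelationId, RowExclusiveLock);
    HeapTuple   ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
    Form_pg_class pgcform;

    if (!HeapTupleIsValid(ctup))
        elog(ERROR, "cache lookup failed for relation %u", relid);

    pgcform = (Form_pg_class) GETSTRUCT(ctup);
    pgcform->relpages = new_relpages;

    /* overwrites the existing tuple; length must not change */
    heap_inplace_update(pg_class, ctup);

    heap_freetuple(ctup);
    heap_close(pg_class, RowExclusiveLock);
}
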
Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2383 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

2385 {
2386  TransactionId xid = GetCurrentTransactionId();
2387  HeapTuple heaptup;
2388  Buffer buffer;
2389  Buffer vmbuffer = InvalidBuffer;
2390  bool all_visible_cleared = false;
2391 
2392  /*
2393  * Fill in tuple header fields, assign an OID, and toast the tuple if
2394  * necessary.
2395  *
2396  * Note: below this point, heaptup is the data we actually intend to store
2397  * into the relation; tup is the caller's original untoasted data.
2398  */
2399  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2400 
2401  /*
2402  * Find buffer to insert this tuple into. If the page is all visible,
2403  * this will also pin the requisite visibility map page.
2404  */
2405  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2406  InvalidBuffer, options, bistate,
2407  &vmbuffer, NULL);
2408 
2409  /*
2410  * We're about to do the actual insert -- but check for conflict first, to
2411  * avoid possibly having to roll back work we've just done.
2412  *
2413  * This is safe without a recheck as long as there is no possibility of
2414  * another process scanning the page between this check and the insert
2415  * being visible to the scan (i.e., an exclusive buffer content lock is
2416  * continuously held from this point until the tuple insert is visible).
2417  *
2418  * For a heap insert, we only need to check for table-level SSI locks. Our
2419  * new tuple can't possibly conflict with existing tuple locks, and heap
2420  * page locks are only consolidated versions of tuple locks; they do not
2421  * lock "gaps" as index page locks do. So we don't need to specify a
2422  * buffer when making the call, which makes for a faster check.
2423  */
2424  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2425 
2426  /* NO EREPORT(ERROR) from here till changes are logged */
2427  START_CRIT_SECTION();
2428 
2429  RelationPutHeapTuple(relation, buffer, heaptup,
2430  (options & HEAP_INSERT_SPECULATIVE) != 0);
2431 
2432  if (PageIsAllVisible(BufferGetPage(buffer)))
2433  {
2434  all_visible_cleared = true;
2436  visibilitymap_clear(relation,
2437  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2438  vmbuffer, VISIBILITYMAP_VALID_BITS);
2439  }
2440 
2441  /*
2442  * XXX Should we set PageSetPrunable on this page ?
2443  *
2444  * The inserting transaction may eventually abort thus making this tuple
2445  * DEAD and hence available for pruning. Though we don't want to optimize
2446  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2447  * aborted tuple will never be pruned until next vacuum is triggered.
2448  *
2449  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2450  */
2451 
2452  MarkBufferDirty(buffer);
2453 
2454  /* XLOG stuff */
2455  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2456  {
2457  xl_heap_insert xlrec;
2458  xl_heap_header xlhdr;
2459  XLogRecPtr recptr;
2460  Page page = BufferGetPage(buffer);
2461  uint8 info = XLOG_HEAP_INSERT;
2462  int bufflags = 0;
2463 
2464  /*
2465  * If this is a catalog, we need to transmit combocids to properly
2466  * decode, so log that as well.
2467  */
2469  log_heap_new_cid(relation, heaptup);
2470 
2471  /*
2472  * If this is the single and first tuple on page, we can reinit the
2473  * page instead of restoring the whole thing. Set flag, and hide
2474  * buffer references from XLogInsert.
2475  */
2476  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2477  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2478  {
2479  info |= XLOG_HEAP_INIT_PAGE;
2480  bufflags |= REGBUF_WILL_INIT;
2481  }
2482 
2483  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2484  xlrec.flags = 0;
2485  if (all_visible_cleared)
2486  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2487  if (options & HEAP_INSERT_SPECULATIVE)
2488  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2489  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2490 
2491  /*
2492  * For logical decoding, we need the tuple even if we're doing a full
2493  * page write, so make sure it's included even if we take a full-page
2494  * image. (XXX We could alternatively store a pointer into the FPW).
2495  */
2496  if (RelationIsLogicallyLogged(relation))
2497  {
2498  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2499  bufflags |= REGBUF_KEEP_DATA;
2500  }
2501 
2502  XLogBeginInsert();
2503  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2504 
2505  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2506  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2507  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2508 
2509  /*
2510  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2511  * write the whole page to the xlog, we don't need to store
2512  * xl_heap_header in the xlog.
2513  */
2514  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2515  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2516  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2517  XLogRegisterBufData(0,
2518  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2519  heaptup->t_len - SizeofHeapTupleHeader);
2520 
2521  /* filtering by origin on a row level is much more efficient */
2522  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2523 
2524  recptr = XLogInsert(RM_HEAP_ID, info);
2525 
2526  PageSetLSN(page, recptr);
2527  }
2528 
2529  END_CRIT_SECTION();
2530 
2531  UnlockReleaseBuffer(buffer);
2532  if (vmbuffer != InvalidBuffer)
2533  ReleaseBuffer(vmbuffer);
2534 
2535  /*
2536  * If tuple is cachable, mark it for invalidation from the caches in case
2537  * we abort. Note it is OK to do this after releasing the buffer, because
2538  * the heaptup data structure is all in local memory, not in the shared
2539  * buffer.
2540  */
2541  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2542 
2543  /* Note: speculative insertions are counted too, even if aborted later */
2544  pgstat_count_heap_insert(relation, 1);
2545 
2546  /*
2547  * If heaptup is a private copy, release it. Don't forget to copy t_self
2548  * back to the caller's image, too.
2549  */
2550  if (heaptup != tup)
2551  {
2552  tup->t_self = heaptup->t_self;
2553  heap_freetuple(heaptup);
2554  }
2555 
2556  return HeapTupleGetOid(tup);
2557 }
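
A minimal sketch of direct use (hypothetical helper): form a tuple and insert it with the current command ID, which is essentially what simple_heap_insert() does with options = 0. Index entries, if any, remain the caller's responsibility.

#include "postgres.h"
#include "access/heapam.h"
#include "access/htup_details.h"
#include "access/xact.h"
#include "utils/rel.h"

static Oid
insert_one_row(Relation rel, Datum *values, bool *isnull)
{
    HeapTuple   tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
    Oid         new_oid;

    /* cid ties the insertion to the current command for visibility checks */
    new_oid = heap_insert(rel, tup, GetCurrentCommandId(true), 0, NULL);

    heap_freetuple(tup);
    return new_oid;
}
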
HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer *  buffer,
HeapUpdateFailureData *  hufd 
)

Definition at line 4578 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().
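
Ahead of the listing, a hedged sketch of a typical call in the style of ExecLockRows()/GetTupleForTrigger(): lock one row FOR UPDATE and release the pin that heap_lock_tuple() hands back. Real callers must also act on HeapTupleUpdated, HeapTupleSelfUpdated and the other result codes; the hypothetical helper below only reports success.

#include "postgres.h"
#include "access/heapam.h"
#include "access/xact.h"
#include "storage/bufmgr.h"
#include "storage/itemptr.h"

static bool
lock_row_for_update(Relation rel, ItemPointer tid)
{
    HeapTupleData tuple;
    Buffer      buffer;
    HeapUpdateFailureData hufd;
    HTSU_Result test;

    tuple.t_self = *tid;
    test = heap_lock_tuple(rel, &tuple,
                           GetCurrentCommandId(true),
                           LockTupleExclusive, LockWaitBlock,
                           false,   /* don't follow the update chain */
                           &buffer, &hufd);
    ReleaseBuffer(buffer);      /* the buffer comes back pinned */

    return (test == HeapTupleMayBeUpdated);
}
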

4582 {
4583  HTSU_Result result;
4584  ItemPointer tid = &(tuple->t_self);
4585  ItemId lp;
4586  Page page;
4587  Buffer vmbuffer = InvalidBuffer;
4588  BlockNumber block;
4589  TransactionId xid,
4590  xmax;
4591  uint16 old_infomask,
4592  new_infomask,
4593  new_infomask2;
4594  bool first_time = true;
4595  bool have_tuple_lock = false;
4596  bool cleared_all_frozen = false;
4597 
4598  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4599  block = ItemPointerGetBlockNumber(tid);
4600 
4601  /*
4602  * Before locking the buffer, pin the visibility map page if it appears to
4603  * be necessary. Since we haven't got the lock yet, someone else might be
4604  * in the middle of changing this, so we'll need to recheck after we have
4605  * the lock.
4606  */
4607  if (PageIsAllVisible(BufferGetPage(*buffer)))
4608  visibilitymap_pin(relation, block, &vmbuffer);
4609 
4610  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4611 
4612  page = BufferGetPage(*buffer);
4613  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4614  Assert(ItemIdIsNormal(lp));
4615 
4616  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4617  tuple->t_len = ItemIdGetLength(lp);
4618  tuple->t_tableOid = RelationGetRelid(relation);
4619 
4620 l3:
4621  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4622 
4623  if (result == HeapTupleInvisible)
4624  {
4625  /*
4626  * This is possible, but only when locking a tuple for ON CONFLICT
4627  * UPDATE. We return this value here rather than throwing an error in
4628  * order to give that case the opportunity to throw a more specific
4629  * error.
4630  */
4631  result = HeapTupleInvisible;
4632  goto out_locked;
4633  }
4634  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4635  {
4636  TransactionId xwait;
4637  uint16 infomask;
4638  uint16 infomask2;
4639  bool require_sleep;
4640  ItemPointerData t_ctid;
4641 
4642  /* must copy state data before unlocking buffer */
4643  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4644  infomask = tuple->t_data->t_infomask;
4645  infomask2 = tuple->t_data->t_infomask2;
4646  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4647 
4648  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4649 
4650  /*
4651  * If any subtransaction of the current top transaction already holds
4652  * a lock as strong as or stronger than what we're requesting, we
4653  * effectively hold the desired lock already. We *must* succeed
4654  * without trying to take the tuple lock, else we will deadlock
4655  * against anyone wanting to acquire a stronger lock.
4656  *
4657  * Note we only do this the first time we loop on the HTSU result;
4658  * there is no point in testing in subsequent passes, because
4659  * evidently our own transaction cannot have acquired a new lock after
4660  * the first time we checked.
4661  */
4662  if (first_time)
4663  {
4664  first_time = false;
4665 
4666  if (infomask & HEAP_XMAX_IS_MULTI)
4667  {
4668  int i;
4669  int nmembers;
4670  MultiXactMember *members;
4671 
4672  /*
4673  * We don't need to allow old multixacts here; if that had
4674  * been the case, HeapTupleSatisfiesUpdate would have returned
4675  * MayBeUpdated and we wouldn't be here.
4676  */
4677  nmembers =
4678  GetMultiXactIdMembers(xwait, &members, false,
4679  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4680 
4681  for (i = 0; i < nmembers; i++)
4682  {
4683  /* only consider members of our own transaction */
4684  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4685  continue;
4686 
4687  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4688  {
4689  pfree(members);
4690  result = HeapTupleMayBeUpdated;
4691  goto out_unlocked;
4692  }
4693  }
4694 
4695  if (members)
4696  pfree(members);
4697  }
4698  else if (TransactionIdIsCurrentTransactionId(xwait))
4699  {
4700  switch (mode)
4701  {
4702  case LockTupleKeyShare:
4703  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4704  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4705  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4706  result = HeapTupleMayBeUpdated;
4707  goto out_unlocked;
4708  case LockTupleShare:
4709  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4710  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4711  {
4712  result = HeapTupleMayBeUpdated;
4713  goto out_unlocked;
4714  }
4715  break;
4716  case LockTupleNoKeyExclusive:
4717  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4718  {
4719  result = HeapTupleMayBeUpdated;
4720  goto out_unlocked;
4721  }
4722  break;
4723  case LockTupleExclusive:
4724  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4725  infomask2 & HEAP_KEYS_UPDATED)
4726  {
4727  result = HeapTupleMayBeUpdated;
4728  goto out_unlocked;
4729  }
4730  break;
4731  }
4732  }
4733  }
4734 
4735  /*
4736  * Initially assume that we will have to wait for the locking
4737  * transaction(s) to finish. We check various cases below in which
4738  * this can be turned off.
4739  */
4740  require_sleep = true;
4741  if (mode == LockTupleKeyShare)
4742  {
4743  /*
4744  * If we're requesting KeyShare, and there's no update present, we
4745  * don't need to wait. Even if there is an update, we can still
4746  * continue if the key hasn't been modified.
4747  *
4748  * However, if there are updates, we need to walk the update chain
4749  * to mark future versions of the row as locked, too. That way,
4750  * if somebody deletes that future version, we're protected
4751  * against the key going away. This locking of future versions
4752  * could block momentarily, if a concurrent transaction is
4753  * deleting a key; or it could return a value to the effect that
4754  * the transaction deleting the key has already committed. So we
4755  * do this before re-locking the buffer; otherwise this would be
4756  * prone to deadlocks.
4757  *
4758  * Note that the TID we're locking was grabbed before we unlocked
4759  * the buffer. For it to change while we're not looking, the
4760  * other properties we're testing for below after re-locking the
4761  * buffer would also change, in which case we would restart this
4762  * loop above.
4763  */
4764  if (!(infomask2 & HEAP_KEYS_UPDATED))
4765  {
4766  bool updated;
4767 
4768  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4769 
4770  /*
4771  * If there are updates, follow the update chain; bail out if
4772  * that cannot be done.
4773  */
4774  if (follow_updates && updated)
4775  {
4776  HTSU_Result res;
4777 
4778  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4779  GetCurrentTransactionId(),
4780  mode);
4781  if (res != HeapTupleMayBeUpdated)
4782  {
4783  result = res;
4784  /* recovery code expects to have buffer lock held */
4785  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4786  goto failed;
4787  }
4788  }
4789 
4790  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4791 
4792  /*
4793  * Make sure it's still an appropriate lock, else start over.
4794  * Also, if it wasn't updated before we released the lock, but
4795  * is updated now, we start over too; the reason is that we
4796  * now need to follow the update chain to lock the new
4797  * versions.
4798  */
4799  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4800  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4801  !updated))
4802  goto l3;
4803 
4804  /* Things look okay, so we can skip sleeping */
4805  require_sleep = false;
4806 
4807  /*
4808  * Note we allow Xmax to change here; other updaters/lockers
4809  * could have modified it before we grabbed the buffer lock.
4810  * However, this is not a problem, because with the recheck we
4811  * just did we ensure that they still don't conflict with the
4812  * lock we want.
4813  */
4814  }
4815  }
4816  else if (mode == LockTupleShare)
4817  {
4818  /*
4819  * If we're requesting Share, we can similarly avoid sleeping if
4820  * there's no update and no exclusive lock present.
4821  */
4822  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4823  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4824  {
4825  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4826 
4827  /*
4828  * Make sure it's still an appropriate lock, else start over.
4829  * See above about allowing xmax to change.
4830  */
4831  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4832  HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4833  goto l3;
4834  require_sleep = false;
4835  }
4836  }
4837  else if (mode == LockTupleNoKeyExclusive)
4838  {
4839  /*
4840  * If we're requesting NoKeyExclusive, we might also be able to
4841  * avoid sleeping; just ensure that there no conflicting lock
4842  * already acquired.
4843  */
4844  if (infomask & HEAP_XMAX_IS_MULTI)
4845  {
4846  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4847  mode))
4848  {
4849  /*
4850  * No conflict, but if the xmax changed under us in the
4851  * meantime, start over.
4852  */
4853  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4854  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4855  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4856  xwait))
4857  goto l3;
4858 
4859  /* otherwise, we're good */
4860  require_sleep = false;
4861  }
4862  }
4863  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4864  {
4865  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4866 
4867  /* if the xmax changed in the meantime, start over */
4868  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4869  !TransactionIdEquals(
4870  HeapTupleHeaderGetRawXmax(tuple->t_data),
4871  xwait))
4872  goto l3;
4873  /* otherwise, we're good */
4874  require_sleep = false;
4875  }
4876  }
4877 
4878  /*
4879  * As a check independent from those above, we can also avoid sleeping
4880  * if the current transaction is the sole locker of the tuple. Note
4881  * that the strength of the lock already held is irrelevant; this is
4882  * not about recording the lock in Xmax (which will be done regardless
4883  * of this optimization, below). Also, note that the cases where we
4884  * hold a lock stronger than we are requesting are already handled
4885  * above by not doing anything.
4886  *
4887  * Note we only deal with the non-multixact case here; MultiXactIdWait
4888  * is well equipped to deal with this situation on its own.
4889  */
4890  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4891  TransactionIdIsCurrentTransactionId(xwait))
4892  {
4893  /* ... but if the xmax changed in the meantime, start over */
4894  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4895  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4896  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4897  xwait))
4898  goto l3;
4900  require_sleep = false;
4901  }
4902 
4903  /*
4904  * Time to sleep on the other transaction/multixact, if necessary.
4905  *
4906  * If the other transaction is an update that's already committed,
4907  * then sleeping cannot possibly do any good: if we're required to
4908  * sleep, get out to raise an error instead.
4909  *
4910  * By here, we either have already acquired the buffer exclusive lock,
4911  * or we must wait for the locking transaction or multixact; so below
4912  * we ensure that we grab buffer lock after the sleep.
4913  */
4914  if (require_sleep && result == HeapTupleUpdated)
4915  {
4917  goto failed;
4918  }
4919  else if (require_sleep)
4920  {
4921  /*
4922  * Acquire tuple lock to establish our priority for the tuple, or
4923  * die trying. LockTuple will release us when we are next-in-line
4924  * for the tuple. We must do this even if we are share-locking.
4925  *
4926  * If we are forced to "start over" below, we keep the tuple lock;
4927  * this arranges that we stay at the head of the line while
4928  * rechecking tuple state.
4929  */
4930  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4931  &have_tuple_lock))
4932  {
4933  /*
4934  * This can only happen if wait_policy is Skip and the lock
4935  * couldn't be obtained.
4936  */
4937  result = HeapTupleWouldBlock;
4938  /* recovery code expects to have buffer lock held */
4939  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4940  goto failed;
4941  }
4942 
4943  if (infomask & HEAP_XMAX_IS_MULTI)
4944  {
4945  MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4946 
4947  /* We only ever lock tuples, never update them */
4948  if (status >= MultiXactStatusNoKeyUpdate)
4949  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4950 
4951  /* wait for multixact to end, or die trying */
4952  switch (wait_policy)
4953  {
4954  case LockWaitBlock:
4955  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4956  relation, &tuple->t_self, XLTW_Lock, NULL);
4957  break;
4958  case LockWaitSkip:
4959  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4960  status, infomask, relation,
4961  NULL))
4962  {
4963  result = HeapTupleWouldBlock;
4964  /* recovery code expects to have buffer lock held */
4965  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4966  goto failed;
4967  }
4968  break;
4969  case LockWaitError:
4970  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4971  status, infomask, relation,
4972  NULL))
4973  ereport(ERROR,
4974  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4975  errmsg("could not obtain lock on row in relation \"%s\"",
4976  RelationGetRelationName(relation))));
4977 
4978  break;
4979  }
4980 
4981  /*
4982  * Of course, the multixact might not be done here: if we're
4983  * requesting a light lock mode, other transactions with light
4984  * locks could still be alive, as well as locks owned by our
4985  * own xact or other subxacts of this backend. We need to
4986  * preserve the surviving MultiXact members. Note that it
4987  * isn't absolutely necessary in the latter case, but doing so
4988  * is simpler.
4989  */
4990  }
4991  else
4992  {
4993  /* wait for regular transaction to end, or die trying */
4994  switch (wait_policy)
4995  {
4996  case LockWaitBlock:
4997  XactLockTableWait(xwait, relation, &tuple->t_self,
4998  XLTW_Lock);
4999  break;
5000  case LockWaitSkip:
5001  if (!ConditionalXactLockTableWait(xwait))
5002  {
5003  result = HeapTupleWouldBlock;
5004  /* recovery code expects to have buffer lock held */
5005  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5006  goto failed;
5007  }
5007  }
5008  break;
5009  case LockWaitError:
5010  if (!ConditionalXactLockTableWait(xwait))
5011  ereport(ERROR,
5012  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5013  errmsg("could not obtain lock on row in relation \"%s\"",
5014  RelationGetRelationName(relation))));
5015  break;
5016  }
5017  }
5018 
5019  /* if there are updates, follow the update chain */
5020  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5021  {
5022  HTSU_Result res;
5023 
5024  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5025  GetCurrentTransactionId(),
5026  mode);
5027  if (res != HeapTupleMayBeUpdated)
5028  {
5029  result = res;
5030  /* recovery code expects to have buffer lock held */
5031  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5032  goto failed;
5033  }
5034  }
5035 
5036  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5037 
5038  /*
5039  * xwait is done, but if xwait had just locked the tuple then some
5040  * other xact could update this tuple before we get to this point.
5041  * Check for xmax change, and start over if so.
5042  */
5043  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5044  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5045  xwait))
5046  goto l3;
5047 
5048  if (!(infomask & HEAP_XMAX_IS_MULTI))
5049  {
5050  /*
5051  * Otherwise check if it committed or aborted. Note we cannot
5052  * be here if the tuple was only locked by somebody who didn't
5053  * conflict with us; that would have been handled above. So
5054  * that transaction must necessarily be gone by now. But
5055  * don't check for this in the multixact case, because some
5056  * locker transactions might still be running.
5057  */
5058  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5059  }
5060  }
5061 
5062  /* By here, we're certain that we hold buffer exclusive lock again */
5063 
5064  /*
5065  * We may lock if previous xmax aborted, or if it committed but only
5066  * locked the tuple without updating it; or if we didn't have to wait
5067  * at all for whatever reason.
5068  */
5069  if (!require_sleep ||
5070  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5071  HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5072  HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5073  result = HeapTupleMayBeUpdated;
5074  else
5075  result = HeapTupleUpdated;
5076  }
5077 
5078 failed:
5079  if (result != HeapTupleMayBeUpdated)
5080  {
5081  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5082  result == HeapTupleWouldBlock);
5083  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5084  hufd->ctid = tuple->t_data->t_ctid;
5085  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5086  if (result == HeapTupleSelfUpdated)
5087  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5088  else
5089  hufd->cmax = InvalidCommandId;
5090  goto out_locked;
5091  }
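For context, the fields filled in under failed: are how callers decide what to do next. The following is only a sketch of the caller-side pattern, loosely modeled on the executor's ExecLockRows and not part of heapam.c; rel, tid, and cid are assumed to be supplied by the surrounding code:

	HeapTupleData locktup;
	HeapUpdateFailureData hufd;
	Buffer		buf;
	HTSU_Result test;

	ItemPointerCopy(tid, &locktup.t_self);	/* only t_self need be set */

	test = heap_lock_tuple(rel, &locktup, cid,
						   LockTupleExclusive, LockWaitBlock,
						   false,	/* don't chase the update chain */
						   &buf, &hufd);
	ReleaseBuffer(buf);			/* heap_lock_tuple returns the buffer pinned */

	switch (test)
	{
		case HeapTupleMayBeUpdated:
			break;				/* lock acquired; proceed */
		case HeapTupleSelfUpdated:
			/* hufd.cmax is valid only in this case (see failed: above) */
			break;
		case HeapTupleUpdated:
			/* hufd.ctid and hufd.xmax identify the newer version to chase */
			break;
		case HeapTupleWouldBlock:
			/* only reachable with LockWaitSkip */
			break;
		default:
			elog(ERROR, "unexpected heap_lock_tuple result: %d", (int) test);
	}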
5092 
5093  /*
5094  * If we didn't pin the visibility map page and the page has become all
5095  * visible while we were busy locking the buffer, or during some
5096  * subsequent window during which we had it unlocked, we'll have to unlock
5097  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5098  * unfortunate, especially since we'll now have to recheck whether the
5099  * tuple has been locked or updated under us, but hopefully it won't
5100  * happen very often.
5101  */
5102  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5103  {
5104  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5105  visibilitymap_pin(relation, block, &vmbuffer);
5106  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5107  goto l3;
5108  }
5109 
5110  xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5111  old_infomask = tuple->t_data->t_infomask;
5112 
5113  /*
5114  * If this is the first possibly-multixact-able operation in the current
5115  * transaction, set my per-backend OldestMemberMXactId setting. We can be
5116  * certain that the transaction will never become a member of any older
5117  * MultiXactIds than that. (We have to do this even if we end up just
5118  * using our own TransactionId below, since some other backend could
5119  * incorporate our XID into a MultiXact immediately afterwards.)
5120  */
5121  MultiXactIdSetOldestMember();
5122 
5123  /*
5124  * Compute the new xmax and infomask to store into the tuple. Note we do
5125  * not modify the tuple just yet, because that would leave it in the wrong
5126  * state if multixact.c elogs.
5127  */
5128  compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5129  GetCurrentTransactionId(), mode, false,
5130  &xid, &new_infomask, &new_infomask2);
5131 
5132  START_CRIT_SECTION();
5133 
5134  /*
5135  * Store transaction information of xact locking the tuple.
5136  *
5137  * Note: Cmax is meaningless in this context, so don't set it; this avoids
5138  * possibly generating a useless combo CID. Moreover, if we're locking a
5139  * previously updated tuple, it's important to preserve the Cmax.
5140  *
5141  * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5142  * we would break the HOT chain.
5143  */
5144  tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5145  tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
5146  tuple->t_data->t_infomask |= new_infomask;
5147  tuple->t_data->t_infomask2 |= new_infomask2;
5148  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5149  HeapTupleHeaderClearHotUpdated(tuple->t_data);
5150  HeapTupleHeaderSetXmax(tuple->t_data, xid);
5151 
5152  /*
5153  * Make sure there is no forward chain link in t_ctid. Note that in the
5154  * cases where the tuple has been updated, we must not overwrite t_ctid,
5155  * because it was set by the updater. Moreover, if the tuple has been
5156  * updated, we need to follow the update chain to lock the new versions of
5157  * the tuple as well.
5158  */
5159  if (HEAP_XMAX_IS_LOCKED_ONLY(new_infomask))
5160  tuple->t_data->t_ctid = *tid;
5161 
5162  /* Clear only the all-frozen bit on visibility map if needed */
5163  if (PageIsAllVisible(page) &&
5164  visibilitymap_clear(relation, block, vmbuffer,
5165  VISIBILITYMAP_ALL_FROZEN))
5166  cleared_all_frozen = true;
5167 
5168 
5169  MarkBufferDirty(*buffer);
5170 
5171  /*
5172  * XLOG stuff. You might think that we don't need an XLOG record because
5173  * there is no state change worth restoring after a crash. You would be
5174  * wrong however: we have just written either a TransactionId or a
5175  * MultiXactId that may never have been seen on disk before, and we need
5176  * to make sure that there are XLOG entries covering those ID numbers.
5177  * Else the same IDs might be re-used after a crash, which would be
5178  * disastrous if this page made it to disk before the crash. Essentially
5179  * we have to enforce the WAL log-before-data rule even in this case.
5180  * (Also, in a PITR log-shipping or 2PC environment, we have to have XLOG
5181  * entries for everything anyway.)
5182  */
5183  if (RelationNeedsWAL(relation))
5184  {
5185  xl_heap_lock xlrec;
5186  XLogRecPtr recptr;
5187 
5188  XLogBeginInsert();
5189  XLogRegisterBuffer(0, *buffer, REGBUF_STANDARD);
5190 
5191  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
5192  xlrec.locking_xid = xid;
5193  xlrec.infobits_set = compute_infobits(new_infomask,
5194  tuple->t_data->t_infomask2);
5195  xlrec.flags = cleared_all_frozen ? XLH_LOCK_ALL_FROZEN_CLEARED : 0;