PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Include dependency graph for heapam.c:

Go to the source code of this file.

Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static void heap_parallelscan_startblock_init (HeapScanDesc scan)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static Bitmapset * HeapDetermineModifiedColumns (Relation relation, Bitmapset *interesting_cols, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
 

Macro Definition Documentation

#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 185 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6329 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6332 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_NOOP   0x0001

Definition at line 6328 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6331 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6330 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().
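
The FRM_* bits report to FreezeMultiXactId()'s caller what was decided about an old multixact xmax. The following is a hedged sketch of how a caller such as heap_prepare_freeze_tuple() might read the combination of the returned TransactionId and the flags; it assumes it is compiled inside heapam.c (FreezeMultiXactId() is static there) and does not reproduce the real freeze-plan bookkeeping.

/* Hypothetical helper; interpret_freeze_multi is not part of heapam.c. */
static void
interpret_freeze_multi(MultiXactId multi, uint16 t_infomask,
					   TransactionId cutoff_xid, MultiXactId cutoff_multi)
{
	uint16		flags;
	TransactionId newxmax;

	newxmax = FreezeMultiXactId(multi, t_infomask,
								cutoff_xid, cutoff_multi, &flags);

	if (flags & FRM_NOOP)
	{
		/* the existing multixact can stay as the tuple's xmax */
	}
	else if (flags & FRM_INVALIDATE_XMAX)
	{
		/* xmax is no longer interesting; the caller clears it (HEAP_XMAX_INVALID) */
	}
	else if (flags & FRM_RETURN_IS_XID)
	{
		/*
		 * newxmax is a plain updater TransactionId replacing the multi;
		 * FRM_MARK_COMMITTED additionally allows setting HEAP_XMAX_COMMITTED.
		 */
	}
	else if (flags & FRM_RETURN_IS_MULTI)
	{
		/* newxmax is a freshly created MultiXactId of the surviving members */
	}

	(void) newxmax;				/* not used further in this sketch */
}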

#define HEAPDEBUG_1

Definition at line 1803 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_2

Definition at line 1804 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_3

Definition at line 1805 of file heapam.c.

Referenced by heap_getnext().

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

Definition at line 173 of file heapam.c.

#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 181 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 203 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 183 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().
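
LockTupleTuplock() and UnlockTupleTuplock() are used as a matched pair around work on a single tuple, taking the heavyweight lock mode looked up in tupleLockExtraInfo. A hedged sketch of that pairing is shown below; it assumes heapam.c context (the macros and the table are file-local), and heap_acquire_tuplock() layers wait-policy handling (block, skip, or error) on top of this.

/* Hypothetical helper; tuplock_pairing_example is not part of heapam.c. */
static void
tuplock_pairing_example(Relation relation, HeapTuple tuple, LockTupleMode mode)
{
	LockTupleTuplock(relation, &tuple->t_self, mode);
	/* ... work on the tuple while holding the heavyweight tuple lock ... */
	UnlockTupleTuplock(relation, &tuple->t_self, mode);
}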

Function Documentation

static uint8 compute_infobits (uint16 infomask, uint16 infomask2)

Definition at line 2968 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2969 {
2970  return
2971  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2972  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2973  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2974  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2975  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2976  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2977  XLHL_KEYS_UPDATED : 0);
2978 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
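
compute_infobits() condenses the xmax-related infomask bits into the compact XLHL_* representation stored in WAL records, and fix_infomask_from_infobits() (documented further below) reverses the mapping at redo time. The following round-trip check is a hedged sketch, assuming it were compiled inside heapam.c where both static helpers are visible; note that HEAP_XMAX_SHR_LOCK is deliberately not carried through.

/* Hypothetical check; check_infobits_roundtrip is not part of heapam.c. */
static void
check_infobits_roundtrip(uint16 infomask, uint16 infomask2)
{
	uint8		bits = compute_infobits(infomask, infomask2);
	uint16		redo_infomask = 0;
	uint16		redo_infomask2 = 0;

	fix_infomask_from_infobits(bits, &redo_infomask, &redo_infomask2);

	/* multi, lock-only, excl and keyshr bits survive, as does HEAP_KEYS_UPDATED */
	Assert((redo_infomask & HEAP_XMAX_IS_MULTI) == (infomask & HEAP_XMAX_IS_MULTI));
	Assert((redo_infomask & HEAP_XMAX_LOCK_ONLY) == (infomask & HEAP_XMAX_LOCK_ONLY));
	Assert((redo_infomask2 & HEAP_KEYS_UPDATED) == (infomask2 & HEAP_KEYS_UPDATED));
	/* HEAP_XMAX_SHR_LOCK is intentionally dropped by compute_infobits() */
}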
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)

Definition at line 5272 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5277 {
5278  TransactionId new_xmax;
5279  uint16 new_infomask,
5280  new_infomask2;
5281 
5282  Assert(TransactionIdIsCurrentTransactionId(add_to_xmax));
5283 
5284 l5:
5285  new_infomask = 0;
5286  new_infomask2 = 0;
5287  if (old_infomask & HEAP_XMAX_INVALID)
5288  {
5289  /*
5290  * No previous locker; we just insert our own TransactionId.
5291  *
5292  * Note that it's critical that this case be the first one checked,
5293  * because there are several blocks below that come back to this one
5294  * to implement certain optimizations; old_infomask might contain
5295  * other dirty bits in those cases, but we don't really care.
5296  */
5297  if (is_update)
5298  {
5299  new_xmax = add_to_xmax;
5300  if (mode == LockTupleExclusive)
5301  new_infomask2 |= HEAP_KEYS_UPDATED;
5302  }
5303  else
5304  {
5305  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5306  switch (mode)
5307  {
5308  case LockTupleKeyShare:
5309  new_xmax = add_to_xmax;
5310  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5311  break;
5312  case LockTupleShare:
5313  new_xmax = add_to_xmax;
5314  new_infomask |= HEAP_XMAX_SHR_LOCK;
5315  break;
5316  case LockTupleNoKeyExclusive:
5317  new_xmax = add_to_xmax;
5318  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5319  break;
5320  case LockTupleExclusive:
5321  new_xmax = add_to_xmax;
5322  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5323  new_infomask2 |= HEAP_KEYS_UPDATED;
5324  break;
5325  default:
5326  new_xmax = InvalidTransactionId; /* silence compiler */
5327  elog(ERROR, "invalid lock mode");
5328  }
5329  }
5330  }
5331  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5332  {
5333  MultiXactStatus new_status;
5334 
5335  /*
5336  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5337  * cross-check.
5338  */
5339  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5340 
5341  /*
5342  * A multixact together with LOCK_ONLY set but neither lock bit set
5343  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5344  * anymore. This check is critical for databases upgraded by
5345  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5346  * that such multis are never passed.
5347  */
5348  if (HEAP_LOCKED_UPGRADED(old_infomask))
5349  {
5350  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5351  old_infomask |= HEAP_XMAX_INVALID;
5352  goto l5;
5353  }
5354 
5355  /*
5356  * If the XMAX is already a MultiXactId, then we need to expand it to
5357  * include add_to_xmax; but if all the members were lockers and are
5358  * all gone, we can do away with the IS_MULTI bit and just set
5359  * add_to_xmax as the only locker/updater. If all lockers are gone
5360  * and we have an updater that aborted, we can also do without a
5361  * multi.
5362  *
5363  * The cost of doing GetMultiXactIdMembers would be paid by
5364  * MultiXactIdExpand if we weren't to do this, so this check is not
5365  * incurring extra work anyhow.
5366  */
5367  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5368  {
5369  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5370  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5371  old_infomask)))
5372  {
5373  /*
5374  * Reset these bits and restart; otherwise fall through to
5375  * create a new multi below.
5376  */
5377  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5378  old_infomask |= HEAP_XMAX_INVALID;
5379  goto l5;
5380  }
5381  }
5382 
5383  new_status = get_mxact_status_for_lock(mode, is_update);
5384 
5385  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5386  new_status);
5387  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5388  }
5389  else if (old_infomask & HEAP_XMAX_COMMITTED)
5390  {
5391  /*
5392  * It's a committed update, so we need to preserve him as updater of
5393  * the tuple.
5394  */
5395  MultiXactStatus status;
5396  MultiXactStatus new_status;
5397 
5398  if (old_infomask2 & HEAP_KEYS_UPDATED)
5399  status = MultiXactStatusUpdate;
5400  else
5401  status = MultiXactStatusNoKeyUpdate;
5402 
5403  new_status = get_mxact_status_for_lock(mode, is_update);
5404 
5405  /*
5406  * since it's not running, it's obviously impossible for the old
5407  * updater to be identical to the current one, so we need not check
5408  * for that case as we do in the block above.
5409  */
5410  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5411  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5412  }
5413  else if (TransactionIdIsInProgress(xmax))
5414  {
5415  /*
5416  * If the XMAX is a valid, in-progress TransactionId, then we need to
5417  * create a new MultiXactId that includes both the old locker or
5418  * updater and our own TransactionId.
5419  */
5420  MultiXactStatus new_status;
5421  MultiXactStatus old_status;
5422  LockTupleMode old_mode;
5423 
5424  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5425  {
5426  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5427  old_status = MultiXactStatusForKeyShare;
5428  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5429  old_status = MultiXactStatusForShare;
5430  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5431  {
5432  if (old_infomask2 & HEAP_KEYS_UPDATED)
5433  old_status = MultiXactStatusForUpdate;
5434  else
5435  old_status = MultiXactStatusForNoKeyUpdate;
5436  }
5437  else
5438  {
5439  /*
5440  * LOCK_ONLY can be present alone only when a page has been
5441  * upgraded by pg_upgrade. But in that case,
5442  * TransactionIdIsInProgress() should have returned false. We
5443  * assume it's no longer locked in this case.
5444  */
5445  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5446  old_infomask |= HEAP_XMAX_INVALID;
5447  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5448  goto l5;
5449  }
5450  }
5451  else
5452  {
5453  /* it's an update, but which kind? */
5454  if (old_infomask2 & HEAP_KEYS_UPDATED)
5455  old_status = MultiXactStatusUpdate;
5456  else
5457  old_status = MultiXactStatusNoKeyUpdate;
5458  }
5459 
5460  old_mode = TUPLOCK_from_mxstatus(old_status);
5461 
5462  /*
5463  * If the lock to be acquired is for the same TransactionId as the
5464  * existing lock, there's an optimization possible: consider only the
5465  * strongest of both locks as the only one present, and restart.
5466  */
5467  if (xmax == add_to_xmax)
5468  {
5469  /*
5470  * Note that it's not possible for the original tuple to be
5471  * updated: we wouldn't be here because the tuple would have been
5472  * invisible and we wouldn't try to update it. As a subtlety,
5473  * this code can also run when traversing an update chain to lock
5474  * future versions of a tuple. But we wouldn't be here either,
5475  * because the add_to_xmax would be different from the original
5476  * updater.
5477  */
5478  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5479 
5480  /* acquire the strongest of both */
5481  if (mode < old_mode)
5482  mode = old_mode;
5483  /* mustn't touch is_update */
5484 
5485  old_infomask |= HEAP_XMAX_INVALID;
5486  goto l5;
5487  }
5488 
5489  /* otherwise, just fall back to creating a new multixact */
5490  new_status = get_mxact_status_for_lock(mode, is_update);
5491  new_xmax = MultiXactIdCreate(xmax, old_status,
5492  add_to_xmax, new_status);
5493  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5494  }
5495  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5496  TransactionIdDidCommit(xmax))
5497  {
5498  /*
5499  * It's a committed update, so we gotta preserve him as updater of the
5500  * tuple.
5501  */
5502  MultiXactStatus status;
5503  MultiXactStatus new_status;
5504 
5505  if (old_infomask2 & HEAP_KEYS_UPDATED)
5506  status = MultiXactStatusUpdate;
5507  else
5508  status = MultiXactStatusNoKeyUpdate;
5509 
5510  new_status = get_mxact_status_for_lock(mode, is_update);
5511 
5512  /*
5513  * since it's not running, it's obviously impossible for the old
5514  * updater to be identical to the current one, so we need not check
5515  * for that case as we do in the block above.
5516  */
5517  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5518  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5519  }
5520  else
5521  {
5522  /*
5523  * Can get here iff the locking/updating transaction was running when
5524  * the infomask was extracted from the tuple, but finished before
5525  * TransactionIdIsInProgress got to run. Deal with it as if there was
5526  * no locker at all in the first place.
5527  */
5528  old_infomask |= HEAP_XMAX_INVALID;
5529  goto l5;
5530  }
5531 
5532  *result_infomask = new_infomask;
5533  *result_infomask2 = new_infomask2;
5534  *result_xmax = new_xmax;
5535 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6847
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:192
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:248
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6928
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:267
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
TransactionId MultiXactId
Definition: c.h:407
#define Assert(condition)
Definition: c.h:676
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4504
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:250
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:252
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)

Definition at line 7182 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7184 {
7185  return Do_MultiXactIdWait(multi, status, infomask, true,
7186  rel, NULL, XLTW_None, remaining);
7187 }
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7082
#define NULL
Definition: c.h:229
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)

Definition at line 7082 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), result, MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7086 {
7087  bool result = true;
7088  MultiXactMember *members;
7089  int nmembers;
7090  int remain = 0;
7091 
7092  /* for pre-pg_upgrade tuples, no need to sleep at all */
7093  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7094  GetMultiXactIdMembers(multi, &members, false,
7095  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7096 
7097  if (nmembers >= 0)
7098  {
7099  int i;
7100 
7101  for (i = 0; i < nmembers; i++)
7102  {
7103  TransactionId memxid = members[i].xid;
7104  MultiXactStatus memstatus = members[i].status;
7105 
7106  if (TransactionIdIsCurrentTransactionId(memxid))
7107  {
7108  remain++;
7109  continue;
7110  }
7111 
7112  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7113  LOCKMODE_from_mxstatus(status)))
7114  {
7115  if (remaining && TransactionIdIsInProgress(memxid))
7116  remain++;
7117  continue;
7118  }
7119 
7120  /*
7121  * This member conflicts with our multi, so we have to sleep (or
7122  * return failure, if asked to avoid waiting.)
7123  *
7124  * Note that we don't set up an error context callback ourselves,
7125  * but instead we pass the info down to XactLockTableWait. This
7126  * might seem a bit wasteful because the context is set up and
7127  * tore down for each member of the multixact, but in reality it
7128  * should be barely noticeable, and it avoids duplicate code.
7129  */
7130  if (nowait)
7131  {
7132  result = ConditionalXactLockTableWait(memxid);
7133  if (!result)
7134  break;
7135  }
7136  else
7137  XactLockTableWait(memxid, rel, ctid, oper);
7138  }
7139 
7140  pfree(members);
7141  }
7142 
7143  if (remaining)
7144  *remaining = remain;
7145 
7146  return result;
7147 }
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)

Definition at line 6993 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), result, status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6995 {
6996  int nmembers;
6997  MultiXactMember *members;
6998  bool result = false;
6999  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7000 
7001  if (HEAP_LOCKED_UPGRADED(infomask))
7002  return false;
7003 
7004  nmembers = GetMultiXactIdMembers(multi, &members, false,
7005  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7006  if (nmembers >= 0)
7007  {
7008  int i;
7009 
7010  for (i = 0; i < nmembers; i++)
7011  {
7012  TransactionId memxid;
7013  LOCKMODE memlockmode;
7014 
7015  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7016 
7017  /* ignore members that don't conflict with the lock we want */
7018  if (!DoLockModesConflict(memlockmode, wanted))
7019  continue;
7020 
7021  /* ignore members from current xact */
7022  memxid = members[i].xid;
7023  if (TransactionIdIsCurrentTransactionId(memxid))
7024  continue;
7025 
7026  if (ISUPDATE_from_mxstatus(members[i].status))
7027  {
7028  /* ignore aborted updaters */
7029  if (TransactionIdDidAbort(memxid))
7030  continue;
7031  }
7032  else
7033  {
7034  /* ignore lockers-only that are no longer in progress */
7035  if (!TransactionIdIsInProgress(memxid))
7036  continue;
7037  }
7038 
7039  /*
7040  * Whatever remains are either live lockers that conflict with our
7041  * wanted lock, and updaters that are not aborted. Those conflict
7042  * with what we want, so return true.
7043  */
7044  result = true;
7045  break;
7046  }
7047  pfree(members);
7048  }
7049 
7050  return result;
7051 }
uint32 TransactionId
Definition: c.h:397
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:173
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)

Definition at line 7820 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, NULL, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7821 {
7822  TupleDesc desc = RelationGetDescr(relation);
7823  Oid replidindex;
7824  Relation idx_rel;
7825  TupleDesc idx_desc;
7826  char replident = relation->rd_rel->relreplident;
7827  HeapTuple key_tuple = NULL;
7828  bool nulls[MaxHeapAttributeNumber];
7829  Datum values[MaxHeapAttributeNumber];
7830  int natt;
7831 
7832  *copy = false;
7833 
7834  if (!RelationIsLogicallyLogged(relation))
7835  return NULL;
7836 
7837  if (replident == REPLICA_IDENTITY_NOTHING)
7838  return NULL;
7839 
7840  if (replident == REPLICA_IDENTITY_FULL)
7841  {
7842  /*
7843  * When logging the entire old tuple, it very well could contain
7844  * toasted columns. If so, force them to be inlined.
7845  */
7846  if (HeapTupleHasExternal(tp))
7847  {
7848  *copy = true;
7849  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7850  }
7851  return tp;
7852  }
7853 
7854  /* if the key hasn't changed and we're only logging the key, we're done */
7855  if (!key_changed)
7856  return NULL;
7857 
7858  /* find the replica identity index */
7859  replidindex = RelationGetReplicaIndex(relation);
7860  if (!OidIsValid(replidindex))
7861  {
7862  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7863  RelationGetRelationName(relation));
7864  return NULL;
7865  }
7866 
7867  idx_rel = RelationIdGetRelation(replidindex);
7868  idx_desc = RelationGetDescr(idx_rel);
7869 
7870  /* deform tuple, so we have fast access to columns */
7871  heap_deform_tuple(tp, desc, values, nulls);
7872 
7873  /* set all columns to NULL, regardless of whether they actually are */
7874  memset(nulls, 1, sizeof(nulls));
7875 
7876  /*
7877  * Now set all columns contained in the index to NOT NULL, they cannot
7878  * currently be NULL.
7879  */
7880  for (natt = 0; natt < idx_desc->natts; natt++)
7881  {
7882  int attno = idx_rel->rd_index->indkey.values[natt];
7883 
7884  if (attno < 0)
7885  {
7886  /*
7887  * The OID column can appear in an index definition, but that's
7888  * OK, because we always copy the OID if present (see below).
7889  * Other system columns may not.
7890  */
7891  if (attno == ObjectIdAttributeNumber)
7892  continue;
7893  elog(ERROR, "system column in index");
7894  }
7895  nulls[attno - 1] = false;
7896  }
7897 
7898  key_tuple = heap_form_tuple(desc, values, nulls);
7899  *copy = true;
7900  RelationClose(idx_rel);
7901 
7902  /*
7903  * Always copy oids if the table has them, even if not included in the
7904  * index. The space in the logged tuple is used anyway, so there's little
7905  * point in not including the information.
7906  */
7907  if (relation->rd_rel->relhasoids)
7908  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7909 
7910  /*
7911  * If the tuple, which by here only contains indexed columns, still has
7912  * toasted columns, force them to be inlined. This is somewhat unlikely
7913  * since there's limits on the size of indexed columns, so we don't
7914  * duplicate toast_flatten_tuple()s functionality in the above loop over
7915  * the indexed columns, even if it would be more efficient.
7916  */
7917  if (HeapTupleHasExternal(key_tuple))
7918  {
7919  HeapTuple oldtup = key_tuple;
7920 
7921  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7922  heap_freetuple(oldtup);
7923  }
7924 
7925  return key_tuple;
7926 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1085
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4688
#define RelationGetDescr(relation)
Definition: rel.h:428
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
#define REPLICA_IDENTITY_NOTHING
Definition: pg_class.h:177
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:695
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:575
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:538
int natts
Definition: tupdesc.h:73
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
Form_pg_index rd_index
Definition: rel.h:159
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:436
void RelationClose(Relation relation)
Definition: relcache.c:2164
uintptr_t Datum
Definition: postgres.h:372
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
#define NULL
Definition: c.h:229
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:936
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2075
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)

Definition at line 8216 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8217 {
8218  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8219  HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
8220  *infomask2 &= ~HEAP_KEYS_UPDATED;
8221 
8222  if (infobits & XLHL_XMAX_IS_MULTI)
8223  *infomask |= HEAP_XMAX_IS_MULTI;
8224  if (infobits & XLHL_XMAX_LOCK_ONLY)
8225  *infomask |= HEAP_XMAX_LOCK_ONLY;
8226  if (infobits & XLHL_XMAX_EXCL_LOCK)
8227  *infomask |= HEAP_XMAX_EXCL_LOCK;
8228  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8229  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8230  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8231 
8232  if (infobits & XLHL_KEYS_UPDATED)
8233  *infomask2 |= HEAP_KEYS_UPDATED;
8234 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2349 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2350 {
2351  if (bistate->current_buf != InvalidBuffer)
2352  ReleaseBuffer(bistate->current_buf);
2353  FreeAccessStrategy(bistate->strategy);
2354  pfree(bistate);
2355 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:950
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:580
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)

Definition at line 6356 of file heapam.c.

References Assert, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6359 {
6360  TransactionId xid = InvalidTransactionId;
6361  int i;
6362  MultiXactMember *members;
6363  int nmembers;
6364  bool need_replace;
6365  int nnewmembers;
6366  MultiXactMember *newmembers;
6367  bool has_lockers;
6368  TransactionId update_xid;
6369  bool update_committed;
6370 
6371  *flags = 0;
6372 
6373  /* We should only be called in Multis */
6374  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6375 
6376  if (!MultiXactIdIsValid(multi) ||
6377  HEAP_LOCKED_UPGRADED(t_infomask))
6378  {
6379  /* Ensure infomask bits are appropriately set/reset */
6380  *flags |= FRM_INVALIDATE_XMAX;
6381  return InvalidTransactionId;
6382  }
6383  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6384  {
6385  /*
6386  * This old multi cannot possibly have members still running. If it
6387  * was a locker only, it can be removed without any further
6388  * consideration; but if it contained an update, we might need to
6389  * preserve it.
6390  */
6391  Assert(!MultiXactIdIsRunning(multi,
6392  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6393  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6394  {
6395  *flags |= FRM_INVALIDATE_XMAX;
6396  xid = InvalidTransactionId; /* not strictly necessary */
6397  }
6398  else
6399  {
6400  /* replace multi by update xid */
6401  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6402 
6403  /* wasn't only a lock, xid needs to be valid */
6404  Assert(TransactionIdIsValid(xid));
6405 
6406  /*
6407  * If the xid is older than the cutoff, it has to have aborted,
6408  * otherwise the tuple would have gotten pruned away.
6409  */
6410  if (TransactionIdPrecedes(xid, cutoff_xid))
6411  {
6412  Assert(!TransactionIdDidCommit(xid));
6413  *flags |= FRM_INVALIDATE_XMAX;
6414  xid = InvalidTransactionId; /* not strictly necessary */
6415  }
6416  else
6417  {
6418  *flags |= FRM_RETURN_IS_XID;
6419  }
6420  }
6421 
6422  return xid;
6423  }
6424 
6425  /*
6426  * This multixact might have or might not have members still running, but
6427  * we know it's valid and is newer than the cutoff point for multis.
6428  * However, some member(s) of it may be below the cutoff for Xids, so we
6429  * need to walk the whole members array to figure out what to do, if
6430  * anything.
6431  */
6432 
6433  nmembers =
6434  GetMultiXactIdMembers(multi, &members, false,
6435  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6436  if (nmembers <= 0)
6437  {
6438  /* Nothing worth keeping */
6439  *flags |= FRM_INVALIDATE_XMAX;
6440  return InvalidTransactionId;
6441  }
6442 
6443  /* is there anything older than the cutoff? */
6444  need_replace = false;
6445  for (i = 0; i < nmembers; i++)
6446  {
6447  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6448  {
6449  need_replace = true;
6450  break;
6451  }
6452  }
6453 
6454  /*
6455  * In the simplest case, there is no member older than the cutoff; we can
6456  * keep the existing MultiXactId as is.
6457  */
6458  if (!need_replace)
6459  {
6460  *flags |= FRM_NOOP;
6461  pfree(members);
6462  return InvalidTransactionId;
6463  }
6464 
6465  /*
6466  * If the multi needs to be updated, figure out which members do we need
6467  * to keep.
6468  */
6469  nnewmembers = 0;
6470  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6471  has_lockers = false;
6472  update_xid = InvalidTransactionId;
6473  update_committed = false;
6474 
6475  for (i = 0; i < nmembers; i++)
6476  {
6477  /*
6478  * Determine whether to keep this member or ignore it.
6479  */
6480  if (ISUPDATE_from_mxstatus(members[i].status))
6481  {
6482  TransactionId xid = members[i].xid;
6483 
6484  /*
6485  * It's an update; should we keep it? If the transaction is known
6486  * aborted or crashed then it's okay to ignore it, otherwise not.
6487  * Note that an updater older than cutoff_xid cannot possibly be
6488  * committed, because HeapTupleSatisfiesVacuum would have returned
6489  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6490  *
6491  * As with all tuple visibility routines, it's critical to test
6492  * TransactionIdIsInProgress before TransactionIdDidCommit,
6493  * because of race conditions explained in detail in tqual.c.
6494  */
6495  if (TransactionIdIsCurrentTransactionId(xid) ||
6496  TransactionIdIsInProgress(xid))
6497  {
6498  Assert(!TransactionIdIsValid(update_xid));
6499  update_xid = xid;
6500  }
6501  else if (TransactionIdDidCommit(xid))
6502  {
6503  /*
6504  * The transaction committed, so we can tell caller to set
6505  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6506  * the transaction is not running.)
6507  */
6508  Assert(!TransactionIdIsValid(update_xid));
6509  update_committed = true;
6510  update_xid = xid;
6511  }
6512 
6513  /*
6514  * Not in progress, not committed -- must be aborted or crashed;
6515  * we can ignore it.
6516  */
6517 
6518  /*
6519  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6520  * update Xid cannot possibly be older than the xid cutoff.
6521  */
6522  Assert(!TransactionIdIsValid(update_xid) ||
6523  !TransactionIdPrecedes(update_xid, cutoff_xid));
6524 
6525  /*
6526  * If we determined that it's an Xid corresponding to an update
6527  * that must be retained, additionally add it to the list of
6528  * members of the new Multi, in case we end up using that. (We
6529  * might still decide to use only an update Xid and not a multi,
6530  * but it's easier to maintain the list as we walk the old members
6531  * list.)
6532  */
6533  if (TransactionIdIsValid(update_xid))
6534  newmembers[nnewmembers++] = members[i];
6535  }
6536  else
6537  {
6538  /* We only keep lockers if they are still running */
6539  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6540  TransactionIdIsInProgress(members[i].xid))
6541  {
6542  /* running locker cannot possibly be older than the cutoff */
6543  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6544  newmembers[nnewmembers++] = members[i];
6545  has_lockers = true;
6546  }
6547  }
6548  }
6549 
6550  pfree(members);
6551 
6552  if (nnewmembers == 0)
6553  {
6554  /* nothing worth keeping!? Tell caller to remove the whole thing */
6555  *flags |= FRM_INVALIDATE_XMAX;
6556  xid = InvalidTransactionId;
6557  }
6558  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6559  {
6560  /*
6561  * If there's a single member and it's an update, pass it back alone
6562  * without creating a new Multi. (XXX we could do this when there's a
6563  * single remaining locker, too, but that would complicate the API too
6564  * much; moreover, the case with the single updater is more
6565  * interesting, because those are longer-lived.)
6566  */
6567  Assert(nnewmembers == 1);
6568  *flags |= FRM_RETURN_IS_XID;
6569  if (update_committed)
6570  *flags |= FRM_MARK_COMMITTED;
6571  xid = update_xid;
6572  }
6573  else
6574  {
6575  /*
6576  * Create a new multixact with the surviving members of the previous
6577  * one, to set as new Xmax in the tuple.
6578  */
6579  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6580  *flags |= FRM_RETURN_IS_MULTI;
6581  }
6582 
6583  pfree(newmembers);
6584 
6585  return xid;
6586 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6330
#define FRM_MARK_COMMITTED
Definition: heapam.c:6332
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:999
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6928
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6329
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6331
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define Assert(condition)
Definition: c.h:676
#define FRM_NOOP
Definition: heapam.c:6328
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3140
void * palloc(Size size)
Definition: mcxt.c:849
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)

Definition at line 4504 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4505 {
4506  int retval;
4507 
4508  if (is_update)
4509  retval = tupleLockExtraInfo[mode].updstatus;
4510  else
4511  retval = tupleLockExtraInfo[mode].lockstatus;
4512 
4513  if (retval == -1)
4514  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4515  is_update ? "true" : "false");
4516 
4517  return (MultiXactStatus) retval;
4518 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219
BulkInsertState GetBulkInsertState ( void  )

Definition at line 2335 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2336 {
2337  BulkInsertState bistate;
2338 
2339  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2340  bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2341  bistate->current_buf = InvalidBuffer;
2342  return bistate;
2343 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:525
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:849
Buffer current_buf
Definition: hio.h:34
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)

Definition at line 6847 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6849 {
6850  int nmembers;
6851  MultiXactMember *members;
6852  int i;
6853  uint16 bits = HEAP_XMAX_IS_MULTI;
6854  uint16 bits2 = 0;
6855  bool has_update = false;
6856  LockTupleMode strongest = LockTupleKeyShare;
6857 
6858  /*
6859  * We only use this in multis we just created, so they cannot be values
6860  * pre-pg_upgrade.
6861  */
6862  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6863 
6864  for (i = 0; i < nmembers; i++)
6865  {
6866  LockTupleMode mode;
6867 
6868  /*
6869  * Remember the strongest lock mode held by any member of the
6870  * multixact.
6871  */
6872  mode = TUPLOCK_from_mxstatus(members[i].status);
6873  if (mode > strongest)
6874  strongest = mode;
6875 
6876  /* See what other bits we need */
6877  switch (members[i].status)
6878  {
6879  case MultiXactStatusForKeyShare:
6880  case MultiXactStatusForShare:
6881  case MultiXactStatusForNoKeyUpdate:
6882  break;
6883 
6884  case MultiXactStatusForUpdate:
6885  bits2 |= HEAP_KEYS_UPDATED;
6886  break;
6887 
6888  case MultiXactStatusNoKeyUpdate:
6889  has_update = true;
6890  break;
6891 
6892  case MultiXactStatusUpdate:
6893  bits2 |= HEAP_KEYS_UPDATED;
6894  has_update = true;
6895  break;
6896  }
6897  }
6898 
6899  if (strongest == LockTupleExclusive ||
6900  strongest == LockTupleNoKeyExclusive)
6901  bits |= HEAP_XMAX_EXCL_LOCK;
6902  else if (strongest == LockTupleShare)
6903  bits |= HEAP_XMAX_SHR_LOCK;
6904  else if (strongest == LockTupleKeyShare)
6905  bits |= HEAP_XMAX_KEYSHR_LOCK;
6906 
6907  if (!has_update)
6908  bits |= HEAP_XMAX_LOCK_ONLY;
6909 
6910  if (nmembers > 0)
6911  pfree(members);
6912 
6913  *new_infomask = bits;
6914  *new_infomask2 = bits2;
6915 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:267
void pfree(void *pointer)
Definition: mcxt.c:950
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:203
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
void heap2_redo (XLogReaderState *record)

Definition at line 9080 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9081 {
9082  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9083 
9084  switch (info & XLOG_HEAP_OPMASK)
9085  {
9086  case XLOG_HEAP2_CLEAN:
9087  heap_xlog_clean(record);
9088  break;
9089  case XLOG_HEAP2_FREEZE_PAGE:
9090  heap_xlog_freeze_page(record);
9091  break;
9092  case XLOG_HEAP2_CLEANUP_INFO:
9093  heap_xlog_cleanup_info(record);
9094  break;
9095  case XLOG_HEAP2_VISIBLE:
9096  heap_xlog_visible(record);
9097  break;
9098  case XLOG_HEAP2_MULTI_INSERT:
9099  heap_xlog_multi_insert(record);
9100  break;
9101  case XLOG_HEAP2_LOCK_UPDATED:
9102  heap_xlog_lock_updated(record);
9103  break;
9104  case XLOG_HEAP2_NEW_CID:
9105 
9106  /*
9107  * Nothing to do on a real replay, only used during logical
9108  * decoding.
9109  */
9110  break;
9111  case XLOG_HEAP2_REWRITE:
9112  heap_xlog_logical_rewrite(record);
9113  break;
9114  default:
9115  elog(PANIC, "heap2_redo: unknown op code %u", info);
9116  }
9117 }
void heap_xlog_logical_rewrite(XLogReaderState *r)
Definition: rewriteheap.c:1118
#define XLOG_HEAP2_LOCK_UPDATED
Definition: heapam_xlog.h:59
#define XLOG_HEAP2_REWRITE
Definition: heapam_xlog.h:53
unsigned char uint8
Definition: c.h:266
#define XLOG_HEAP_OPMASK
Definition: heapam_xlog.h:41
#define PANIC
Definition: elog.h:53
#define XLOG_HEAP2_MULTI_INSERT
Definition: heapam_xlog.h:58
#define XLOG_HEAP2_VISIBLE
Definition: heapam_xlog.h:57
static void heap_xlog_lock_updated(XLogReaderState *record)
Definition: heapam.c:8941
static void heap_xlog_freeze_page(XLogReaderState *record)
Definition: heapam.c:8158
#define XLOG_HEAP2_CLEAN
Definition: heapam_xlog.h:54
#define XLOG_HEAP2_CLEANUP_INFO
Definition: heapam_xlog.h:56
static void heap_xlog_multi_insert(XLogReaderState *record)
Definition: heapam.c:8423
#define XLOG_HEAP2_NEW_CID
Definition: heapam_xlog.h:60
#define XLogRecGetInfo(decoder)
Definition: xlogreader.h:216
#define XLR_INFO_MASK
Definition: xlogrecord.h:62
static void heap_xlog_cleanup_info(XLogReaderState *record)
Definition: heapam.c:7932
#define XLOG_HEAP2_FREEZE_PAGE
Definition: heapam_xlog.h:55
static void heap_xlog_visible(XLogReaderState *record)
Definition: heapam.c:8043
#define elog
Definition: elog.h:219
static void heap_xlog_clean(XLogReaderState *record)
Definition: heapam.c:7953
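A short illustration of the dispatch above. The masking behaviour is what the macros in heapam_xlog.h and xlogrecord.h suggest; it is shown for orientation only and is not taken from this page.

/*
 * The XLR_INFO_MASK bits of the info byte are reserved for xlog-level
 * flags, so the heap2 opcode survives even when such a flag is set:
 *
 *     uint8 raw  = XLogRecGetInfo(record);   // e.g. XLOG_HEAP2_VISIBLE plus a flag bit
 *     uint8 info = raw & ~XLR_INFO_MASK;     // flag bits stripped
 *     (info & XLOG_HEAP_OPMASK) == XLOG_HEAP2_VISIBLE
 *         -> heap_xlog_visible(record) is called.
 */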
void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6104 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6105 {
6106  TransactionId xid = GetCurrentTransactionId();
6107  ItemPointer tid = &(tuple->t_self);
6108  ItemId lp;
6109  HeapTupleData tp;
6110  Page page;
6111  BlockNumber block;
6112  Buffer buffer;
6113 
6114  Assert(ItemPointerIsValid(tid));
6115 
6116  block = ItemPointerGetBlockNumber(tid);
6117  buffer = ReadBuffer(relation, block);
6118  page = BufferGetPage(buffer);
6119 
6120  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6121 
6122  /*
6123  * Page can't be all visible, we just inserted into it, and are still
6124  * running.
6125  */
6126  Assert(!PageIsAllVisible(page));
6127 
6128  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6129  Assert(ItemIdIsNormal(lp));
6130 
6131  tp.t_tableOid = RelationGetRelid(relation);
6132  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6133  tp.t_len = ItemIdGetLength(lp);
6134  tp.t_self = *tid;
6135 
6136  /*
6137  * Sanity check that the tuple really is a speculatively inserted tuple,
6138  * inserted by us.
6139  */
6140  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6141  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6142  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6143  elog(ERROR, "attempted to kill a non-speculative tuple");
6144  Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6145 
6146  /*
6147  * No need to check for serializable conflicts here. There is never a
6148  * need for a combocid, either. No need to extract replica identity, or
6149  * do anything special with infomask bits.
6150  */
6151 
6152  START_CRIT_SECTION();
6153 
6154  /*
6155  * The tuple will become DEAD immediately. Flag that this page
6156  * immediately is a candidate for pruning by setting xmin to
6157  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6158  * inventing a nicer API for this.
6159  */
6160  Assert(TransactionIdIsValid(RecentGlobalXmin));
6161  PageSetPrunable(page, RecentGlobalXmin);
6162 
6163  /* store transaction information of xact deleting the tuple */
6164  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6165  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6166 
6167  /*
6168  * Set the tuple header xmin to InvalidTransactionId. This makes the
6169  * tuple immediately invisible everyone. (In particular, to any
6170  * transactions waiting on the speculative token, woken up later.)
6171  */
6172  HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6173 
6174  /* Clear the speculative insertion token too */
6175  tp.t_data->t_ctid = tp.t_self;
6176 
6177  MarkBufferDirty(buffer);
6178 
6179  /*
6180  * XLOG stuff
6181  *
6182  * The WAL records generated here match heap_delete(). The same recovery
6183  * routines are used.
6184  */
6185  if (RelationNeedsWAL(relation))
6186  {
6187  xl_heap_delete xlrec;
6188  XLogRecPtr recptr;
6189 
6190  xlrec.flags = XLH_DELETE_IS_SUPER;
6191  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6192  tp.t_data->t_infomask2);
6193  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
6194  xlrec.xmax = xid;
6195 
6196  XLogBeginInsert();
6197  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6198  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6199 
6200  /* No replica identity & replication origin logged */
6201 
6202  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6203 
6204  PageSetLSN(page, recptr);
6205  }
6206 
6207  END_CRIT_SECTION();
6208 
6209  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6210 
6211  if (HeapTupleHasExternal(&tp))
6212  {
6213  Assert(!IsToastRelation(relation));
6214  toast_delete(relation, &tp, true);
6215  }
6216 
6217  /*
6218  * Never need to mark tuple for invalidation, since catalogs don't support
6219  * speculative insertion
6220  */
6221 
6222  /* Now we can release the buffer */
6223  ReleaseBuffer(buffer);
6224 
6225  /* count deletion, as we counted the insertion too */
6226  pgstat_count_heap_delete(relation);
6227 }
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
bool IsToastRelation(Relation relation)
Definition: catalog.c:136
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
union HeapTupleHeaderData::@45 t_choice
#define XLH_DELETE_IS_SUPER
Definition: heapam_xlog.h:95
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2968
HeapTupleFields t_heap
Definition: htup_details.h:146
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:397
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define HeapTupleHeaderIsSpeculative(tup)
Definition: htup_details.h:423
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
OffsetNumber offnum
Definition: heapam_xlog.h:105
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderIsHeapOnly(tup)
Definition: htup_details.h:502
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
#define ERROR
Definition: elog.h:43
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:417
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
TransactionId RecentGlobalXmin
Definition: snapmgr.c:166
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define InvalidTransactionId
Definition: transam.h:31
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
TransactionId t_xmin
Definition: htup_details.h:118
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_MOVED
Definition: htup_details.h:202
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:676
uint8 infobits_set
Definition: heapam_xlog.h:106
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
#define elog
Definition: elog.h:219
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define TransactionIdIsValid(xid)
Definition: transam.h:41
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
#define HeapTupleHeaderSetXmin(tup, xid)
Definition: htup_details.h:313
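A sketch of the caller-side protocol, modelled on ExecInsert() (named under "Referenced by" above). heap_insert() and the HEAP_INSERT_SPECULATIVE option are assumptions taken from heapam.h rather than from this excerpt, and the speculative-token locking and arbiter-index recheck that real callers perform are omitted.

/* Hypothetical caller, condensed to the two calls documented on this page. */
static void
speculative_insert_sketch(Relation rel, HeapTuple tuple, CommandId cid,
                          bool conflict_found)
{
    /* insert the tuple with a speculative token instead of a real t_ctid */
    heap_insert(rel, tuple, cid, HEAP_INSERT_SPECULATIVE, NULL);

    if (conflict_found)
        heap_abort_speculative(rel, tuple);    /* "super-delete": xmin set invalid */
    else
        heap_finish_speculative(rel, tuple);   /* token replaced by a real t_ctid */
}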
static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5223 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5225 {
5226  if (*have_tuple_lock)
5227  return true;
5228 
5229  switch (wait_policy)
5230  {
5231  case LockWaitBlock:
5232  LockTupleTuplock(relation, tid, mode);
5233  break;
5234 
5235  case LockWaitSkip:
5236  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5237  return false;
5238  break;
5239 
5240  case LockWaitError:
5241  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5242  ereport(ERROR,
5243  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5244  errmsg("could not obtain lock on row in relation \"%s\"",
5245  RelationGetRelationName(relation))));
5246  break;
5247  }
5248  *have_tuple_lock = true;
5249 
5250  return true;
5251 }
#define LockTupleTuplock(rel, tup, mode)
Definition: heapam.c:181
#define ConditionalLockTupleTuplock(rel, tup, mode)
Definition: heapam.c:185
int errcode(int sqlerrcode)
Definition: elog.c:575
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:436
#define ereport(elevel, rest)
Definition: elog.h:122
int errmsg(const char *fmt,...)
Definition: elog.c:797
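The caller-side pattern, mirroring heap_delete() further down this page; the wrapper function itself is illustrative only.

static void
tuplock_pattern_sketch(Relation relation, HeapTupleData *tp)
{
    bool have_tuple_lock = false;

    /* block until we are next in line for the tuple */
    heap_acquire_tuplock(relation, &(tp->t_self), LockTupleExclusive,
                         LockWaitBlock, &have_tuple_lock);

    /* ... wait for the concurrent xact, recheck xmax, perform the change ... */

    if (have_tuple_lock)
        UnlockTupleTuplock(relation, &(tp->t_self), LockTupleExclusive);
}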
HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1397 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1399 {
1400  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1401  true, true, true, false, false, false);
1402 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
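A minimal sequential-scan loop. heap_getnext() and ForwardScanDirection are assumed from access/heapam.h and access/sdir.h, as they are not part of this excerpt; heap_endscan() is documented further below.

static void
scan_all_tuples_sketch(Relation rel, Snapshot snapshot)
{
    HeapScanDesc scan = heap_beginscan(rel, snapshot, 0, NULL);
    HeapTuple    tuple;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points into a shared buffer; copy it if it must outlive the scan */
    }

    heap_endscan(scan);
}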
HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1425 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

1427 {
1428  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1429  false, false, true, true, false, false);
1430 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1405 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1406 {
1407  Oid relid = RelationGetRelid(relation);
1408  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1409 
1410  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1411  true, true, true, false, false, true);
1412 }
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
Snapshot GetCatalogSnapshot(Oid relid)
Definition: snapmgr.c:440
unsigned int Oid
Definition: postgres_ext.h:31
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
#define RelationGetRelid(relation)
Definition: rel.h:416
static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1443 of file heapam.c.

References initscan(), IsMVCCSnapshot, NULL, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

1452 {
1453  HeapScanDesc scan;
1454 
1455  /*
1456  * increment relation ref count while scanning relation
1457  *
1458  * This is just to make really sure the relcache entry won't go away while
1459  * the scan has a pointer to it. Caller should be holding the rel open
1460  * anyway, so this is redundant in all normal scenarios...
1461  */
1462  RelationIncrementReferenceCount(relation);
1463 
1464  /*
1465  * allocate and initialize scan descriptor
1466  */
1467  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1468 
1469  scan->rs_rd = relation;
1470  scan->rs_snapshot = snapshot;
1471  scan->rs_nkeys = nkeys;
1472  scan->rs_bitmapscan = is_bitmapscan;
1473  scan->rs_samplescan = is_samplescan;
1474  scan->rs_strategy = NULL; /* set in initscan */
1475  scan->rs_allow_strat = allow_strat;
1476  scan->rs_allow_sync = allow_sync;
1477  scan->rs_temp_snap = temp_snap;
1478  scan->rs_parallel = parallel_scan;
1479 
1480  /*
1481  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1482  */
1483  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1484 
1485  /*
1486  * For a seqscan in a serializable transaction, acquire a predicate lock
1487  * on the entire relation. This is required not only to lock all the
1488  * matching tuples, but also to conflict with new insertions into the
1489  * table. In an indexscan, we take page locks on the index pages covering
1490  * the range specified in the scan qual, but in a heap scan there is
1491  * nothing more fine-grained to lock. A bitmap scan is a different story,
1492  * there we have already scanned the index and locked the index pages
1493  * covering the predicate. But in that case we still have to lock any
1494  * matching heap tuples.
1495  */
1496  if (!is_bitmapscan)
1497  PredicateLockRelation(relation, snapshot);
1498 
1499  /* we only need to set this up once */
1500  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1501 
1502  /*
1503  * we do this here instead of in initscan() because heap_rescan also calls
1504  * initscan() and we don't want to allocate memory again
1505  */
1506  if (nkeys > 0)
1507  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1508  else
1509  scan->rs_key = NULL;
1510 
1511  initscan(scan, key, false);
1512 
1513  return scan;
1514 }
bool rs_allow_sync
Definition: relscan.h:56
void PredicateLockRelation(Relation relation, Snapshot snapshot)
Definition: predicate.c:2496
struct HeapScanDescData * HeapScanDesc
Definition: heapam.h:100
HeapTupleData rs_ctup
Definition: relscan.h:69
bool rs_bitmapscan
Definition: relscan.h:52
bool rs_pageatatime
Definition: relscan.h:54
ParallelHeapScanDesc rs_parallel
Definition: relscan.h:73
ScanKeyData * ScanKey
Definition: skey.h:75
Snapshot rs_snapshot
Definition: relscan.h:49
Oid t_tableOid
Definition: htup.h:66
bool rs_temp_snap
Definition: relscan.h:57
void RelationIncrementReferenceCount(Relation rel)
Definition: relcache.c:2131
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
#define NULL
Definition: c.h:229
#define IsMVCCSnapshot(snapshot)
Definition: tqual.h:31
void * palloc(Size size)
Definition: mcxt.c:849
bool rs_allow_strat
Definition: relscan.h:55
static void initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
Definition: heapam.c:216
bool rs_samplescan
Definition: relscan.h:53
#define RelationGetRelid(relation)
Definition: rel.h:416
ScanKey rs_key
Definition: relscan.h:51
HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1652 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, and RestoreSnapshot().

Referenced by ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

1653 {
1654  Snapshot snapshot;
1655 
1656  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1657  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1658  RegisterSnapshot(snapshot);
1659 
1660  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1661  true, true, true, false, false, true);
1662 }
char phs_snapshot_data[FLEXIBLE_ARRAY_MEMBER]
Definition: relscan.h:42
Snapshot RestoreSnapshot(char *start_address)
Definition: snapmgr.c:2121
Snapshot RegisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:863
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
#define Assert(condition)
Definition: c.h:676
#define RelationGetRelid(relation)
Definition: rel.h:416
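A sketch of the leader/worker split. heap_parallelscan_estimate(), heap_parallelscan_initialize() and shm_toc_allocate() are assumptions taken from heapam.h and shm_toc.h, not from this excerpt.

/* leader side, while building the DSM segment */
static ParallelHeapScanDesc
setup_shared_scan_sketch(shm_toc *toc, Relation rel, Snapshot snapshot)
{
    Size sz = heap_parallelscan_estimate(snapshot);
    ParallelHeapScanDesc pscan = shm_toc_allocate(toc, sz);

    heap_parallelscan_initialize(pscan, rel, snapshot);
    return pscan;
}

/* worker side (and the leader itself), after attaching to the segment */
static HeapScanDesc
attach_shared_scan_sketch(Relation rel, ParallelHeapScanDesc pscan)
{
    return heap_beginscan_parallel(rel, pscan);   /* restores the serialized snapshot */
}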
HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1433 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

1436 {
1437  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1438  allow_strat, allow_sync, allow_pagemode,
1439  false, true, false);
1440 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1415 of file heapam.c.

References heap_beginscan_internal().

Referenced by IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

1418 {
1419  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1420  allow_strat, allow_sync, true,
1421  false, false, false);
1422 }
static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
Definition: heapam.c:1443
#define NULL
Definition: c.h:229
HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 3027 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, NULL, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, result, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3030 {
3031  HTSU_Result result;
3032  TransactionId xid = GetCurrentTransactionId();
3033  ItemId lp;
3034  HeapTupleData tp;
3035  Page page;
3036  BlockNumber block;
3037  Buffer buffer;
3038  Buffer vmbuffer = InvalidBuffer;
3039  TransactionId new_xmax;
3040  uint16 new_infomask,
3041  new_infomask2;
3042  bool have_tuple_lock = false;
3043  bool iscombo;
3044  bool all_visible_cleared = false;
3045  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3046  bool old_key_copied = false;
3047 
3048  Assert(ItemPointerIsValid(tid));
3049 
3050  /*
3051  * Forbid this during a parallel operation, lest it allocate a combocid.
3052  * Other workers might need that combocid for visibility checks, and we
3053  * have no provision for broadcasting it to them.
3054  */
3055  if (IsInParallelMode())
3056  ereport(ERROR,
3057  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3058  errmsg("cannot delete tuples during a parallel operation")));
3059 
3060  block = ItemPointerGetBlockNumber(tid);
3061  buffer = ReadBuffer(relation, block);
3062  page = BufferGetPage(buffer);
3063 
3064  /*
3065  * Before locking the buffer, pin the visibility map page if it appears to
3066  * be necessary. Since we haven't got the lock yet, someone else might be
3067  * in the middle of changing this, so we'll need to recheck after we have
3068  * the lock.
3069  */
3070  if (PageIsAllVisible(page))
3071  visibilitymap_pin(relation, block, &vmbuffer);
3072 
3073  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3074 
3075  /*
3076  * If we didn't pin the visibility map page and the page has become all
3077  * visible while we were busy locking the buffer, we'll have to unlock and
3078  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3079  * unfortunate, but hopefully shouldn't happen often.
3080  */
3081  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3082  {
3083  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3084  visibilitymap_pin(relation, block, &vmbuffer);
3085  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3086  }
3087 
3088  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3089  Assert(ItemIdIsNormal(lp));
3090 
3091  tp.t_tableOid = RelationGetRelid(relation);
3092  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3093  tp.t_len = ItemIdGetLength(lp);
3094  tp.t_self = *tid;
3095 
3096 l1:
3097  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3098 
3099  if (result == HeapTupleInvisible)
3100  {
3101  UnlockReleaseBuffer(buffer);
3102  ereport(ERROR,
3103  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3104  errmsg("attempted to delete invisible tuple")));
3105  }
3106  else if (result == HeapTupleBeingUpdated && wait)
3107  {
3108  TransactionId xwait;
3109  uint16 infomask;
3110 
3111  /* must copy state data before unlocking buffer */
3112  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3113  infomask = tp.t_data->t_infomask;
3114 
3115  /*
3116  * Sleep until concurrent transaction ends -- except when there's a
3117  * single locker and it's our own transaction. Note we don't care
3118  * which lock mode the locker has, because we need the strongest one.
3119  *
3120  * Before sleeping, we need to acquire tuple lock to establish our
3121  * priority for the tuple (see heap_lock_tuple). LockTuple will
3122  * release us when we are next-in-line for the tuple.
3123  *
3124  * If we are forced to "start over" below, we keep the tuple lock;
3125  * this arranges that we stay at the head of the line while rechecking
3126  * tuple state.
3127  */
3128  if (infomask & HEAP_XMAX_IS_MULTI)
3129  {
3130  /* wait for multixact */
3131  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3132  LockTupleExclusive))
3133  {
3134  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3135 
3136  /* acquire tuple lock, if necessary */
3137  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3138  LockWaitBlock, &have_tuple_lock);
3139 
3140  /* wait for multixact */
3141  MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
3142  relation, &(tp.t_self), XLTW_Delete,
3143  NULL);
3144  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3145 
3146  /*
3147  * If xwait had just locked the tuple then some other xact
3148  * could update this tuple before we get to this point. Check
3149  * for xmax change, and start over if so.
3150  */
3151  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3152  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3153  xwait))
3154  goto l1;
3155  }
3156 
3157  /*
3158  * You might think the multixact is necessarily done here, but not
3159  * so: it could have surviving members, namely our own xact or
3160  * other subxacts of this backend. It is legal for us to delete
3161  * the tuple in either case, however (the latter case is
3162  * essentially a situation of upgrading our former shared lock to
3163  * exclusive). We don't bother changing the on-disk hint bits
3164  * since we are about to overwrite the xmax altogether.
3165  */
3166  }
3167  else if (!TransactionIdIsCurrentTransactionId(xwait))
3168  {
3169  /*
3170  * Wait for regular transaction to end; but first, acquire tuple
3171  * lock.
3172  */
3173  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3174  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3175  LockWaitBlock, &have_tuple_lock);
3176  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3177  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3178 
3179  /*
3180  * xwait is done, but if xwait had just locked the tuple then some
3181  * other xact could update this tuple before we get to this point.
3182  * Check for xmax change, and start over if so.
3183  */
3184  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3185  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3186  xwait))
3187  goto l1;
3188 
3189  /* Otherwise check if it committed or aborted */
3190  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3191  }
3192 
3193  /*
3194  * We may overwrite if previous xmax aborted, or if it committed but
3195  * only locked the tuple without updating it.
3196  */
3197  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3198  HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3199  HeapTupleHeaderIsOnlyLocked(tp.t_data))
3200  result = HeapTupleMayBeUpdated;
3201  else
3202  result = HeapTupleUpdated;
3203  }
3204 
3205  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3206  {
3207  /* Perform additional check for transaction-snapshot mode RI updates */
3208  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3209  result = HeapTupleUpdated;
3210  }
3211 
3212  if (result != HeapTupleMayBeUpdated)
3213  {
3214  Assert(result == HeapTupleSelfUpdated ||
3215  result == HeapTupleUpdated ||
3216  result == HeapTupleBeingUpdated);
3217  Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3218  hufd->ctid = tp.t_data->t_ctid;
3219  hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3220  if (result == HeapTupleSelfUpdated)
3221  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3222  else
3223  hufd->cmax = InvalidCommandId;
3224  UnlockReleaseBuffer(buffer);
3225  if (have_tuple_lock)
3226  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3227  if (vmbuffer != InvalidBuffer)
3228  ReleaseBuffer(vmbuffer);
3229  return result;
3230  }
3231 
3232  /*
3233  * We're about to do the actual delete -- check for conflict first, to
3234  * avoid possibly having to roll back work we've just done.
3235  *
3236  * This is safe without a recheck as long as there is no possibility of
3237  * another process scanning the page between this check and the delete
3238  * being visible to the scan (i.e., an exclusive buffer content lock is
3239  * continuously held from this point until the tuple delete is visible).
3240  */
3241  CheckForSerializableConflictIn(relation, &tp, buffer);
3242 
3243  /* replace cid with a combo cid if necessary */
3244  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3245 
3246  /*
3247  * Compute replica identity tuple before entering the critical section so
3248  * we don't PANIC upon a memory allocation failure.
3249  */
3250  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3251 
3252  /*
3253  * If this is the first possibly-multixact-able operation in the current
3254  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3255  * certain that the transaction will never become a member of any older
3256  * MultiXactIds than that. (We have to do this even if we end up just
3257  * using our own TransactionId below, since some other backend could
3258  * incorporate our XID into a MultiXact immediately afterwards.)
3259  */
3260  MultiXactIdSetOldestMember();
3261 
3262  compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3263  tp.t_data->t_infomask, tp.t_data->t_infomask2,
3264  xid, LockTupleExclusive, true,
3265  &new_xmax, &new_infomask, &new_infomask2);
3266 
3267  START_CRIT_SECTION();
3268 
3269  /*
3270  * If this transaction commits, the tuple will become DEAD sooner or
3271  * later. Set flag that this page is a candidate for pruning once our xid
3272  * falls below the OldestXmin horizon. If the transaction finally aborts,
3273  * the subsequent page pruning will be a no-op and the hint will be
3274  * cleared.
3275  */
3276  PageSetPrunable(page, xid);
3277 
3278  if (PageIsAllVisible(page))
3279  {
3280  all_visible_cleared = true;
3281  PageClearAllVisible(page);
3282  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3283  vmbuffer, VISIBILITYMAP_VALID_BITS);
3284  }
3285 
3286  /* store transaction information of xact deleting the tuple */
3287  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3288  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3289  tp.t_data->t_infomask |= new_infomask;
3290  tp.t_data->t_infomask2 |= new_infomask2;
3291  HeapTupleHeaderClearHotUpdated(tp.t_data);
3292  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3293  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3294  /* Make sure there is no forward chain link in t_ctid */
3295  tp.t_data->t_ctid = tp.t_self;
3296 
3297  MarkBufferDirty(buffer);
3298 
3299  /*
3300  * XLOG stuff
3301  *
3302  * NB: heap_abort_speculative() uses the same xlog record and replay
3303  * routines.
3304  */
3305  if (RelationNeedsWAL(relation))
3306  {
3307  xl_heap_delete xlrec;
3308  XLogRecPtr recptr;
3309 
3310  /* For logical decode we need combocids to properly decode the catalog */
3311  if (RelationIsAccessibleInLogicalDecoding(relation))
3312  log_heap_new_cid(relation, &tp);
3313 
3314  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3315  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3316  tp.t_data->t_infomask2);
3317  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3318  xlrec.xmax = new_xmax;
3319 
3320  if (old_key_tuple != NULL)
3321  {
3322  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3323  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3324  else
3325  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3326  }
3327 
3328  XLogBeginInsert();
3329  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3330 
3331  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3332 
3333  /*
3334  * Log replica identity of the deleted tuple if there is one
3335  */
3336  if (old_key_tuple != NULL)
3337  {
3338  xl_heap_header xlhdr;
3339 
3340  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3341  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3342  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3343 
3344  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3345  XLogRegisterData((char *) old_key_tuple->t_data
3346  + SizeofHeapTupleHeader,
3347  old_key_tuple->t_len
3348  - SizeofHeapTupleHeader);
3349  }
3350 
3351  /* filtering by origin on a row level is much more efficient */
3352  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3353 
3354  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3355 
3356  PageSetLSN(page, recptr);
3357  }
3358 
3359  END_CRIT_SECTION();
3360 
3361  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3362 
3363  if (vmbuffer != InvalidBuffer)
3364  ReleaseBuffer(vmbuffer);
3365 
3366  /*
3367  * If the tuple has toasted out-of-line attributes, we need to delete
3368  * those items too. We have to do this before releasing the buffer
3369  * because we need to look at the contents of the tuple, but it's OK to
3370  * release the content lock on the buffer first.
3371  */
3372  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3373  relation->rd_rel->relkind != RELKIND_MATVIEW)
3374  {
3375  /* toast table entries should never be recursively toasted */
3376  Assert(!HeapTupleHasExternal(&tp));
3377  }
3378  else if (HeapTupleHasExternal(&tp))
3379  toast_delete(relation, &tp, false);
3380 
3381  /*
3382  * Mark tuple for invalidation from system caches at next command
3383  * boundary. We have to do this before releasing the buffer because we
3384  * need to look at the contents of the tuple.
3385  */
3386  CacheInvalidateHeapTuple(relation, &tp, NULL);
3387 
3388  /* Now we can release the buffer */
3389  ReleaseBuffer(buffer);
3390 
3391  /*
3392  * Release the lmgr tuple lock, if we had it.
3393  */
3394  if (have_tuple_lock)
3395  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3396 
3397  pgstat_count_heap_delete(relation);
3398 
3399  if (old_key_tuple != NULL && old_key_copied)
3400  heap_freetuple(old_key_tuple);
3401 
3402  return HeapTupleMayBeUpdated;
3403 }
#define HeapTupleHeaderGetUpdateXid(tup)
Definition: htup_details.h:359
bool HeapTupleHeaderIsOnlyLocked(HeapTupleHeader tuple)
Definition: tqual.c:1585
#define ItemPointerIsValid(pointer)
Definition: itemptr.h:60
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
#define SizeofHeapTupleHeader
Definition: htup_details.h:170
static XLogRecPtr log_heap_new_cid(Relation relation, HeapTuple tup)
Definition: heapam.c:7744
#define HEAP_XMAX_BITS
Definition: htup_details.h:256
static uint8 compute_infobits(uint16 infomask, uint16 infomask2)
Definition: heapam.c:2968
void CacheInvalidateHeapTuple(Relation relation, HeapTuple tuple, HeapTuple newtuple)
Definition: inval.c:1094
#define TransactionIdEquals(id1, id2)
Definition: transam.h:43
#define PageIsAllVisible(page)
Definition: bufpage.h:381
uint32 TransactionId
Definition: c.h:397
HTSU_Result HeapTupleSatisfiesUpdate(HeapTuple htup, CommandId curcid, Buffer buffer)
Definition: tqual.c:460
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
void visibilitymap_pin(Relation rel, BlockNumber heapBlk, Buffer *buf)
void MarkBufferDirty(Buffer buffer)
Definition: bufmgr.c:1450
void XLogRegisterBuffer(uint8 block_id, Buffer buffer, uint8 flags)
Definition: xloginsert.c:213
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
static bool xmax_infomask_changed(uint16 new_infomask, uint16 old_infomask)
Definition: heapam.c:2990
#define HeapTupleHeaderClearHotUpdated(tup)
Definition: htup_details.h:497
#define END_CRIT_SECTION()
Definition: miscadmin.h:133
#define RELKIND_MATVIEW
Definition: pg_class.h:165
#define InvalidBuffer
Definition: buf.h:25
uint16 t_infomask2
Definition: heapam_xlog.h:122
#define PageSetPrunable(page, xid)
Definition: bufpage.h:394
#define START_CRIT_SECTION()
Definition: miscadmin.h:131
int errcode(int sqlerrcode)
Definition: elog.c:575
#define XLOG_INCLUDE_ORIGIN
Definition: xlog.h:191
return result
Definition: formatting.c:1633
uint32 BlockNumber
Definition: block.h:31
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define BUFFER_LOCK_EXCLUSIVE
Definition: bufmgr.h:89
Form_pg_class rd_rel
Definition: rel.h:114
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1373
void CheckForSerializableConflictIn(Relation relation, HeapTuple tuple, Buffer buffer)
Definition: predicate.c:4324
#define UnlockTupleTuplock(rel, tup, mode)
Definition: heapam.c:183
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
OffsetNumber offnum
Definition: heapam_xlog.h:105
void MultiXactIdSetOldestMember(void)
Definition: multixact.c:623
#define VISIBILITYMAP_VALID_BITS
Definition: visibilitymap.h:28
HeapTupleHeader t_data
Definition: htup.h:67
#define HeapTupleHeaderGetRawXmax(tup)
Definition: htup_details.h:369
unsigned short uint16
Definition: c.h:267
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
bool IsInParallelMode(void)
Definition: xact.c:913
bool visibilitymap_clear(Relation rel, BlockNumber heapBlk, Buffer buf, uint8 flags)
void UnlockReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3332
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
ItemPointerData t_ctid
Definition: htup_details.h:150
ItemPointerData t_self
Definition: htup.h:65
TransactionId xmax
Definition: heapam_xlog.h:104
static void MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7160
TransactionId GetCurrentTransactionId(void)
Definition: xact.c:417
uint32 t_len
Definition: htup.h:64
#define SizeOfHeapDelete
Definition: heapam_xlog.h:110
#define REGBUF_STANDARD
Definition: xloginsert.h:34
#define XLH_DELETE_CONTAINS_OLD_KEY
Definition: heapam_xlog.h:94
CommandId cmax
Definition: heapam.h:72
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
HTSU_Result
Definition: snapshot.h:119
Oid t_tableOid
Definition: htup.h:66
void XLogSetRecordFlags(uint8 flags)
Definition: xloginsert.c:397
#define HeapTupleHeaderSetCmax(tup, cid, iscombo)
Definition: htup_details.h:399
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define ereport(elevel, rest)
Definition: elog.h:122
static void compute_new_xmax_infomask(TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
Definition: heapam.c:5272
TransactionId xmax
Definition: heapam.h:71
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
#define InvalidSnapshot
Definition: snapshot.h:25
void XLogRegisterData(char *data, int len)
Definition: xloginsert.c:323
XLogRecPtr XLogInsert(RmgrId rmid, uint8 info)
Definition: xloginsert.c:415
#define RelationIsAccessibleInLogicalDecoding(relation)
Definition: rel.h:559
#define InvalidCommandId
Definition: c.h:414
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
static void UpdateXmaxHintBits(HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
Definition: heapam.c:2313
#define HEAP_MOVED
Definition: htup_details.h:202
static bool heap_acquire_tuplock(Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
Definition: heapam.c:5223
TransactionId MultiXactId
Definition: c.h:407
#define PageClearAllVisible(page)
Definition: bufpage.h:385
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
#define NULL
Definition: c.h:229
uint64 XLogRecPtr
Definition: xlogdefs.h:21
#define Assert(condition)
Definition: c.h:676
uint8 infobits_set
Definition: heapam_xlog.h:106
static HeapTuple ExtractReplicaIdentity(Relation rel, HeapTuple tup, bool key_modified, bool *copy)
Definition: heapam.c:7820
CommandId HeapTupleHeaderGetCmax(HeapTupleHeader tup)
Definition: combocid.c:119
static bool DoesMultiXactIdConflict(MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
Definition: heapam.c:6993
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
uint16 t_infomask
Definition: heapam_xlog.h:123
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
#define RelationNeedsWAL(relation)
Definition: rel.h:505
void pgstat_count_heap_delete(Relation rel)
Definition: pgstat.c:1953
void HeapTupleHeaderAdjustCmax(HeapTupleHeader tup, CommandId *cmax, bool *iscombo)
Definition: combocid.c:154
BlockNumber BufferGetBlockNumber(Buffer buffer)
Definition: bufmgr.c:2605
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
int errmsg(const char *fmt,...)
Definition: elog.c:797
#define XLH_DELETE_ALL_VISIBLE_CLEARED
Definition: heapam_xlog.h:92
void toast_delete(Relation rel, HeapTuple oldtup, bool is_speculative)
Definition: tuptoaster.c:464
ItemPointerData ctid
Definition: heapam.h:70
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
#define RELKIND_RELATION
Definition: pg_class.h:160
void XLogBeginInsert(void)
Definition: xloginsert.c:120
#define PageSetLSN(page, lsn)
Definition: bufpage.h:364
int Buffer
Definition: buf.h:23
#define XLOG_HEAP_DELETE
Definition: heapam_xlog.h:33
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
#define SizeOfHeapHeader
Definition: heapam_xlog.h:127
Pointer Page
Definition: bufpage.h:74
#define XLH_DELETE_CONTAINS_OLD_TUPLE
Definition: heapam_xlog.h:93
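A sketch of the "no concurrent updaters expected" calling pattern, modelled loosely on simple_heap_delete() (named under "Referenced by" above); the wrapper body here is an assumption, not a copy of that function.

static void
delete_or_die_sketch(Relation rel, ItemPointer tid)
{
    HeapUpdateFailureData hufd;
    HTSU_Result result;

    result = heap_delete(rel, tid,
                         GetCurrentCommandId(true), InvalidSnapshot,
                         true /* wait */ , &hufd);
    switch (result)
    {
        case HeapTupleMayBeUpdated:
            break;                      /* deleted */
        case HeapTupleSelfUpdated:
            elog(ERROR, "tuple already updated by self");
            break;
        case HeapTupleUpdated:
            elog(ERROR, "tuple concurrently updated");
            break;
        default:
            elog(ERROR, "unrecognized heap_delete status: %u", result);
            break;
    }
}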
void heap_endscan ( HeapScanDesc  scan)

Definition at line 1580 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1581 {
1582  /* Note: no locking manipulations needed */
1583 
1584  /*
1585  * unpin scan buffers
1586  */
1587  if (BufferIsValid(scan->rs_cbuf))
1588  ReleaseBuffer(scan->rs_cbuf);
1589 
1590  /*
1591  * decrement relation reference count and free scan descriptor storage
1592  */
1593  RelationDecrementReferenceCount(scan->rs_rd);
1594 
1595  if (scan->rs_key)
1596  pfree(scan->rs_key);
1597 
1598  if (scan->rs_strategy != NULL)
1599  FreeAccessStrategy(scan->rs_strategy);
1600 
1601  if (scan->rs_temp_snap)
1602  UnregisterSnapshot(scan->rs_snapshot);
1603 
1604  pfree(scan);
1605 }
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:950
void RelationDecrementReferenceCount(Relation rel)
Definition: relcache.c:2144
Snapshot rs_snapshot
Definition: relscan.h:49
void UnregisterSnapshot(Snapshot snapshot)
Definition: snapmgr.c:905
bool rs_temp_snap
Definition: relscan.h:57
BufferAccessStrategy rs_strategy
Definition: relscan.h:64
Relation rs_rd
Definition: relscan.h:48
Buffer rs_cbuf
Definition: relscan.h:71
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:580
#define NULL
Definition: c.h:229
#define BufferIsValid(bufnum)
Definition: bufmgr.h:114
ScanKey rs_key
Definition: relscan.h:51
void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6798 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6799 {
6800  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6801 
6802  if (frz->frzflags & XLH_FREEZE_XVAC)
6803  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6804 
6805  if (frz->frzflags & XLH_INVALID_XVAC)
6806  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6807 
6808  tuple->t_infomask = frz->t_infomask;
6809  tuple->t_infomask2 = frz->t_infomask2;
6810 }
#define HeapTupleHeaderSetXvac(tup, xid)
Definition: htup_details.h:417
#define HeapTupleHeaderSetXmax(tup, xid)
Definition: htup_details.h:374
#define InvalidTransactionId
Definition: transam.h:31
#define FrozenTransactionId
Definition: transam.h:33
TransactionId xmax
Definition: heapam_xlog.h:298
#define XLH_INVALID_XVAC
Definition: heapam_xlog.h:294
#define XLH_FREEZE_XVAC
Definition: heapam_xlog.h:293
bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1878 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), NULL, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1884 {
1885  ItemPointer tid = &(tuple->t_self);
1886  ItemId lp;
1887  Buffer buffer;
1888  Page page;
1889  OffsetNumber offnum;
1890  bool valid;
1891 
1892  /*
1893  * Fetch and pin the appropriate page of the relation.
1894  */
1895  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1896 
1897  /*
1898  * Need share lock on buffer to examine tuple commit status.
1899  */
1900  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1901  page = BufferGetPage(buffer);
1902  TestForOldSnapshot(snapshot, relation, page);
1903 
1904  /*
1905  * We'd better check for out-of-range offnum in case of VACUUM since the
1906  * TID was obtained.
1907  */
1908  offnum = ItemPointerGetOffsetNumber(tid);
1909  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1910  {
1911  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1912  if (keep_buf)
1913  *userbuf = buffer;
1914  else
1915  {
1916  ReleaseBuffer(buffer);
1917  *userbuf = InvalidBuffer;
1918  }
1919  tuple->t_data = NULL;
1920  return false;
1921  }
1922 
1923  /*
1924  * get the item line pointer corresponding to the requested tid
1925  */
1926  lp = PageGetItemId(page, offnum);
1927 
1928  /*
1929  * Must check for deleted tuple.
1930  */
1931  if (!ItemIdIsNormal(lp))
1932  {
1933  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1934  if (keep_buf)
1935  *userbuf = buffer;
1936  else
1937  {
1938  ReleaseBuffer(buffer);
1939  *userbuf = InvalidBuffer;
1940  }
1941  tuple->t_data = NULL;
1942  return false;
1943  }
1944 
1945  /*
1946  * fill in *tuple fields
1947  */
1948  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1949  tuple->t_len = ItemIdGetLength(lp);
1950  tuple->t_tableOid = RelationGetRelid(relation);
1951 
1952  /*
1953  * check time qualification of tuple, then release lock
1954  */
1955  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1956 
1957  if (valid)
1958  PredicateLockTuple(relation, tuple, snapshot);
1959 
1960  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1961 
1962  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1963 
1964  if (valid)
1965  {
1966  /*
1967  * All checks passed, so return the tuple as valid. Caller is now
1968  * responsible for releasing the buffer.
1969  */
1970  *userbuf = buffer;
1971 
1972  /* Count the successful fetch against appropriate rel, if any */
1973  if (stats_relation != NULL)
1974  pgstat_count_heap_fetch(stats_relation);
1975 
1976  return true;
1977  }
1978 
1979  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1980  if (keep_buf)
1981  *userbuf = buffer;
1982  else
1983  {
1984  ReleaseBuffer(buffer);
1985  *userbuf = InvalidBuffer;
1986  }
1987 
1988  return false;
1989 }
#define BUFFER_LOCK_UNLOCK
Definition: bufmgr.h:87
static void TestForOldSnapshot(Snapshot snapshot, Relation relation, Page page)
Definition: bufmgr.h:265
HeapTupleHeaderData * HeapTupleHeader
Definition: htup.h:23
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
#define PageGetMaxOffsetNumber(page)
Definition: bufpage.h:353
void CheckForSerializableConflictOut(bool visible, Relation relation, HeapTuple tuple, Buffer buffer, Snapshot snapshot)
Definition: predicate.c:3943
#define HeapTupleSatisfiesVisibility(tuple, snapshot, buffer)
Definition: tqual.h:45
uint16 OffsetNumber
Definition: off.h:24
HeapTupleHeader t_data
Definition: htup.h:67
#define ItemIdGetLength(itemId)
Definition: itemid.h:58
ItemPointerData t_self
Definition: htup.h:65
#define pgstat_count_heap_fetch(rel)
Definition: pgstat.h:1261
uint32 t_len
Definition: htup.h:64
Oid t_tableOid
Definition: htup.h:66
#define BufferGetPage(buffer)
Definition: bufmgr.h:160
#define PageGetItemId(page, offsetNumber)
Definition: bufpage.h:231
void LockBuffer(Buffer buffer, int mode)
Definition: bufmgr.c:3546
#define NULL
Definition: c.h:229
#define ItemIdIsNormal(itemId)
Definition: itemid.h:98
WalTimeSample buffer[LAG_TRACKER_BUFFER_SIZE]
Definition: walsender.c:214
Buffer ReadBuffer(Relation reln, BlockNumber blockNum)
Definition: bufmgr.c:594
#define ItemPointerGetOffsetNumber(pointer)
Definition: itemptr.h:95
void PredicateLockTuple(Relation relation, HeapTuple tuple, Snapshot snapshot)
Definition: predicate.c:2541
#define BUFFER_LOCK_SHARE
Definition: bufmgr.h:88
#define ItemPointerGetBlockNumber(pointer)
Definition: itemptr.h:76
int Buffer
Definition: buf.h:23
#define RelationGetRelid(relation)
Definition: rel.h:416
#define PageGetItem(page, itemId)
Definition: bufpage.h:336
Pointer Page
Definition: bufpage.h:74
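A minimal caller pattern for heap_fetch(): on success the caller owns a pin on *userbuf and must release it. The wrapper below is illustrative only; the tid is assumed to come from an index or TID scan.

static bool
fetch_and_release_sketch(Relation rel, Snapshot snapshot, ItemPointer tid)
{
    HeapTupleData tuple;
    Buffer        buf;

    tuple.t_self = *tid;
    if (!heap_fetch(rel, snapshot, &tuple, &buf, false, NULL))
        return false;           /* not visible or gone; buf is InvalidBuffer */

    /* ... use tuple.t_data while the buffer pin is held ... */

    ReleaseBuffer(buf);
    return true;
}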
void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6013 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, NULL, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6014 {
6015  Buffer buffer;
6016  Page page;
6017  OffsetNumber offnum;
6018  ItemId lp = NULL;
6019  HeapTupleHeader htup;
6020 
6021  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6022  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6023  page = (Page) BufferGetPage(buffer);
6024 
6025  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6026  if (PageGetMaxOffsetNumber(page) >= offnum)
6027  lp = PageGetItemId(page, offnum);
6028 
6029  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6030  elog(ERROR, "invalid lp");
6031 
6032  htup = (HeapTupleHeader) PageGetItem(page, lp);
6033 
6034  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6035  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
6036  "invalid speculative token constant");
6037 
6038  /* NO EREPORT(ERROR) from here till changes are logged */
6039  START_CRIT_SECTION();
6040 
6041  Assert(HeapTupleHeaderIsSpeculative(htup));
6042 
6043  MarkBufferDirty(buffer);
6044 
6045  /*
6046  * Replace the speculative insertion token with a real t_ctid, pointing to
6047  * itself like it does on regular tuples.
6048  */
6049  htup->t_ctid = tuple->t_self;
6050 
6051  /* XLOG stuff */
6052  if (RelationNeedsWAL(relation))
6053  {
6054  xl_heap_confirm xlrec;
6055  XLogRecPtr recptr;
6056 
6057  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6058 
6059  XLogBeginInsert();
6060 
6061  /* We want the same filtering on this as on a plain insert */
6062  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6063 
6064  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6065  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6066 
6067  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6068 
6069  PageSetLSN(page, recptr);
6070  }
6071 
6072  END_CRIT_SECTION();
6073 
6074  UnlockReleaseBuffer(buffer);
6075 }
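
For illustration only, the sketch below (not part of heapam.c) shows roughly how a caller such as ExecInsert() drives the speculative-insertion protocol around heap_finish_speculative(). The index recheck is reduced to a placeholder flag, error handling is omitted, and the function name and the 'conflicted' variable are assumptions made for this example; the usual backend headers (access/heapam.h, storage/lmgr.h, access/xact.h) are assumed to be included.

/*
 * Hedged sketch of a speculative insertion and its confirmation.
 */
static void
speculative_insert_sketch(Relation rel, HeapTuple tuple)
{
    TransactionId xid = GetCurrentTransactionId();
    uint32      specToken;
    bool        conflicted = false;    /* stand-in for the unique-index recheck */

    /* advertise the speculative token so concurrent inserters can wait on it */
    specToken = SpeculativeInsertionLockAcquire(xid);
    HeapTupleHeaderSetSpeculativeToken(tuple->t_data, specToken);

    heap_insert(rel, tuple, GetCurrentCommandId(true),
                HEAP_INSERT_SPECULATIVE, NULL);

    /* ... insert index entries and detect conflicts here ... */

    if (!conflicted)
        heap_finish_speculative(rel, tuple);   /* replace token with real t_ctid */
    else
        heap_abort_speculative(rel, tuple);    /* kill the speculative tuple */

    SpeculativeInsertionLockRelease(xid);
}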
bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6819 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6821 {
6822  xl_heap_freeze_tuple frz;
6823  bool do_freeze;
6824  bool tuple_totally_frozen;
6825 
6826  do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6827  &frz, &tuple_totally_frozen);
6828 
6829  /*
6830  * Note that because this is not a WAL-logged operation, we don't need to
6831  * fill in the offset in the freeze record.
6832  */
6833 
6834  if (do_freeze)
6835  heap_execute_freeze_tuple(tuple, &frz);
6836  return do_freeze;
6837 }
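
As a usage note (not from heapam.c): heap_freeze_tuple() is the non-WAL-logged wrapper used on private tuple copies, for example by rewrite_heap_tuple() during CLUSTER or VACUUM FULL. A minimal sketch follows; the function name and parameter names are assumptions, and the cutoffs are expected to come from something like vacuum_set_xid_limits().

/*
 * Hedged sketch: freeze an already-copied tuple before it is written out,
 * as a table-rewrite path would.  The copy is private, so skipping WAL here
 * is safe; the rewritten page is logged or synced as a whole later.
 */
static void
freeze_copied_tuple_sketch(HeapTuple copied_tuple,
                           TransactionId freeze_xid,
                           MultiXactId cutoff_multi)
{
    (void) heap_freeze_tuple(copied_tuple->t_data, freeze_xid, cutoff_multi);
}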
void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2183 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2186 {
2187  BlockNumber blk;
2188  ItemPointerData ctid;
2189  TransactionId priorXmax;
2190 
2191  /* this is to avoid Assert failures on bad input */
2192  if (!ItemPointerIsValid(tid))
2193  return;
2194 
2195  /*
2196  * Since this can be called with user-supplied TID, don't trust the input
2197  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2198  * don't check t_ctid links again this way. Note that it would not do to
2199  * call it just once and save the result, either.)
2200  */
2201  blk = ItemPointerGetBlockNumber(tid);
2202  if (blk >= RelationGetNumberOfBlocks(relation))
2203  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2204  blk, RelationGetRelationName(relation));
2205 
2206  /*
2207  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2208  * need to examine, and *tid is the TID we will return if ctid turns out
2209  * to be bogus.
2210  *
2211  * Note that we will loop until we reach the end of the t_ctid chain.
2212  * Depending on the snapshot passed, there might be at most one visible
2213  * version of the row, but we don't try to optimize for that.
2214  */
2215  ctid = *tid;
2216  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2217  for (;;)
2218  {
2219  Buffer buffer;
2220  Page page;
2221  OffsetNumber offnum;
2222  ItemId lp;
2223  HeapTupleData tp;
2224  bool valid;
2225 
2226  /*
2227  * Read, pin, and lock the page.
2228  */
2229  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2230  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2231  page = BufferGetPage(buffer);
2232  TestForOldSnapshot(snapshot, relation, page);
2233 
2234  /*
2235  * Check for bogus item number. This is not treated as an error
2236  * condition because it can happen while following a t_ctid link. We
2237  * just assume that the prior tid is OK and return it unchanged.
2238  */
2239  offnum = ItemPointerGetOffsetNumber(&ctid);
2240  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2241  {
2242  UnlockReleaseBuffer(buffer);
2243  break;
2244  }
2245  lp = PageGetItemId(page, offnum);
2246  if (!ItemIdIsNormal(lp))
2247  {
2248  UnlockReleaseBuffer(buffer);
2249  break;
2250  }
2251 
2252  /* OK to access the tuple */
2253  tp.t_self = ctid;
2254  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2255  tp.t_len = ItemIdGetLength(lp);
2256  tp.t_tableOid = RelationGetRelid(relation);
2257 
2258  /*
2259  * After following a t_ctid link, we might arrive at an unrelated
2260  * tuple. Check for XMIN match.
2261  */
2262  if (TransactionIdIsValid(priorXmax) &&
2263  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2264  {
2265  UnlockReleaseBuffer(buffer);
2266  break;
2267  }
2268 
2269  /*
2270  * Check time qualification of tuple; if visible, set it as the new
2271  * result candidate.
2272  */
2273  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2274  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2275  if (valid)
2276  *tid = ctid;
2277 
2278  /*
2279  * If there's a valid t_ctid link, follow it, else we're done.
2280  */
2281  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2282  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2283  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2284  {
2285  UnlockReleaseBuffer(buffer);
2286  break;
2287  }
2288 
2289  ctid = tp.t_data->t_ctid;
2290  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2291  UnlockReleaseBuffer(buffer);
2292  } /* end of loop */
2293 }
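
For context, a minimal sketch (not part of this file) of the currtid_byreloid()-style call pattern: start from a caller-supplied TID and let heap_get_latest_tid() chase the t_ctid chain, updating the TID in place to the newest visible version. The function name and the use of GetLatestSnapshot() mirror the tid.c callers but are assumptions here; error handling and permission checks are omitted.

static ItemPointerData
latest_tid_sketch(Oid reloid, ItemPointerData start_tid)
{
    Relation        rel = heap_open(reloid, AccessShareLock);
    Snapshot        snapshot = RegisterSnapshot(GetLatestSnapshot());
    ItemPointerData tid = start_tid;

    /* tid is overwritten with the latest visible version, if one exists */
    heap_get_latest_tid(rel, snapshot, &tid);

    UnregisterSnapshot(snapshot);
    heap_close(rel, AccessShareLock);
    return tid;
}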
HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1810 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), NULL, pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1811 {
1812  /* Note: no locking manipulations needed */
1813 
1814  HEAPDEBUG_1; /* heap_getnext( info ) */
1815 
1816  if (scan->rs_pageatatime)
1817  heapgettup_pagemode(scan, direction,
1818  scan->rs_nkeys, scan->rs_key);
1819  else
1820  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1821 
1822  if (scan->rs_ctup.t_data == NULL)
1823  {
1824  HEAPDEBUG_2; /* heap_getnext returning EOS */
1825  return NULL;
1826  }
1827 
1828  /*
1829  * if we get here it means we have a new current scan tuple, so point to
1830  * the proper return buffer and return the tuple.
1831  */
1832  HEAPDEBUG_3; /* heap_getnext returning tuple */
1833 
1834  pgstat_count_heap_getnext(scan->rs_rd);
1835 
1836  return &(scan->rs_ctup);
1837 }
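
The standard consumption pattern for heap_getnext() is a begin/next/end loop; the sketch below (not from this file) mirrors callers such as CopyTo() and pgstat_heap(). The function name and the choice of GetActiveSnapshot() are illustrative assumptions; a registered transaction snapshot works equally well.

static uint64
count_visible_tuples_sketch(Relation rel)
{
    HeapScanDesc scan;
    HeapTuple    tuple;
    uint64       ntuples = 0;

    scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
        ntuples++;      /* tuple points into the scan; do not free it */

    heap_endscan(scan);
    return ntuples;
}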
bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool all_dead 
)

Definition at line 2155 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), ReleaseBuffer(), and result.

Referenced by _bt_check_unique(), and unique_key_recheck().

2157 {
2158  bool result;
2159  Buffer buffer;
2160  HeapTupleData heapTuple;
2161 
2162  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2163  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2164  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2165  &heapTuple, all_dead, true);
2166  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2167  ReleaseBuffer(buffer);
2168  return result;
2169 }
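
A hedged sketch (not from this file) of the probe pattern used by callers such as _bt_check_unique() and unique_key_recheck(): ask whether any member of the HOT chain rooted at a TID is visible, here under a dirty snapshot so that uncommitted insertions are also seen. The function name and the dirty-snapshot choice are assumptions for illustration; SnapshotSelf is another common choice.

static bool
row_still_exists_sketch(Relation heapRel, ItemPointer tid)
{
    SnapshotData SnapshotDirty;
    bool         all_dead;

    InitDirtySnapshot(SnapshotDirty);

    /* tid may be updated to point at the visible chain member */
    return heap_hot_search(tid, heapRel, &SnapshotDirty, &all_dead);
}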
bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool all_dead,
bool  first_call 
)

Definition at line 2013 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

2016 {
2017  Page dp = (Page) BufferGetPage(buffer);
2018  TransactionId prev_xmax = InvalidTransactionId;
2019  OffsetNumber offnum;
2020  bool at_chain_start;
2021  bool valid;
2022  bool skip;
2023 
2024  /* If this is not the first call, previous call returned a (live!) tuple */
2025  if (all_dead)
2026  *all_dead = first_call;
2027 
2028  Assert(TransactionIdIsValid(RecentGlobalXmin));
2029 
2030  Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
2031  offnum = ItemPointerGetOffsetNumber(tid);
2032  at_chain_start = first_call;
2033  skip = !first_call;
2034 
2035  heapTuple->t_self = *tid;
2036 
2037  /* Scan through possible multiple members of HOT-chain */
2038  for (;;)
2039  {
2040  ItemId lp;
2041 
2042  /* check for bogus TID */
2043  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2044  break;
2045 
2046  lp = PageGetItemId(dp, offnum);
2047 
2048  /* check for unused, dead, or redirected items */
2049  if (!ItemIdIsNormal(lp))
2050  {
2051  /* We should only see a redirect at start of chain */
2052  if (ItemIdIsRedirected(lp) && at_chain_start)
2053  {
2054  /* Follow the redirect */
2055  offnum = ItemIdGetRedirect(lp);
2056  at_chain_start = false;
2057  continue;
2058  }
2059  /* else must be end of chain */
2060  break;
2061  }
2062 
2063  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2064  heapTuple->t_len = ItemIdGetLength(lp);
2065  heapTuple->t_tableOid = RelationGetRelid(relation);
2066  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2067 
2068  /*
2069  * Shouldn't see a HEAP_ONLY tuple at chain start.
2070  */
2071  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2072  break;
2073 
2074  /*
2075  * The xmin should match the previous xmax value, else chain is
2076  * broken.
2077  */
2078  if (TransactionIdIsValid(prev_xmax) &&
2079  !TransactionIdEquals(prev_xmax,
2080  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2081  break;
2082 
2083  /*
2084  * When first_call is true (and thus, skip is initially false) we'll
2085  * return the first tuple we find. But on later passes, heapTuple
2086  * will initially be pointing to the tuple we returned last time.
2087  * Returning it again would be incorrect (and would loop forever), so
2088  * we skip it and return the next match we find.
2089  */
2090  if (!skip)
2091  {
2092  /*
2093  * For the benefit of logical decoding, have t_self point at the
2094  * element of the HOT chain we're currently investigating instead
2095  * of the root tuple of the HOT chain. This is important because
2096  * the *Satisfies routine for historical mvcc snapshots needs the
2097  * correct tid to decide about the visibility in some cases.
2098  */
2099  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2100 
2101  /* If it's visible per the snapshot, we must return it */
2102  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2103  CheckForSerializableConflictOut(valid, relation, heapTuple,
2104  buffer, snapshot);
2105  /* reset to original, non-redirected, tid */
2106  heapTuple->t_self = *tid;
2107 
2108  if (valid)
2109  {
2110  ItemPointerSetOffsetNumber(tid, offnum);
2111  PredicateLockTuple(relation, heapTuple, snapshot);
2112  if (all_dead)
2113  *all_dead = false;
2114  return true;
2115  }
2116  }
2117  skip = false;
2118 
2119  /*
2120  * If we can't see it, maybe no one else can either. At caller
2121  * request, check whether all chain members are dead to all
2122  * transactions.
2123  */
2124  if (all_dead && *all_dead &&
2125  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2126  *all_dead = false;
2127 
2128  /*
2129  * Check to see if HOT chain continues past this tuple; if so fetch
2130  * the next offnum and loop around.
2131  */
2132  if (HeapTupleIsHotUpdated(heapTuple))
2133  {
2134  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
2135  ItemPointerGetBlockNumber(tid));
2136  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2137  at_chain_start = false;
2138  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2139  }
2140  else
2141  break; /* end of chain */
2142  }
2143 
2144  return false;
2145 }
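
For illustration (not from this file), the repeated-call pattern that index_fetch_heap() relies on: the first call passes first_call = true, and each later call passes false so the previously returned member is skipped and the walk resumes down the chain. With an MVCC snapshot at most one member is visible, so the loop below only does real work for non-MVCC snapshots. The caller is assumed to hold a pin and share lock on 'buffer' for the duration, and the function name is hypothetical.

static int
count_visible_chain_members_sketch(Relation rel, Buffer buffer,
                                   ItemPointer tid, Snapshot snapshot)
{
    HeapTupleData heapTuple;
    bool          all_dead;
    bool          first_call = true;
    int           nvisible = 0;

    while (heap_hot_search_buffer(tid, rel, buffer, snapshot,
                                  &heapTuple, &all_dead, first_call))
    {
        nvisible++;            /* heapTuple now holds a visible chain member */
        first_call = false;    /* next call resumes after this tuple */
    }
    return nvisible;
}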
void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6245 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), NULL, xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6246 {
6247  Buffer buffer;
6248  Page page;
6249  OffsetNumber offnum;
6250  ItemId lp = NULL;
6251  HeapTupleHeader htup;
6252  uint32 oldlen;
6253  uint32 newlen;
6254 
6255  /*
6256  * For now, parallel operations are required to be strictly read-only.
6257  * Unlike a regular update, this should never create a combo CID, so it
6258  * might be possible to relax this restriction, but not without more
6259  * thought and testing. It's not clear that it would be useful, anyway.
6260  */
6261  if (IsInParallelMode())
6262  ereport(ERROR,
6263  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6264  errmsg("cannot update tuples during a parallel operation")));
6265 
6266  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6267  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6268  page = (Page) BufferGetPage(buffer);
6269 
6270  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6271  if (PageGetMaxOffsetNumber(page) >= offnum)
6272  lp = PageGetItemId(page, offnum);
6273 
6274  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6275  elog(ERROR, "invalid lp");
6276 
6277  htup = (HeapTupleHeader) PageGetItem(page, lp);
6278 
6279  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6280  newlen = tuple->t_len - tuple->t_data->t_hoff;
6281  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6282  elog(ERROR, "wrong tuple length");
6283 
6284  /* NO EREPORT(ERROR) from here till changes are logged */
6285  START_CRIT_SECTION();
6286 
6287  memcpy((char *) htup + htup->t_hoff,
6288  (char *) tuple->t_data + tuple->t_data->t_hoff,
6289  newlen);
6290 
6291  MarkBufferDirty(buffer);
6292 
6293  /* XLOG stuff */
6294  if (RelationNeedsWAL(relation))
6295  {
6296  xl_heap_inplace xlrec;
6297  XLogRecPtr recptr;
6298 
6299  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6300 
6301  XLogBeginInsert();
6302  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6303 
6304  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6305  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6306 
6307  /* inplace updates aren't decoded atm, don't log the origin */
6308 
6309  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6310 
6311  PageSetLSN(page, recptr);
6312  }
6313 
6314  END_CRIT_SECTION();
6315 
6316  UnlockReleaseBuffer(buffer);
6317 
6318  /*
6319  * Send out shared cache inval if necessary. Note that because we only
6320  * pass the new version of the tuple, this mustn't be used for any
6321  * operations that could change catcache lookup keys. But we aren't
6322  * bothering with index updates either, so that's true a fortiori.
6323  */
6324  if (!IsBootstrapProcessingMode())
6325  CacheInvalidateHeapTuple(relation, tuple, NULL);
6326 }
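
The sketch below (not part of heapam.c) shows the vac_update_relstats()-style call pattern: copy the catalog tuple out of the syscache, overwrite only fixed-width fields so the tuple length cannot change, and write it back in place. The function name and the particular field (relpages) are illustrative assumptions; error handling is trimmed.

static void
inplace_update_sketch(Oid relid, int32 new_relpages)
{
    Relation      pg_class_rel = heap_open(RelationRelationId, RowExclusiveLock);
    HeapTuple     ctup = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(relid));
    Form_pg_class pgcform;

    if (!HeapTupleIsValid(ctup))
        elog(ERROR, "pg_class entry for relation %u vanished", relid);

    pgcform = (Form_pg_class) GETSTRUCT(ctup);
    pgcform->relpages = new_relpages;           /* same-size overwrite only */

    heap_inplace_update(pg_class_rel, ctup);    /* writes through to the page */

    heap_freetuple(ctup);
    heap_close(pg_class_rel, RowExclusiveLock);
}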
Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2412 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

2414 {
2415  TransactionId xid = GetCurrentTransactionId();
2416  HeapTuple heaptup;
2417  Buffer buffer;
2418  Buffer vmbuffer = InvalidBuffer;
2419  bool all_visible_cleared = false;
2420 
2421  /*
2422  * Fill in tuple header fields, assign an OID, and toast the tuple if
2423  * necessary.
2424  *
2425  * Note: below this point, heaptup is the data we actually intend to store
2426  * into the relation; tup is the caller's original untoasted data.
2427  */
2428  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2429 
2430  /*
2431  * Find buffer to insert this tuple into. If the page is all visible,
2432  * this will also pin the requisite visibility map page.
2433  */
2434  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2435  InvalidBuffer, options, bistate,
2436  &vmbuffer, NULL);
2437 
2438  /*
2439  * We're about to do the actual insert -- but check for conflict first, to
2440  * avoid possibly having to roll back work we've just done.
2441  *
2442  * This is safe without a recheck as long as there is no possibility of
2443  * another process scanning the page between this check and the insert
2444  * being visible to the scan (i.e., an exclusive buffer content lock is
2445  * continuously held from this point until the tuple insert is visible).
2446  *
2447  * For a heap insert, we only need to check for table-level SSI locks. Our
2448  * new tuple can't possibly conflict with existing tuple locks, and heap
2449  * page locks are only consolidated versions of tuple locks; they do not
2450  * lock "gaps" as index page locks do. So we don't need to specify a
2451  * buffer when making the call, which makes for a faster check.
2452  */
2453  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2454 
2455  /* NO EREPORT(ERROR) from here till changes are logged */
2456  START_CRIT_SECTION();
2457 
2458  RelationPutHeapTuple(relation, buffer, heaptup,
2459  (options & HEAP_INSERT_SPECULATIVE) != 0);
2460 
2461  if (PageIsAllVisible(BufferGetPage(buffer)))
2462  {
2463  all_visible_cleared = true;
2464  PageClearAllVisible(BufferGetPage(buffer));
2465  visibilitymap_clear(relation,
2466  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2467  vmbuffer, VISIBILITYMAP_VALID_BITS);
2468  }
2469 
2470  /*
2471  * XXX Should we set PageSetPrunable on this page ?
2472  *
2473  * The inserting transaction may eventually abort thus making this tuple
2474  * DEAD and hence available for pruning. Though we don't want to optimize
2475  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2476  * aborted tuple will never be pruned until next vacuum is triggered.
2477  *
2478  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2479  */
2480 
2481  MarkBufferDirty(buffer);
2482 
2483  /* XLOG stuff */
2484  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2485  {
2486  xl_heap_insert xlrec;
2487  xl_heap_header xlhdr;
2488  XLogRecPtr recptr;
2489  Page page = BufferGetPage(buffer);
2490  uint8 info = XLOG_HEAP_INSERT;
2491  int bufflags = 0;
2492 
2493  /*
2494  * If this is a catalog, we need to transmit combocids to properly
2495  * decode, so log that as well.
2496  */
2497  if (RelationIsAccessibleInLogicalDecoding(relation))
2498  log_heap_new_cid(relation, heaptup);
2499 
2500  /*
2501  * If this is the single and first tuple on page, we can reinit the
2502  * page instead of restoring the whole thing. Set flag, and hide
2503  * buffer references from XLogInsert.
2504  */
2505  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2506  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2507  {
2508  info |= XLOG_HEAP_INIT_PAGE;
2509  bufflags |= REGBUF_WILL_INIT;
2510  }
2511 
2512  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2513  xlrec.flags = 0;
2514  if (all_visible_cleared)
2515  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2516  if (options & HEAP_INSERT_SPECULATIVE)
2517  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2518  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2519 
2520  /*
2521  * For logical decoding, we need the tuple even if we're doing a full
2522  * page write, so make sure it's included even if we take a full-page
2523  * image. (XXX We could alternatively store a pointer into the FPW).
2524  */
2525  if (RelationIsLogicallyLogged(relation))
2526  {
2527  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2528  bufflags |= REGBUF_KEEP_DATA;
2529  }
2530 
2531  XLogBeginInsert();
2532  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2533 
2534  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2535  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2536  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2537 
2538  /*
2539  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2540  * write the whole page to the xlog, we don't need to store
2541  * xl_heap_header in the xlog.
2542  */
2543  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2544  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2545  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2546  XLogRegisterBufData(0,
2547  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2548  heaptup->t_len - SizeofHeapTupleHeader);
2549 
2550  /* filtering by origin on a row level is much more efficient */
2551  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2552 
2553  recptr = XLogInsert(RM_HEAP_ID, info);
2554 
2555  PageSetLSN(page, recptr);
2556  }
2557 
2558  END_CRIT_SECTION();
2559 
2560  UnlockReleaseBuffer(buffer);
2561  if (vmbuffer != InvalidBuffer)
2562  ReleaseBuffer(vmbuffer);
2563 
2564  /*
2565  * If tuple is cachable, mark it for invalidation from the caches in case
2566  * we abort. Note it is OK to do this after releasing the buffer, because
2567  * the heaptup data structure is all in local memory, not in the shared
2568  * buffer.
2569  */
2570  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2571 
2572  /* Note: speculative insertions are counted too, even if aborted later */
2573  pgstat_count_heap_insert(relation, 1);
2574 
2575  /*
2576  * If heaptup is a private copy, release it. Don't forget to copy t_self
2577  * back to the caller's image, too.
2578  */
2579  if (heaptup != tup)
2580  {
2581  tup->t_self = heaptup->t_self;
2582  heap_freetuple(heaptup);
2583  }
2584 
2585  return HeapTupleGetOid(tup);
2586 }
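
For a plain, non-speculative insertion the call reduces to what simple_heap_insert() does; the sketch below (not from this file) adds tuple formation around it. The function name is an assumption; 'rel' must already be opened and suitably locked, values/isnull must match its descriptor, and index maintenance plus CommandCounterIncrement() remain the caller's job.

static Oid
insert_one_row_sketch(Relation rel, Datum *values, bool *isnull)
{
    HeapTuple tup = heap_form_tuple(RelationGetDescr(rel), values, isnull);
    Oid       oid;

    oid = heap_insert(rel, tup, GetCurrentCommandId(true),
                      0 /* options */, NULL /* bistate */);

    heap_freetuple(tup);
    return oid;
}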
HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer buffer,
HeapUpdateFailureData hufd 
)

Definition at line 4557 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), result, SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().

4561 {
4562  HTSU_Result result;
4563  ItemPointer tid = &(tuple->t_self);
4564  ItemId lp;
4565  Page page;
4566  Buffer vmbuffer = InvalidBuffer;
4567  BlockNumber block;
4568  TransactionId xid,
4569  xmax;
4570  uint16 old_infomask,
4571  new_infomask,
4572  new_infomask2;
4573  bool first_time = true;
4574  bool have_tuple_lock = false;
4575  bool cleared_all_frozen = false;
4576 
4577  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4578  block = ItemPointerGetBlockNumber(tid);
4579 
4580  /*
4581  * Before locking the buffer, pin the visibility map page if it appears to
4582  * be necessary. Since we haven't got the lock yet, someone else might be
4583  * in the middle of changing this, so we'll need to recheck after we have
4584  * the lock.
4585  */
4586  if (PageIsAllVisible(BufferGetPage(*buffer)))
4587  visibilitymap_pin(relation, block, &vmbuffer);
4588 
4589  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4590 
4591  page = BufferGetPage(*buffer);
4592  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4593  Assert(ItemIdIsNormal(lp));
4594 
4595  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4596  tuple->t_len = ItemIdGetLength(lp);
4597  tuple->t_tableOid = RelationGetRelid(relation);
4598 
4599 l3:
4600  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4601 
4602  if (result == HeapTupleInvisible)
4603  {
4604  /*
4605  * This is possible, but only when locking a tuple for ON CONFLICT
4606  * UPDATE. We return this value here rather than throwing an error in
4607  * order to give that case the opportunity to throw a more specific
4608  * error.
4609  */
4610  result = HeapTupleInvisible;
4611  goto out_locked;
4612  }
4613  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4614  {
4615  TransactionId xwait;
4616  uint16 infomask;
4617  uint16 infomask2;
4618  bool require_sleep;
4619  ItemPointerData t_ctid;
4620 
4621  /* must copy state data before unlocking buffer */
4622  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4623  infomask = tuple->t_data->t_infomask;
4624  infomask2 = tuple->t_data->t_infomask2;
4625  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4626 
4627  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
4628 
4629  /*
4630  * If any subtransaction of the current top transaction already holds
4631  * a lock as strong as or stronger than what we're requesting, we
4632  * effectively hold the desired lock already. We *must* succeed
4633  * without trying to take the tuple lock, else we will deadlock
4634  * against anyone wanting to acquire a stronger lock.
4635  *
4636  * Note we only do this the first time we loop on the HTSU result;
4637  * there is no point in testing in subsequent passes, because
4638  * evidently our own transaction cannot have acquired a new lock after
4639  * the first time we checked.
4640  */
4641  if (first_time)
4642  {
4643  first_time = false;
4644 
4645  if (infomask & HEAP_XMAX_IS_MULTI)
4646  {
4647  int i;
4648  int nmembers;
4649  MultiXactMember *members;
4650 
4651  /*
4652  * We don't need to allow old multixacts here; if that had
4653  * been the case, HeapTupleSatisfiesUpdate would have returned
4654  * MayBeUpdated and we wouldn't be here.
4655  */
4656  nmembers =
4657  GetMultiXactIdMembers(xwait, &members, false,
4658  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4659 
4660  for (i = 0; i < nmembers; i++)
4661  {
4662  /* only consider members of our own transaction */
4663  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4664  continue;
4665 
4666  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4667  {
4668  pfree(members);
4669  result = HeapTupleMayBeUpdated;
4670  goto out_unlocked;
4671  }
4672  }
4673 
4674  if (members)
4675  pfree(members);
4676  }
4677  else if (TransactionIdIsCurrentTransactionId(xwait))
4678  {
4679  switch (mode)
4680  {
4681  case LockTupleKeyShare:
4682  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4683  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4684  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4685  result = HeapTupleMayBeUpdated;
4686  goto out_unlocked;
4687  case LockTupleShare:
4688  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4689  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4690  {
4691  result = HeapTupleMayBeUpdated;
4692  goto out_unlocked;
4693  }
4694  break;
4695  case LockTupleNoKeyExclusive:
4696  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4697  {
4698  result = HeapTupleMayBeUpdated;
4699  goto out_unlocked;
4700  }
4701  break;
4702  case LockTupleExclusive:
4703  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4704  infomask2 & HEAP_KEYS_UPDATED)
4705  {
4706  result = HeapTupleMayBeUpdated;
4707  goto out_unlocked;
4708  }
4709  break;
4710  }
4711  }
4712  }
4713 
4714  /*
4715  * Initially assume that we will have to wait for the locking
4716  * transaction(s) to finish. We check various cases below in which
4717  * this can be turned off.
4718  */
4719  require_sleep = true;
4720  if (mode == LockTupleKeyShare)
4721  {
4722  /*
4723  * If we're requesting KeyShare, and there's no update present, we
4724  * don't need to wait. Even if there is an update, we can still
4725  * continue if the key hasn't been modified.
4726  *
4727  * However, if there are updates, we need to walk the update chain
4728  * to mark future versions of the row as locked, too. That way,
4729  * if somebody deletes that future version, we're protected
4730  * against the key going away. This locking of future versions
4731  * could block momentarily, if a concurrent transaction is
4732  * deleting a key; or it could return a value to the effect that
4733  * the transaction deleting the key has already committed. So we
4734  * do this before re-locking the buffer; otherwise this would be
4735  * prone to deadlocks.
4736  *
4737  * Note that the TID we're locking was grabbed before we unlocked
4738  * the buffer. For it to change while we're not looking, the
4739  * other properties we're testing for below after re-locking the
4740  * buffer would also change, in which case we would restart this
4741  * loop above.
4742  */
4743  if (!(infomask2 & HEAP_KEYS_UPDATED))
4744  {
4745  bool updated;
4746 
4747  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4748 
4749  /*
4750  * If there are updates, follow the update chain; bail out if
4751  * that cannot be done.
4752  */
4753  if (follow_updates && updated)
4754  {
4755  HTSU_Result res;
4756 
4757  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4758  GetCurrentTransactionId(),
4759  mode);
4760  if (res != HeapTupleMayBeUpdated)
4761  {
4762  result = res;
4763  /* recovery code expects to have buffer lock held */
4764  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4765  goto failed;
4766  }
4767  }
4768 
4769  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4770 
4771  /*
4772  * Make sure it's still an appropriate lock, else start over.
4773  * Also, if it wasn't updated before we released the lock, but
4774  * is updated now, we start over too; the reason is that we
4775  * now need to follow the update chain to lock the new
4776  * versions.
4777  */
4778  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4779  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4780  !updated))
4781  goto l3;
4782 
4783  /* Things look okay, so we can skip sleeping */
4784  require_sleep = false;
4785 
4786  /*
4787  * Note we allow Xmax to change here; other updaters/lockers
4788  * could have modified it before we grabbed the buffer lock.
4789  * However, this is not a problem, because with the recheck we
4790  * just did we ensure that they still don't conflict with the
4791  * lock we want.
4792  */
4793  }
4794  }
4795  else if (mode == LockTupleShare)
4796  {
4797  /*
4798  * If we're requesting Share, we can similarly avoid sleeping if
4799  * there's no update and no exclusive lock present.
4800  */
4801  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4802  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4803  {
4804  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4805 
4806  /*
4807  * Make sure it's still an appropriate lock, else start over.
4808  * See above about allowing xmax to change.
4809  */
4810  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4811  HEAP_XMAX_IS_EXCL_LOCKED(tuple->t_data->t_infomask))
4812  goto l3;
4813  require_sleep = false;
4814  }
4815  }
4816  else if (mode == LockTupleNoKeyExclusive)
4817  {
4818  /*
4819  * If we're requesting NoKeyExclusive, we might also be able to
4820  * avoid sleeping; just ensure that there is no conflicting lock
4821  * already acquired.
4822  */
4823  if (infomask & HEAP_XMAX_IS_MULTI)
4824  {
4825  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4826  mode))
4827  {
4828  /*
4829  * No conflict, but if the xmax changed under us in the
4830  * meantime, start over.
4831  */
4832  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4833  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4834  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4835  xwait))
4836  goto l3;
4837 
4838  /* otherwise, we're good */
4839  require_sleep = false;
4840  }
4841  }
4842  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4843  {
4844  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4845 
4846  /* if the xmax changed in the meantime, start over */
4847  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4848  !TransactionIdEquals(
4849  HeapTupleHeaderGetRawXmax(tuple->t_data),
4850  xwait))
4851  goto l3;
4852  /* otherwise, we're good */
4853  require_sleep = false;
4854  }
4855  }
4856 
4857  /*
4858  * As a check independent from those above, we can also avoid sleeping
4859  * if the current transaction is the sole locker of the tuple. Note
4860  * that the strength of the lock already held is irrelevant; this is
4861  * not about recording the lock in Xmax (which will be done regardless
4862  * of this optimization, below). Also, note that the cases where we
4863  * hold a lock stronger than we are requesting are already handled
4864  * above by not doing anything.
4865  *
4866  * Note we only deal with the non-multixact case here; MultiXactIdWait
4867  * is well equipped to deal with this situation on its own.
4868  */
4869  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4870  TransactionIdIsCurrentTransactionId(xwait))
4871  {
4872  /* ... but if the xmax changed in the meantime, start over */
4873  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4874  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4875  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
4876  xwait))
4877  goto l3;
4878  Assert(HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask));
4879  require_sleep = false;
4880  }
4881 
4882  /*
4883  * Time to sleep on the other transaction/multixact, if necessary.
4884  *
4885  * If the other transaction is an update that's already committed,
4886  * then sleeping cannot possibly do any good: if we're required to
4887  * sleep, get out to raise an error instead.
4888  *
4889  * By here, we either have already acquired the buffer exclusive lock,
4890  * or we must wait for the locking transaction or multixact; so below
4891  * we ensure that we grab buffer lock after the sleep.
4892  */
4893  if (require_sleep && result == HeapTupleUpdated)
4894  {
4895  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4896  goto failed;
4897  }
4898  else if (require_sleep)
4899  {
4900  /*
4901  * Acquire tuple lock to establish our priority for the tuple, or
4902  * die trying. LockTuple will release us when we are next-in-line
4903  * for the tuple. We must do this even if we are share-locking.
4904  *
4905  * If we are forced to "start over" below, we keep the tuple lock;
4906  * this arranges that we stay at the head of the line while
4907  * rechecking tuple state.
4908  */
4909  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4910  &have_tuple_lock))
4911  {
4912  /*
4913  * This can only happen if wait_policy is Skip and the lock
4914  * couldn't be obtained.
4915  */
4916  result = HeapTupleWouldBlock;
4917  /* recovery code expects to have buffer lock held */
4918  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4919  goto failed;
4920  }
4921 
4922  if (infomask & HEAP_XMAX_IS_MULTI)
4923  {
4924  MultiXactStatus status = get_mxact_status_for_lock(mode, false);
4925 
4926  /* We only ever lock tuples, never update them */
4927  if (status >= MultiXactStatusNoKeyUpdate)
4928  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4929 
4930  /* wait for multixact to end, or die trying */
4931  switch (wait_policy)
4932  {
4933  case LockWaitBlock:
4934  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4935  relation, &tuple->t_self, XLTW_Lock, NULL);
4936  break;
4937  case LockWaitSkip:
4938  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4939  status, infomask, relation,
4940  NULL))
4941  {
4942  result = HeapTupleWouldBlock;
4943  /* recovery code expects to have buffer lock held */
4944  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4945  goto failed;
4946  }
4947  break;
4948  case LockWaitError:
4949  if (!ConditionalMultiXactIdWait((MultiXactId) xwait,
4950  status, infomask, relation,
4951  NULL))
4952  ereport(ERROR,
4953  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4954  errmsg("could not obtain lock on row in relation \"%s\"",
4955  RelationGetRelationName(relation))));
4956 
4957  break;
4958  }
4959 
4960  /*
4961  * Of course, the multixact might not be done here: if we're
4962  * requesting a light lock mode, other transactions with light
4963  * locks could still be alive, as well as locks owned by our
4964  * own xact or other subxacts of this backend. We need to
4965  * preserve the surviving MultiXact members. Note that it
4966  * isn't absolutely necessary in the latter case, but doing so
4967  * is simpler.
4968  */
4969  }
4970  else
4971  {
4972  /* wait for regular transaction to end, or die trying */
4973  switch (wait_policy)
4974  {
4975  case LockWaitBlock:
4976  XactLockTableWait(xwait, relation, &tuple->t_self,
4977  XLTW_Lock);
4978  break;
4979  case LockWaitSkip:
4980  if (!ConditionalXactLockTableWait(xwait))
4981  {
4982  result = HeapTupleWouldBlock;
4983  /* recovery code expects to have buffer lock held */
4984  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4985  goto failed;
4986  }
4987  break;
4988  case LockWaitError:
4989  if (!ConditionalXactLockTableWait(xwait))
4990  ereport(ERROR,
4991  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4992  errmsg("could not obtain lock on row in relation \"%s\"",
4993  RelationGetRelationName(relation))));
4994  break;
4995  }
4996  }
4997 
4998  /* if there are updates, follow the update chain */
4999  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5000  {
5001  HTSU_Result res;
5002 
5003  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5004  GetCurrentTransactionId(),
5005  mode);
5006  if (res != HeapTupleMayBeUpdated)
5007  {
5008  result = res;
5009  /* recovery code expects to have buffer lock held */
5010  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5011  goto failed;
5012  }
5013  }
5014 
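/*
 * Descriptive note (not part of the original listing): the bare LockBuffer()
 * call below re-acquires the buffer content lock that was released before we
 * went to sleep, so the xmax re-checks that follow see the tuple's current
 * state on the page.
 */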
5015  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5016 
5017  /*
5018  * xwait is done, but if xwait had just locked the tuple then some
5019  * other xact could update this tuple before we get to this point.
5020  * Check for xmax change, and start over if so.
5021  */
5022  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5023  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5024  xwait))
5025  goto l3;
5026 
5027  if (!(infomask & HEAP_XMAX_IS_MULTI))
5028  {
5029  /*
5030  * Otherwise check if it committed or aborted. Note we cannot
5031  * be here if the tuple was only locked by somebody who didn't
5032  * conflict with us; that would have been handled above. So
5033  * that transaction must necessarily be gone by now. But
5034  * don't check for this in the multixact case, because some
5035  * locker transactions might still be running.
5036  */
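/*
 * Descriptive note: UpdateXmaxHintBits() records xwait's fate as a hint bit
 * on the tuple: HEAP_XMAX_COMMITTED if it committed an update, or
 * HEAP_XMAX_INVALID if it aborted or had merely locked the tuple, sparing
 * later visibility checks a transaction-status (clog) lookup.
 */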
5037  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5038  }
5039  }
5040 
5041  /* By here, we're certain that we hold buffer exclusive lock again */
5042 
5043  /*
5044  * We may lock if previous xmax aborted, or if it committed but only
5045  * locked the tuple without updating it; or if we didn't have to wait
5046  * at all for whatever reason.
5047  */
5048  if (!require_sleep ||
5049  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5050  HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5051  HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5052  result = HeapTupleMayBeUpdated;
5053  else
5054  result = HeapTupleUpdated;
5055  }
5056 
5057 failed:
5058  if (result != HeapTupleMayBeUpdated)
5059  {
5060  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5061  result == HeapTupleWouldBlock);
5062  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5063  hufd->ctid = tuple->t_data->t_ctid;
5064  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5065  if (result == HeapTupleSelfUpdated)
5066  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5067  else
5068  hufd->cmax = InvalidCommandId;
5069  goto out_locked;
5070  }
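/*
 * Descriptive note: the HeapUpdateFailureData filled in above tells the
 * caller how to react: ctid points at the newest tuple version to chase
 * (e.g. for EvalPlanQual rechecks), xmax identifies the updating
 * transaction, and cmax is set only for HeapTupleSelfUpdated, i.e. when the
 * tuple was already modified by the current transaction.
 */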
5071 
5072  /*
5073  * If we didn't pin the visibility map page and the page has become all
5074  * visible while we were busy locking the buffer, or during some
5075  * subsequent window during which we had it unlocked, we'll have to unlock
5076  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5077  * unfortunate, especially since we'll now have to recheck whether the
5078  * tuple has been locked or updated under us, but hopefully it won't
5079  * happen very often.
5080  */
5081  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5082  {
5083  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5084  visibilitymap_pin(relation, block, &vmbuffer);
5085  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5086  goto l3;
5087  }
5088 
5089  xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5090  old_infomask = tuple->t_data->t_infomask;
5091 
5092  /*
5093  * If this is the first possibly-multixact-able operation in the current
5094  * transaction, set my per-backend OldestMemberMXactId setting. We can be
5095  * certain that the transaction will never become a member of any older
5096  * MultiXactIds than that. (We have to do this even if we end up just
5097  * using our own TransactionId below, since some other backend could
5098  * incorporate our XID into a MultiXact immediately afterwards.)
5099  */
5100  MultiXactIdSetOldestMember();
5101 
5102  /*
5103  * Compute the new xmax and infomask to store into the tuple. Note we do
5104  * not modify the tuple just yet, because that would leave it in the wrong
5105  * state if multixact.c elogs.
5106  */
5107  compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5108  GetCurrentTransactionId(), mode, false,
5109  &xid, &new_infomask, &new_infomask2);
5110 
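/*
 * Descriptive note: in general compute_new_xmax_infomask() yields either a
 * plain TransactionId (when a single locker suffices) or a MultiXactId that
 * folds our lock request in with any surviving lockers, together with the
 * infomask bits describing it.  The tuple itself is only modified inside the
 * critical section that follows.
 */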
5111  START_CRIT_SECTION();
5112 
5113  /*
5114  * Store transaction information of xact locking the tuple.
5115  *
5116  * Note: Cmax is meaningless in this context, so don't set it; this avoids
5117  * possibly generating a useless combo CID. Moreover, if we're locking a
5118  * previously updated tuple, it's important to preserve the Cmax.
5119  *
5120  * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5121  * we would break the HOT chain.
5122  */
5123  tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;