PostgreSQL Source Code  git master
heapam.c File Reference
#include "postgres.h"
#include "access/bufmask.h"
#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/hio.h"
#include "access/multixact.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/sysattr.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "access/valid.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "access/xlog.h"
#include "access/xloginsert.h"
#include "access/xlogutils.h"
#include "catalog/catalog.h"
#include "catalog/namespace.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "storage/bufmgr.h"
#include "storage/freespace.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/procarray.h"
#include "storage/smgr.h"
#include "storage/spin.h"
#include "storage/standby.h"
#include "utils/datum.h"
#include "utils/inval.h"
#include "utils/lsyscache.h"
#include "utils/relcache.h"
#include "utils/snapmgr.h"
#include "utils/syscache.h"
#include "utils/tqual.h"
Include dependency graph for heapam.c:


Macros

#define LOCKMODE_from_mxstatus(status)   (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)
 
#define LockTupleTuplock(rel, tup, mode)   LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define UnlockTupleTuplock(rel, tup, mode)   UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define ConditionalLockTupleTuplock(rel, tup, mode)   ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)
 
#define TUPLOCK_from_mxstatus(status)   (MultiXactStatusLock[(status)])
 
#define HEAPDEBUG_1
 
#define HEAPDEBUG_2
 
#define HEAPDEBUG_3
 
#define FRM_NOOP   0x0001
 
#define FRM_INVALIDATE_XMAX   0x0002
 
#define FRM_RETURN_IS_XID   0x0004
 
#define FRM_RETURN_IS_MULTI   0x0008
 
#define FRM_MARK_COMMITTED   0x0010
 

Functions

static HeapScanDesc heap_beginscan_internal (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, ParallelHeapScanDesc parallel_scan, bool allow_strat, bool allow_sync, bool allow_pagemode, bool is_bitmapscan, bool is_samplescan, bool temp_snap)
 
static BlockNumber heap_parallelscan_nextpage (HeapScanDesc scan)
 
static HeapTuple heap_prepare_insert (Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options)
 
static XLogRecPtr log_heap_update (Relation reln, Buffer oldbuf, Buffer newbuf, HeapTuple oldtup, HeapTuple newtup, HeapTuple old_key_tup, bool all_visible_cleared, bool new_all_visible_cleared)
 
static void HeapSatisfiesHOTandKeyUpdate (Relation relation, Bitmapset *hot_attrs, Bitmapset *key_attrs, Bitmapset *id_attrs, bool *satisfies_hot, bool *satisfies_key, bool *satisfies_id, HeapTuple oldtup, HeapTuple newtup)
 
static bool heap_acquire_tuplock (Relation relation, ItemPointer tid, LockTupleMode mode, LockWaitPolicy wait_policy, bool *have_tuple_lock)
 
static void compute_new_xmax_infomask (TransactionId xmax, uint16 old_infomask, uint16 old_infomask2, TransactionId add_to_xmax, LockTupleMode mode, bool is_update, TransactionId *result_xmax, uint16 *result_infomask, uint16 *result_infomask2)
 
static HTSU_Result heap_lock_updated_tuple (Relation rel, HeapTuple tuple, ItemPointer ctid, TransactionId xid, LockTupleMode mode)
 
static void GetMultiXactIdHintBits (MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
 
static TransactionId MultiXactIdGetUpdateXid (TransactionId xmax, uint16 t_infomask)
 
static bool DoesMultiXactIdConflict (MultiXactId multi, uint16 infomask, LockTupleMode lockmode)
 
static void MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
static bool ConditionalMultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, Relation rel, int *remaining)
 
static XLogRecPtr log_heap_new_cid (Relation relation, HeapTuple tup)
 
static HeapTuple ExtractReplicaIdentity (Relation rel, HeapTuple tup, bool key_modified, bool *copy)
 
static void initscan (HeapScanDesc scan, ScanKey key, bool keep_startblock)
 
void heap_setscanlimits (HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks)
 
void heapgetpage (HeapScanDesc scan, BlockNumber page)
 
static void heapgettup (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
static void heapgettup_pagemode (HeapScanDesc scan, ScanDirection dir, int nkeys, ScanKey key)
 
Relation relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation try_relation_open (Oid relationId, LOCKMODE lockmode)
 
Relation relation_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation relation_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
void relation_close (Relation relation, LOCKMODE lockmode)
 
Relation heap_open (Oid relationId, LOCKMODE lockmode)
 
Relation heap_openrv (const RangeVar *relation, LOCKMODE lockmode)
 
Relation heap_openrv_extended (const RangeVar *relation, LOCKMODE lockmode, bool missing_ok)
 
HeapScanDesc heap_beginscan (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_catalog (Relation relation, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_strat (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync)
 
HeapScanDesc heap_beginscan_bm (Relation relation, Snapshot snapshot, int nkeys, ScanKey key)
 
HeapScanDesc heap_beginscan_sampling (Relation relation, Snapshot snapshot, int nkeys, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_rescan (HeapScanDesc scan, ScanKey key)
 
void heap_rescan_set_params (HeapScanDesc scan, ScanKey key, bool allow_strat, bool allow_sync, bool allow_pagemode)
 
void heap_endscan (HeapScanDesc scan)
 
Size heap_parallelscan_estimate (Snapshot snapshot)
 
void heap_parallelscan_initialize (ParallelHeapScanDesc target, Relation relation, Snapshot snapshot)
 
HeapScanDesc heap_beginscan_parallel (Relation relation, ParallelHeapScanDesc parallel_scan)
 
void heap_update_snapshot (HeapScanDesc scan, Snapshot snapshot)
 
HeapTuple heap_getnext (HeapScanDesc scan, ScanDirection direction)
 
bool heap_fetch (Relation relation, Snapshot snapshot, HeapTuple tuple, Buffer *userbuf, bool keep_buf, Relation stats_relation)
 
bool heap_hot_search_buffer (ItemPointer tid, Relation relation, Buffer buffer, Snapshot snapshot, HeapTuple heapTuple, bool *all_dead, bool first_call)
 
bool heap_hot_search (ItemPointer tid, Relation relation, Snapshot snapshot, bool *all_dead)
 
void heap_get_latest_tid (Relation relation, Snapshot snapshot, ItemPointer tid)
 
static void UpdateXmaxHintBits (HeapTupleHeader tuple, Buffer buffer, TransactionId xid)
 
BulkInsertState GetBulkInsertState (void)
 
void FreeBulkInsertState (BulkInsertState bistate)
 
void ReleaseBulkInsertStatePin (BulkInsertState bistate)
 
Oid heap_insert (Relation relation, HeapTuple tup, CommandId cid, int options, BulkInsertState bistate)
 
void heap_multi_insert (Relation relation, HeapTuple *tuples, int ntuples, CommandId cid, int options, BulkInsertState bistate)
 
Oid simple_heap_insert (Relation relation, HeapTuple tup)
 
static uint8 compute_infobits (uint16 infomask, uint16 infomask2)
 
static bool xmax_infomask_changed (uint16 new_infomask, uint16 old_infomask)
 
HTSU_Result heap_delete (Relation relation, ItemPointer tid, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd)
 
void simple_heap_delete (Relation relation, ItemPointer tid)
 
HTSU_Result heap_update (Relation relation, ItemPointer otid, HeapTuple newtup, CommandId cid, Snapshot crosscheck, bool wait, HeapUpdateFailureData *hufd, LockTupleMode *lockmode)
 
static bool heap_tuple_attr_equals (TupleDesc tupdesc, int attrnum, HeapTuple tup1, HeapTuple tup2)
 
void simple_heap_update (Relation relation, ItemPointer otid, HeapTuple tup)
 
static MultiXactStatus get_mxact_status_for_lock (LockTupleMode mode, bool is_update)
 
HTSU_Result heap_lock_tuple (Relation relation, HeapTuple tuple, CommandId cid, LockTupleMode mode, LockWaitPolicy wait_policy, bool follow_updates, Buffer *buffer, HeapUpdateFailureData *hufd)
 
static HTSU_Result test_lockmode_for_conflict (MultiXactStatus status, TransactionId xid, LockTupleMode mode, bool *needwait)
 
static HTSU_Result heap_lock_updated_tuple_rec (Relation rel, ItemPointer tid, TransactionId xid, LockTupleMode mode)
 
void heap_finish_speculative (Relation relation, HeapTuple tuple)
 
void heap_abort_speculative (Relation relation, HeapTuple tuple)
 
void heap_inplace_update (Relation relation, HeapTuple tuple)
 
static TransactionId FreezeMultiXactId (MultiXactId multi, uint16 t_infomask, TransactionId cutoff_xid, MultiXactId cutoff_multi, uint16 *flags)
 
bool heap_prepare_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 
void heap_execute_freeze_tuple (HeapTupleHeader tuple, xl_heap_freeze_tuple *frz)
 
bool heap_freeze_tuple (HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi)
 
TransactionId HeapTupleGetUpdateXid (HeapTupleHeader tuple)
 
static bool Do_MultiXactIdWait (MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
 
bool heap_tuple_needs_eventual_freeze (HeapTupleHeader tuple)
 
bool heap_tuple_needs_freeze (HeapTupleHeader tuple, TransactionId cutoff_xid, MultiXactId cutoff_multi, Buffer buf)
 
void HeapTupleHeaderAdvanceLatestRemovedXid (HeapTupleHeader tuple, TransactionId *latestRemovedXid)
 
XLogRecPtr log_heap_cleanup_info (RelFileNode rnode, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_clean (Relation reln, Buffer buffer, OffsetNumber *redirected, int nredirected, OffsetNumber *nowdead, int ndead, OffsetNumber *nowunused, int nunused, TransactionId latestRemovedXid)
 
XLogRecPtr log_heap_freeze (Relation reln, Buffer buffer, TransactionId cutoff_xid, xl_heap_freeze_tuple *tuples, int ntuples)
 
XLogRecPtr log_heap_visible (RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer, TransactionId cutoff_xid, uint8 vmflags)
 
static void heap_xlog_cleanup_info (XLogReaderState *record)
 
static void heap_xlog_clean (XLogReaderState *record)
 
static void heap_xlog_visible (XLogReaderState *record)
 
static void heap_xlog_freeze_page (XLogReaderState *record)
 
static void fix_infomask_from_infobits (uint8 infobits, uint16 *infomask, uint16 *infomask2)
 
static void heap_xlog_delete (XLogReaderState *record)
 
static void heap_xlog_insert (XLogReaderState *record)
 
static void heap_xlog_multi_insert (XLogReaderState *record)
 
static void heap_xlog_update (XLogReaderState *record, bool hot_update)
 
static void heap_xlog_confirm (XLogReaderState *record)
 
static void heap_xlog_lock (XLogReaderState *record)
 
static void heap_xlog_lock_updated (XLogReaderState *record)
 
static void heap_xlog_inplace (XLogReaderState *record)
 
void heap_redo (XLogReaderState *record)
 
void heap2_redo (XLogReaderState *record)
 
void heap_sync (Relation rel)
 
void heap_mask (char *pagedata, BlockNumber blkno)
 

Variables

bool synchronize_seqscans = true
 
struct {
   LOCKMODE   hwlock
 
   int   lockstatus
 
   int   updstatus
 
} tupleLockExtraInfo [MaxLockTupleMode+1]
 
static const int MultiXactStatusLock [MaxMultiXactStatus+1]
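
The tupleLockExtraInfo table is what the Tuplock macros and get_mxact_status_for_lock() index into: for each LockTupleMode it holds the heavyweight lock mode to take on the tuple (hwlock) and the MultiXactStatus to record for a plain lock (lockstatus) and for an update (updstatus, -1 when the mode cannot be used to update). The initializer below is only an illustrative sketch of that shape; the concrete lock and status values are assumptions and should be checked against heapam.c itself.

/*
 * Illustrative sketch of the tupleLockExtraInfo initializer -- an assumption
 * about its contents, not a verbatim quote of heapam.c.  Rows are indexed by
 * LockTupleMode; updstatus == -1 marks modes that cannot be used for updates.
 */
static const struct
{
	LOCKMODE	hwlock;
	int			lockstatus;
	int			updstatus;
}			tupleLockExtraInfo[MaxLockTupleMode + 1] =
{
	{AccessShareLock, MultiXactStatusForKeyShare, -1},	/* LockTupleKeyShare */
	{RowShareLock, MultiXactStatusForShare, -1},		/* LockTupleShare */
	{ExclusiveLock, MultiXactStatusForNoKeyUpdate,
	 MultiXactStatusNoKeyUpdate},						/* LockTupleNoKeyExclusive */
	{AccessExclusiveLock, MultiXactStatusForUpdate,
	 MultiXactStatusUpdate}								/* LockTupleExclusive */
};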
 

Macro Definition Documentation

#define ConditionalLockTupleTuplock (   rel,
  tup,
  mode 
)    ConditionalLockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 186 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define FRM_INVALIDATE_XMAX   0x0002

Definition at line 6333 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_MARK_COMMITTED   0x0010

Definition at line 6336 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_NOOP   0x0001

Definition at line 6332 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_MULTI   0x0008

Definition at line 6335 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().

#define FRM_RETURN_IS_XID   0x0004

Definition at line 6334 of file heapam.c.

Referenced by FreezeMultiXactId(), and heap_prepare_freeze_tuple().
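
Taken together, the FRM_* flags are how FreezeMultiXactId() tells its caller, heap_prepare_freeze_tuple(), what to do with a tuple's xmax. The fragment below is a condensed sketch of that caller-side handling, not a verbatim excerpt: tuple, xmax, cutoff_xid, cutoff_multi and frz (an xl_heap_freeze_tuple *) are assumed to be in scope.

/* Condensed sketch of reacting to the FRM_* flags (illustrative only). */
uint16		flags;
TransactionId newxmax;

newxmax = FreezeMultiXactId(xmax, tuple->t_infomask,
							cutoff_xid, cutoff_multi, &flags);

if (flags & FRM_INVALIDATE_XMAX)
{
	/* the whole xmax can be thrown away */
	frz->xmax = InvalidTransactionId;
	frz->t_infomask |= HEAP_XMAX_INVALID;
}
else if (flags & FRM_RETURN_IS_XID)
{
	/* a single updater XID replaces the multixact */
	frz->xmax = newxmax;
	if (flags & FRM_MARK_COMMITTED)
		frz->t_infomask |= HEAP_XMAX_COMMITTED;
}
else if (flags & FRM_RETURN_IS_MULTI)
{
	/* a trimmed-down multixact replaces the old one */
	frz->xmax = newxmax;
	frz->t_infomask |= HEAP_XMAX_IS_MULTI;
}
/* FRM_NOOP: the existing xmax is kept unchanged */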

#define HEAPDEBUG_1

Definition at line 1790 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_2

Definition at line 1791 of file heapam.c.

Referenced by heap_getnext().

#define HEAPDEBUG_3

Definition at line 1792 of file heapam.c.

Referenced by heap_getnext().

#define LOCKMODE_from_mxstatus (   status)    (tupleLockExtraInfo[TUPLOCK_from_mxstatus((status))].hwlock)

Definition at line 174 of file heapam.c.

#define LockTupleTuplock (   rel,
  tup,
  mode 
)    LockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 182 of file heapam.c.

Referenced by heap_acquire_tuplock().

#define TUPLOCK_from_mxstatus (   status)    (MultiXactStatusLock[(status)])

Definition at line 204 of file heapam.c.

Referenced by compute_new_xmax_infomask(), GetMultiXactIdHintBits(), and heap_lock_tuple().

#define UnlockTupleTuplock (   rel,
  tup,
  mode 
)    UnlockTuple((rel), (tup), tupleLockExtraInfo[mode].hwlock)

Definition at line 184 of file heapam.c.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().
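
These three Tuplock macros are used together by heap_acquire_tuplock() to take (or conditionally take) the heavyweight tuple lock that serializes concurrent lockers while a tuple's xmax is being changed. The function below is a simplified sketch of that pattern under the heap_acquire_tuplock() signature listed earlier; it is an illustration, not the verbatim heapam.c body.

/* Simplified sketch of the heap_acquire_tuplock() pattern (not verbatim). */
static bool
acquire_tuplock_sketch(Relation relation, ItemPointer tid,
					   LockTupleMode mode, LockWaitPolicy wait_policy,
					   bool *have_tuple_lock)
{
	if (*have_tuple_lock)
		return true;			/* already holding the heavyweight tuple lock */

	switch (wait_policy)
	{
		case LockWaitBlock:
			LockTupleTuplock(relation, tid, mode);	/* sleep until acquired */
			break;

		case LockWaitSkip:
			if (!ConditionalLockTupleTuplock(relation, tid, mode))
				return false;	/* caller skips this tuple */
			break;

		case LockWaitError:
			if (!ConditionalLockTupleTuplock(relation, tid, mode))
				ereport(ERROR,
						(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
						 errmsg("could not obtain lock on row in relation \"%s\"",
								RelationGetRelationName(relation))));
			break;
	}

	*have_tuple_lock = true;	/* caller must UnlockTupleTuplock() later */
	return true;
}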

Function Documentation

static uint8 compute_infobits ( uint16  infomask,
uint16  infomask2 
)
static

Definition at line 2955 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_abort_speculative(), heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), heap_update(), and log_heap_update().

2956 {
2957  return
2958  ((infomask & HEAP_XMAX_IS_MULTI) != 0 ? XLHL_XMAX_IS_MULTI : 0) |
2959  ((infomask & HEAP_XMAX_LOCK_ONLY) != 0 ? XLHL_XMAX_LOCK_ONLY : 0) |
2960  ((infomask & HEAP_XMAX_EXCL_LOCK) != 0 ? XLHL_XMAX_EXCL_LOCK : 0) |
2961  /* note we ignore HEAP_XMAX_SHR_LOCK here */
2962  ((infomask & HEAP_XMAX_KEYSHR_LOCK) != 0 ? XLHL_XMAX_KEYSHR_LOCK : 0) |
2963  ((infomask2 & HEAP_KEYS_UPDATED) != 0 ?
2964  XLHL_KEYS_UPDATED : 0);
2965 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
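
compute_infobits() packs the xmax-related infomask bits into the one-byte XLHL_* representation carried in WAL records, and fix_infomask_from_infobits() (documented further down) performs the reverse mapping during redo. The fragment below only illustrates that round trip; both helpers are static to heapam.c, so this is not callable from outside the file.

/* Illustration of the infobits round trip (both helpers are static). */
uint16		infomask = HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_LOCK_ONLY;
uint16		infomask2 = 0;
uint16		redo_infomask = 0,
			redo_infomask2 = 0;
uint8		infobits;

infobits = compute_infobits(infomask, infomask2);
/* infobits == XLHL_XMAX_EXCL_LOCK | XLHL_XMAX_LOCK_ONLY */

fix_infomask_from_infobits(infobits, &redo_infomask, &redo_infomask2);
/* redo_infomask == HEAP_XMAX_EXCL_LOCK | HEAP_XMAX_LOCK_ONLY; as the source
 * comments note, HEAP_XMAX_SHR_LOCK gets no XLHL_* bit of its own. */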
static void compute_new_xmax_infomask ( TransactionId  xmax,
uint16  old_infomask,
uint16  old_infomask2,
TransactionId  add_to_xmax,
LockTupleMode  mode,
bool  is_update,
TransactionId result_xmax,
uint16 result_infomask,
uint16 result_infomask2 
)
static

Definition at line 5309 of file heapam.c.

References Assert, elog, ERROR, get_mxact_status_for_lock(), GetMultiXactIdHintBits(), HEAP_KEYS_UPDATED, HEAP_LOCKED_UPGRADED, HEAP_XMAX_COMMITTED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, InvalidTransactionId, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactIdCreate(), MultiXactIdExpand(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TUPLOCK_from_mxstatus, and WARNING.

Referenced by heap_delete(), heap_lock_tuple(), heap_lock_updated_tuple_rec(), and heap_update().

5314 {
5315  TransactionId new_xmax;
5316  uint16 new_infomask,
5317  new_infomask2;
5318 
5319  Assert(TransactionIdIsValid(add_to_xmax));
5320 
5321 l5:
5322  new_infomask = 0;
5323  new_infomask2 = 0;
5324  if (old_infomask & HEAP_XMAX_INVALID)
5325  {
5326  /*
5327  * No previous locker; we just insert our own TransactionId.
5328  *
5329  * Note that it's critical that this case be the first one checked,
5330  * because there are several blocks below that come back to this one
5331  * to implement certain optimizations; old_infomask might contain
5332  * other dirty bits in those cases, but we don't really care.
5333  */
5334  if (is_update)
5335  {
5336  new_xmax = add_to_xmax;
5337  if (mode == LockTupleExclusive)
5338  new_infomask2 |= HEAP_KEYS_UPDATED;
5339  }
5340  else
5341  {
5342  new_infomask |= HEAP_XMAX_LOCK_ONLY;
5343  switch (mode)
5344  {
5345  case LockTupleKeyShare:
5346  new_xmax = add_to_xmax;
5347  new_infomask |= HEAP_XMAX_KEYSHR_LOCK;
5348  break;
5349  case LockTupleShare:
5350  new_xmax = add_to_xmax;
5351  new_infomask |= HEAP_XMAX_SHR_LOCK;
5352  break;
5353  case LockTupleNoKeyExclusive:
5354  new_xmax = add_to_xmax;
5355  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5356  break;
5357  case LockTupleExclusive:
5358  new_xmax = add_to_xmax;
5359  new_infomask |= HEAP_XMAX_EXCL_LOCK;
5360  new_infomask2 |= HEAP_KEYS_UPDATED;
5361  break;
5362  default:
5363  new_xmax = InvalidTransactionId; /* silence compiler */
5364  elog(ERROR, "invalid lock mode");
5365  }
5366  }
5367  }
5368  else if (old_infomask & HEAP_XMAX_IS_MULTI)
5369  {
5370  MultiXactStatus new_status;
5371 
5372  /*
5373  * Currently we don't allow XMAX_COMMITTED to be set for multis, so
5374  * cross-check.
5375  */
5376  Assert(!(old_infomask & HEAP_XMAX_COMMITTED));
5377 
5378  /*
5379  * A multixact together with LOCK_ONLY set but neither lock bit set
5380  * (i.e. a pg_upgraded share locked tuple) cannot possibly be running
5381  * anymore. This check is critical for databases upgraded by
5382  * pg_upgrade; both MultiXactIdIsRunning and MultiXactIdExpand assume
5383  * that such multis are never passed.
5384  */
5385  if (HEAP_LOCKED_UPGRADED(old_infomask))
5386  {
5387  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5388  old_infomask |= HEAP_XMAX_INVALID;
5389  goto l5;
5390  }
5391 
5392  /*
5393  * If the XMAX is already a MultiXactId, then we need to expand it to
5394  * include add_to_xmax; but if all the members were lockers and are
5395  * all gone, we can do away with the IS_MULTI bit and just set
5396  * add_to_xmax as the only locker/updater. If all lockers are gone
5397  * and we have an updater that aborted, we can also do without a
5398  * multi.
5399  *
5400  * The cost of doing GetMultiXactIdMembers would be paid by
5401  * MultiXactIdExpand if we weren't to do this, so this check is not
5402  * incurring extra work anyhow.
5403  */
5404  if (!MultiXactIdIsRunning(xmax, HEAP_XMAX_IS_LOCKED_ONLY(old_infomask)))
5405  {
5406  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) ||
5407  !TransactionIdDidCommit(MultiXactIdGetUpdateXid(xmax,
5408  old_infomask)))
5409  {
5410  /*
5411  * Reset these bits and restart; otherwise fall through to
5412  * create a new multi below.
5413  */
5414  old_infomask &= ~HEAP_XMAX_IS_MULTI;
5415  old_infomask |= HEAP_XMAX_INVALID;
5416  goto l5;
5417  }
5418  }
5419 
5420  new_status = get_mxact_status_for_lock(mode, is_update);
5421 
5422  new_xmax = MultiXactIdExpand((MultiXactId) xmax, add_to_xmax,
5423  new_status);
5424  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5425  }
5426  else if (old_infomask & HEAP_XMAX_COMMITTED)
5427  {
5428  /*
5429  * It's a committed update, so we need to preserve him as updater of
5430  * the tuple.
5431  */
5432  MultiXactStatus status;
5433  MultiXactStatus new_status;
5434 
5435  if (old_infomask2 & HEAP_KEYS_UPDATED)
5436  status = MultiXactStatusUpdate;
5437  else
5438  status = MultiXactStatusNoKeyUpdate;
5439 
5440  new_status = get_mxact_status_for_lock(mode, is_update);
5441 
5442  /*
5443  * since it's not running, it's obviously impossible for the old
5444  * updater to be identical to the current one, so we need not check
5445  * for that case as we do in the block above.
5446  */
5447  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5448  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5449  }
5450  else if (TransactionIdIsInProgress(xmax))
5451  {
5452  /*
5453  * If the XMAX is a valid, in-progress TransactionId, then we need to
5454  * create a new MultiXactId that includes both the old locker or
5455  * updater and our own TransactionId.
5456  */
5457  MultiXactStatus new_status;
5458  MultiXactStatus old_status;
5459  LockTupleMode old_mode;
5460 
5461  if (HEAP_XMAX_IS_LOCKED_ONLY(old_infomask))
5462  {
5463  if (HEAP_XMAX_IS_KEYSHR_LOCKED(old_infomask))
5464  old_status = MultiXactStatusForKeyShare;
5465  else if (HEAP_XMAX_IS_SHR_LOCKED(old_infomask))
5466  old_status = MultiXactStatusForShare;
5467  else if (HEAP_XMAX_IS_EXCL_LOCKED(old_infomask))
5468  {
5469  if (old_infomask2 & HEAP_KEYS_UPDATED)
5470  old_status = MultiXactStatusForUpdate;
5471  else
5472  old_status = MultiXactStatusForNoKeyUpdate;
5473  }
5474  else
5475  {
5476  /*
5477  * LOCK_ONLY can be present alone only when a page has been
5478  * upgraded by pg_upgrade. But in that case,
5479  * TransactionIdIsInProgress() should have returned false. We
5480  * assume it's no longer locked in this case.
5481  */
5482  elog(WARNING, "LOCK_ONLY found for Xid in progress %u", xmax);
5483  old_infomask |= HEAP_XMAX_INVALID;
5484  old_infomask &= ~HEAP_XMAX_LOCK_ONLY;
5485  goto l5;
5486  }
5487  }
5488  else
5489  {
5490  /* it's an update, but which kind? */
5491  if (old_infomask2 & HEAP_KEYS_UPDATED)
5492  old_status = MultiXactStatusUpdate;
5493  else
5494  old_status = MultiXactStatusNoKeyUpdate;
5495  }
5496 
5497  old_mode = TUPLOCK_from_mxstatus(old_status);
5498 
5499  /*
5500  * If the lock to be acquired is for the same TransactionId as the
5501  * existing lock, there's an optimization possible: consider only the
5502  * strongest of both locks as the only one present, and restart.
5503  */
5504  if (xmax == add_to_xmax)
5505  {
5506  /*
5507  * Note that it's not possible for the original tuple to be
5508  * updated: we wouldn't be here because the tuple would have been
5509  * invisible and we wouldn't try to update it. As a subtlety,
5510  * this code can also run when traversing an update chain to lock
5511  * future versions of a tuple. But we wouldn't be here either,
5512  * because the add_to_xmax would be different from the original
5513  * updater.
5514  */
5515  Assert(HEAP_XMAX_IS_LOCKED_ONLY(old_infomask));
5516 
5517  /* acquire the strongest of both */
5518  if (mode < old_mode)
5519  mode = old_mode;
5520  /* mustn't touch is_update */
5521 
5522  old_infomask |= HEAP_XMAX_INVALID;
5523  goto l5;
5524  }
5525 
5526  /* otherwise, just fall back to creating a new multixact */
5527  new_status = get_mxact_status_for_lock(mode, is_update);
5528  new_xmax = MultiXactIdCreate(xmax, old_status,
5529  add_to_xmax, new_status);
5530  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5531  }
5532  else if (!HEAP_XMAX_IS_LOCKED_ONLY(old_infomask) &&
5533  TransactionIdDidCommit(xmax))
5534  {
5535  /*
5536  * It's a committed update, so we gotta preserve him as updater of the
5537  * tuple.
5538  */
5539  MultiXactStatus status;
5540  MultiXactStatus new_status;
5541 
5542  if (old_infomask2 & HEAP_KEYS_UPDATED)
5543  status = MultiXactStatusUpdate;
5544  else
5545  status = MultiXactStatusNoKeyUpdate;
5546 
5547  new_status = get_mxact_status_for_lock(mode, is_update);
5548 
5549  /*
5550  * since it's not running, it's obviously impossible for the old
5551  * updater to be identical to the current one, so we need not check
5552  * for that case as we do in the block above.
5553  */
5554  new_xmax = MultiXactIdCreate(xmax, status, add_to_xmax, new_status);
5555  GetMultiXactIdHintBits(new_xmax, &new_infomask, &new_infomask2);
5556  }
5557  else
5558  {
5559  /*
5560  * Can get here iff the locking/updating transaction was running when
5561  * the infomask was extracted from the tuple, but finished before
5562  * TransactionIdIsInProgress got to run. Deal with it as if there was
5563  * no locker at all in the first place.
5564  */
5565  old_infomask |= HEAP_XMAX_INVALID;
5566  goto l5;
5567  }
5568 
5569  *result_infomask = new_infomask;
5570  *result_infomask2 = new_infomask2;
5571  *result_xmax = new_xmax;
5572 }
static void GetMultiXactIdHintBits(MultiXactId multi, uint16 *new_infomask, uint16 *new_infomask2)
Definition: heapam.c:6851
MultiXactStatus
Definition: multixact.h:40
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
#define HEAP_XMAX_COMMITTED
Definition: htup_details.h:192
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
#define HEAP_XMAX_IS_SHR_LOCKED(infomask)
Definition: htup_details.h:248
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6932
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:267
#define ERROR
Definition: elog.h:43
#define HEAP_XMAX_INVALID
Definition: htup_details.h:193
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define InvalidTransactionId
Definition: transam.h:31
#define WARNING
Definition: elog.h:40
MultiXactId MultiXactIdCreate(TransactionId xid1, MultiXactStatus status1, TransactionId xid2, MultiXactStatus status2)
Definition: multixact.c:384
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
TransactionId MultiXactId
Definition: c.h:407
#define Assert(condition)
Definition: c.h:675
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:204
static MultiXactStatus get_mxact_status_for_lock(LockTupleMode mode, bool is_update)
Definition: heapam.c:4541
#define HEAP_XMAX_IS_EXCL_LOCKED(infomask)
Definition: htup_details.h:250
#define elog
Definition: elog.h:219
#define HEAP_XMAX_IS_KEYSHR_LOCKED(infomask)
Definition: htup_details.h:252
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
MultiXactId MultiXactIdExpand(MultiXactId multi, TransactionId xid, MultiXactStatus status)
Definition: multixact.c:437
static bool ConditionalMultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
Relation  rel,
int *  remaining 
)
static

Definition at line 7186 of file heapam.c.

References Do_MultiXactIdWait(), and XLTW_None.

Referenced by heap_lock_tuple().

7188 {
7189  return Do_MultiXactIdWait(multi, status, infomask, true,
7190  rel, NULL, XLTW_None, remaining);
7191 }
int remaining
Definition: informix.c:692
Definition: lmgr.h:26
static bool Do_MultiXactIdWait(MultiXactId multi, MultiXactStatus status, uint16 infomask, bool nowait, Relation rel, ItemPointer ctid, XLTW_Oper oper, int *remaining)
Definition: heapam.c:7086
#define NULL
Definition: c.h:229
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
static bool Do_MultiXactIdWait ( MultiXactId  multi,
MultiXactStatus  status,
uint16  infomask,
bool  nowait,
Relation  rel,
ItemPointer  ctid,
XLTW_Oper  oper,
int *  remaining 
)
static

Definition at line 7086 of file heapam.c.

References ConditionalXactLockTableWait(), DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, LOCKMODE_from_mxstatus, pfree(), result, MultiXactMember::status, TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), XactLockTableWait(), and MultiXactMember::xid.

Referenced by ConditionalMultiXactIdWait(), and MultiXactIdWait().

7090 {
7091  bool result = true;
7092  MultiXactMember *members;
7093  int nmembers;
7094  int remain = 0;
7095 
7096  /* for pre-pg_upgrade tuples, no need to sleep at all */
7097  nmembers = HEAP_LOCKED_UPGRADED(infomask) ? -1 :
7098  GetMultiXactIdMembers(multi, &members, false,
7099  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7100 
7101  if (nmembers >= 0)
7102  {
7103  int i;
7104 
7105  for (i = 0; i < nmembers; i++)
7106  {
7107  TransactionId memxid = members[i].xid;
7108  MultiXactStatus memstatus = members[i].status;
7109 
7110  if (TransactionIdIsCurrentTransactionId(memxid))
7111  {
7112  remain++;
7113  continue;
7114  }
7115 
7116  if (!DoLockModesConflict(LOCKMODE_from_mxstatus(memstatus),
7117  LOCKMODE_from_mxstatus(status)))
7118  {
7119  if (remaining && TransactionIdIsInProgress(memxid))
7120  remain++;
7121  continue;
7122  }
7123 
7124  /*
7125  * This member conflicts with our multi, so we have to sleep (or
7126  * return failure, if asked to avoid waiting.)
7127  *
7128  * Note that we don't set up an error context callback ourselves,
7129  * but instead we pass the info down to XactLockTableWait. This
7130  * might seem a bit wasteful because the context is set up and
7131  * tore down for each member of the multixact, but in reality it
7132  * should be barely noticeable, and it avoids duplicate code.
7133  */
7134  if (nowait)
7135  {
7136  result = ConditionalXactLockTableWait(memxid);
7137  if (!result)
7138  break;
7139  }
7140  else
7141  XactLockTableWait(memxid, rel, ctid, oper);
7142  }
7143 
7144  pfree(members);
7145  }
7146 
7147  if (remaining)
7148  *remaining = remain;
7149 
7150  return result;
7151 }
int remaining
Definition: informix.c:692
MultiXactStatus
Definition: multixact.h:40
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
return result
Definition: formatting.c:1618
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:174
bool ConditionalXactLockTableWait(TransactionId xid)
Definition: lmgr.c:607
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
MultiXactStatus status
Definition: multixact.h:62
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
void XactLockTableWait(TransactionId xid, Relation rel, ItemPointer ctid, XLTW_Oper oper)
Definition: lmgr.c:554
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
Operator oper(ParseState *pstate, List *opname, Oid ltypeId, Oid rtypeId, bool noError, int location)
Definition: parse_oper.c:377
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
static bool DoesMultiXactIdConflict ( MultiXactId  multi,
uint16  infomask,
LockTupleMode  lockmode 
)
static

Definition at line 6997 of file heapam.c.

References DoLockModesConflict(), GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, i, ISUPDATE_from_mxstatus, LOCKMODE_from_mxstatus, pfree(), result, status(), TransactionIdDidAbort(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), tupleLockExtraInfo, and MultiXactMember::xid.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

6999 {
7000  int nmembers;
7001  MultiXactMember *members;
7002  bool result = false;
7003  LOCKMODE wanted = tupleLockExtraInfo[lockmode].hwlock;
7004 
7005  if (HEAP_LOCKED_UPGRADED(infomask))
7006  return false;
7007 
7008  nmembers = GetMultiXactIdMembers(multi, &members, false,
7009  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
7010  if (nmembers >= 0)
7011  {
7012  int i;
7013 
7014  for (i = 0; i < nmembers; i++)
7015  {
7016  TransactionId memxid;
7017  LOCKMODE memlockmode;
7018 
7019  memlockmode = LOCKMODE_from_mxstatus(members[i].status);
7020 
7021  /* ignore members that don't conflict with the lock we want */
7022  if (!DoLockModesConflict(memlockmode, wanted))
7023  continue;
7024 
7025  /* ignore members from current xact */
7026  memxid = members[i].xid;
7027  if (TransactionIdIsCurrentTransactionId(memxid))
7028  continue;
7029 
7030  if (ISUPDATE_from_mxstatus(members[i].status))
7031  {
7032  /* ignore aborted updaters */
7033  if (TransactionIdDidAbort(memxid))
7034  continue;
7035  }
7036  else
7037  {
7038  /* ignore lockers-only that are no longer in progress */
7039  if (!TransactionIdIsInProgress(memxid))
7040  continue;
7041  }
7042 
7043  /*
7044  * Whatever remains are either live lockers that conflict with our
7045  * wanted lock, and updaters that are not aborted. Those conflict
7046  * with what we want, so return true.
7047  */
7048  result = true;
7049  break;
7050  }
7051  pfree(members);
7052  }
7053 
7054  return result;
7055 }
uint32 TransactionId
Definition: c.h:397
int LOCKMODE
Definition: lockdefs.h:26
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
return result
Definition: formatting.c:1618
#define LOCKMODE_from_mxstatus(status)
Definition: heapam.c:174
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
bool DoLockModesConflict(LOCKMODE mode1, LOCKMODE mode2)
Definition: lock.c:556
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
bool TransactionIdDidAbort(TransactionId transactionId)
Definition: transam.c:181
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
static HeapTuple ExtractReplicaIdentity ( Relation  rel,
HeapTuple  tup,
bool  key_modified,
bool copy 
)
static

Definition at line 7824 of file heapam.c.

References DEBUG4, elog, ERROR, heap_deform_tuple(), heap_form_tuple(), heap_freetuple(), HeapTupleGetOid, HeapTupleHasExternal, HeapTupleSetOid, MaxHeapAttributeNumber, tupleDesc::natts, NULL, ObjectIdAttributeNumber, OidIsValid, RelationData::rd_index, RelationData::rd_rel, RelationClose(), RelationGetDescr, RelationGetRelationName, RelationGetReplicaIndex(), RelationIdGetRelation(), RelationIsLogicallyLogged, REPLICA_IDENTITY_FULL, REPLICA_IDENTITY_NOTHING, toast_flatten_tuple(), and values.

Referenced by heap_delete(), and heap_update().

7825 {
7826  TupleDesc desc = RelationGetDescr(relation);
7827  Oid replidindex;
7828  Relation idx_rel;
7829  TupleDesc idx_desc;
7830  char replident = relation->rd_rel->relreplident;
7831  HeapTuple key_tuple = NULL;
7832  bool nulls[MaxHeapAttributeNumber];
7833  Datum values[MaxHeapAttributeNumber];
7834  int natt;
7835 
7836  *copy = false;
7837 
7838  if (!RelationIsLogicallyLogged(relation))
7839  return NULL;
7840 
7841  if (replident == REPLICA_IDENTITY_NOTHING)
7842  return NULL;
7843 
7844  if (replident == REPLICA_IDENTITY_FULL)
7845  {
7846  /*
7847  * When logging the entire old tuple, it very well could contain
7848  * toasted columns. If so, force them to be inlined.
7849  */
7850  if (HeapTupleHasExternal(tp))
7851  {
7852  *copy = true;
7853  tp = toast_flatten_tuple(tp, RelationGetDescr(relation));
7854  }
7855  return tp;
7856  }
7857 
7858  /* if the key hasn't changed and we're only logging the key, we're done */
7859  if (!key_changed)
7860  return NULL;
7861 
7862  /* find the replica identity index */
7863  replidindex = RelationGetReplicaIndex(relation);
7864  if (!OidIsValid(replidindex))
7865  {
7866  elog(DEBUG4, "could not find configured replica identity for table \"%s\"",
7867  RelationGetRelationName(relation));
7868  return NULL;
7869  }
7870 
7871  idx_rel = RelationIdGetRelation(replidindex);
7872  idx_desc = RelationGetDescr(idx_rel);
7873 
7874  /* deform tuple, so we have fast access to columns */
7875  heap_deform_tuple(tp, desc, values, nulls);
7876 
7877  /* set all columns to NULL, regardless of whether they actually are */
7878  memset(nulls, 1, sizeof(nulls));
7879 
7880  /*
7881  * Now set all columns contained in the index to NOT NULL, they cannot
7882  * currently be NULL.
7883  */
7884  for (natt = 0; natt < idx_desc->natts; natt++)
7885  {
7886  int attno = idx_rel->rd_index->indkey.values[natt];
7887 
7888  if (attno < 0)
7889  {
7890  /*
7891  * The OID column can appear in an index definition, but that's
7892  * OK, because we always copy the OID if present (see below).
7893  * Other system columns may not.
7894  */
7895  if (attno == ObjectIdAttributeNumber)
7896  continue;
7897  elog(ERROR, "system column in index");
7898  }
7899  nulls[attno - 1] = false;
7900  }
7901 
7902  key_tuple = heap_form_tuple(desc, values, nulls);
7903  *copy = true;
7904  RelationClose(idx_rel);
7905 
7906  /*
7907  * Always copy oids if the table has them, even if not included in the
7908  * index. The space in the logged tuple is used anyway, so there's little
7909  * point in not including the information.
7910  */
7911  if (relation->rd_rel->relhasoids)
7912  HeapTupleSetOid(key_tuple, HeapTupleGetOid(tp));
7913 
7914  /*
7915  * If the tuple, which by here only contains indexed columns, still has
7916  * toasted columns, force them to be inlined. This is somewhat unlikely
7917  * since there's limits on the size of indexed columns, so we don't
7918  * duplicate toast_flatten_tuple()s functionality in the above loop over
7919  * the indexed columns, even if it would be more efficient.
7920  */
7921  if (HeapTupleHasExternal(key_tuple))
7922  {
7923  HeapTuple oldtup = key_tuple;
7924 
7925  key_tuple = toast_flatten_tuple(oldtup, RelationGetDescr(relation));
7926  heap_freetuple(oldtup);
7927  }
7928 
7929  return key_tuple;
7930 }
HeapTuple toast_flatten_tuple(HeapTuple tup, TupleDesc tupleDesc)
Definition: tuptoaster.c:1084
Oid RelationGetReplicaIndex(Relation relation)
Definition: relcache.c:4665
#define RelationGetDescr(relation)
Definition: rel.h:429
#define ObjectIdAttributeNumber
Definition: sysattr.h:22
#define REPLICA_IDENTITY_NOTHING
Definition: pg_class.h:177
HeapTuple heap_form_tuple(TupleDesc tupleDescriptor, Datum *values, bool *isnull)
Definition: heaptuple.c:692
#define RelationIsLogicallyLogged(relation)
Definition: rel.h:576
void heap_freetuple(HeapTuple htup)
Definition: heaptuple.c:1374
unsigned int Oid
Definition: postgres_ext.h:31
#define DEBUG4
Definition: elog.h:22
#define OidIsValid(objectId)
Definition: c.h:538
int natts
Definition: tupdesc.h:73
#define HeapTupleSetOid(tuple, oid)
Definition: htup_details.h:698
Form_pg_index rd_index
Definition: rel.h:159
#define REPLICA_IDENTITY_FULL
Definition: pg_class.h:179
#define ERROR
Definition: elog.h:43
#define RelationGetRelationName(relation)
Definition: rel.h:437
void RelationClose(Relation relation)
Definition: relcache.c:2156
uintptr_t Datum
Definition: postgres.h:372
#define MaxHeapAttributeNumber
Definition: htup_details.h:47
#define NULL
Definition: c.h:229
void heap_deform_tuple(HeapTuple tuple, TupleDesc tupleDesc, Datum *values, bool *isnull)
Definition: heaptuple.c:935
static Datum values[MAXATTR]
Definition: bootstrap.c:162
#define HeapTupleHasExternal(tuple)
Definition: htup_details.h:674
#define elog
Definition: elog.h:219
#define HeapTupleGetOid(tuple)
Definition: htup_details.h:695
Relation RelationIdGetRelation(Oid relationId)
Definition: relcache.c:2067
static void fix_infomask_from_infobits ( uint8  infobits,
uint16 infomask,
uint16 infomask2 
)
static

Definition at line 8220 of file heapam.c.

References HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, XLHL_KEYS_UPDATED, XLHL_XMAX_EXCL_LOCK, XLHL_XMAX_IS_MULTI, XLHL_XMAX_KEYSHR_LOCK, and XLHL_XMAX_LOCK_ONLY.

Referenced by heap_xlog_delete(), heap_xlog_lock(), heap_xlog_lock_updated(), and heap_xlog_update().

8221 {
8222  *infomask &= ~(HEAP_XMAX_IS_MULTI | HEAP_XMAX_LOCK_ONLY |
8223  HEAP_XMAX_KEYSHR_LOCK | HEAP_XMAX_EXCL_LOCK);
8224  *infomask2 &= ~HEAP_KEYS_UPDATED;
8225 
8226  if (infobits & XLHL_XMAX_IS_MULTI)
8227  *infomask |= HEAP_XMAX_IS_MULTI;
8228  if (infobits & XLHL_XMAX_LOCK_ONLY)
8229  *infomask |= HEAP_XMAX_LOCK_ONLY;
8230  if (infobits & XLHL_XMAX_EXCL_LOCK)
8231  *infomask |= HEAP_XMAX_EXCL_LOCK;
8232  /* note HEAP_XMAX_SHR_LOCK isn't considered here */
8233  if (infobits & XLHL_XMAX_KEYSHR_LOCK)
8234  *infomask |= HEAP_XMAX_KEYSHR_LOCK;
8235 
8236  if (infobits & XLHL_KEYS_UPDATED)
8237  *infomask2 |= HEAP_KEYS_UPDATED;
8238 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define XLHL_XMAX_LOCK_ONLY
Definition: heapam_xlog.h:241
#define XLHL_XMAX_IS_MULTI
Definition: heapam_xlog.h:240
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define XLHL_XMAX_EXCL_LOCK
Definition: heapam_xlog.h:242
#define XLHL_KEYS_UPDATED
Definition: heapam_xlog.h:244
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define XLHL_XMAX_KEYSHR_LOCK
Definition: heapam_xlog.h:243
void FreeBulkInsertState ( BulkInsertState  bistate)

Definition at line 2336 of file heapam.c.

References BulkInsertStateData::current_buf, FreeAccessStrategy(), InvalidBuffer, pfree(), ReleaseBuffer(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_shutdown(), and transientrel_shutdown().

2337 {
2338  if (bistate->current_buf != InvalidBuffer)
2339  ReleaseBuffer(bistate->current_buf);
2340  FreeAccessStrategy(bistate->strategy);
2341  pfree(bistate);
2342 }
#define InvalidBuffer
Definition: buf.h:25
void ReleaseBuffer(Buffer buffer)
Definition: bufmgr.c:3309
void pfree(void *pointer)
Definition: mcxt.c:950
void FreeAccessStrategy(BufferAccessStrategy strategy)
Definition: freelist.c:580
BufferAccessStrategy strategy
Definition: hio.h:33
Buffer current_buf
Definition: hio.h:34
static TransactionId FreezeMultiXactId ( MultiXactId  multi,
uint16  t_infomask,
TransactionId  cutoff_xid,
MultiXactId  cutoff_multi,
uint16 flags 
)
static

Definition at line 6360 of file heapam.c.

References Assert, FRM_INVALIDATE_XMAX, FRM_MARK_COMMITTED, FRM_NOOP, FRM_RETURN_IS_MULTI, FRM_RETURN_IS_XID, GetMultiXactIdMembers(), HEAP_LOCKED_UPGRADED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, i, InvalidTransactionId, ISUPDATE_from_mxstatus, MultiXactIdCreateFromMembers(), MultiXactIdGetUpdateXid(), MultiXactIdIsRunning(), MultiXactIdIsValid, MultiXactIdPrecedes(), palloc(), pfree(), status(), TransactionIdDidCommit(), TransactionIdIsCurrentTransactionId(), TransactionIdIsInProgress(), TransactionIdIsValid, TransactionIdPrecedes(), and MultiXactMember::xid.

Referenced by heap_prepare_freeze_tuple().

6363 {
6364  TransactionId xid = InvalidTransactionId;
6365  int i;
6366  MultiXactMember *members;
6367  int nmembers;
6368  bool need_replace;
6369  int nnewmembers;
6370  MultiXactMember *newmembers;
6371  bool has_lockers;
6372  TransactionId update_xid;
6373  bool update_committed;
6374 
6375  *flags = 0;
6376 
6377  /* We should only be called in Multis */
6378  Assert(t_infomask & HEAP_XMAX_IS_MULTI);
6379 
6380  if (!MultiXactIdIsValid(multi) ||
6381  HEAP_LOCKED_UPGRADED(t_infomask))
6382  {
6383  /* Ensure infomask bits are appropriately set/reset */
6384  *flags |= FRM_INVALIDATE_XMAX;
6385  return InvalidTransactionId;
6386  }
6387  else if (MultiXactIdPrecedes(multi, cutoff_multi))
6388  {
6389  /*
6390  * This old multi cannot possibly have members still running. If it
6391  * was a locker only, it can be removed without any further
6392  * consideration; but if it contained an update, we might need to
6393  * preserve it.
6394  */
6395  Assert(!MultiXactIdIsRunning(multi,
6396  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask)));
6397  if (HEAP_XMAX_IS_LOCKED_ONLY(t_infomask))
6398  {
6399  *flags |= FRM_INVALIDATE_XMAX;
6400  xid = InvalidTransactionId; /* not strictly necessary */
6401  }
6402  else
6403  {
6404  /* replace multi by update xid */
6405  xid = MultiXactIdGetUpdateXid(multi, t_infomask);
6406 
6407  /* wasn't only a lock, xid needs to be valid */
6408  Assert(TransactionIdIsValid(xid));
6409 
6410  /*
6411  * If the xid is older than the cutoff, it has to have aborted,
6412  * otherwise the tuple would have gotten pruned away.
6413  */
6414  if (TransactionIdPrecedes(xid, cutoff_xid))
6415  {
6416  Assert(!TransactionIdDidCommit(xid));
6417  *flags |= FRM_INVALIDATE_XMAX;
6418  xid = InvalidTransactionId; /* not strictly necessary */
6419  }
6420  else
6421  {
6422  *flags |= FRM_RETURN_IS_XID;
6423  }
6424  }
6425 
6426  return xid;
6427  }
6428 
6429  /*
6430  * This multixact might have or might not have members still running, but
6431  * we know it's valid and is newer than the cutoff point for multis.
6432  * However, some member(s) of it may be below the cutoff for Xids, so we
6433  * need to walk the whole members array to figure out what to do, if
6434  * anything.
6435  */
6436 
6437  nmembers =
6438  GetMultiXactIdMembers(multi, &members, false,
6439  HEAP_XMAX_IS_LOCKED_ONLY(t_infomask));
6440  if (nmembers <= 0)
6441  {
6442  /* Nothing worth keeping */
6443  *flags |= FRM_INVALIDATE_XMAX;
6444  return InvalidTransactionId;
6445  }
6446 
6447  /* is there anything older than the cutoff? */
6448  need_replace = false;
6449  for (i = 0; i < nmembers; i++)
6450  {
6451  if (TransactionIdPrecedes(members[i].xid, cutoff_xid))
6452  {
6453  need_replace = true;
6454  break;
6455  }
6456  }
6457 
6458  /*
6459  * In the simplest case, there is no member older than the cutoff; we can
6460  * keep the existing MultiXactId as is.
6461  */
6462  if (!need_replace)
6463  {
6464  *flags |= FRM_NOOP;
6465  pfree(members);
6466  return InvalidTransactionId;
6467  }
6468 
6469  /*
6470  * If the multi needs to be updated, figure out which members do we need
6471  * to keep.
6472  */
6473  nnewmembers = 0;
6474  newmembers = palloc(sizeof(MultiXactMember) * nmembers);
6475  has_lockers = false;
6476  update_xid = InvalidTransactionId;
6477  update_committed = false;
6478 
6479  for (i = 0; i < nmembers; i++)
6480  {
6481  /*
6482  * Determine whether to keep this member or ignore it.
6483  */
6484  if (ISUPDATE_from_mxstatus(members[i].status))
6485  {
6486  TransactionId xid = members[i].xid;
6487 
6488  /*
6489  * It's an update; should we keep it? If the transaction is known
6490  * aborted or crashed then it's okay to ignore it, otherwise not.
6491  * Note that an updater older than cutoff_xid cannot possibly be
6492  * committed, because HeapTupleSatisfiesVacuum would have returned
6493  * HEAPTUPLE_DEAD and we would not be trying to freeze the tuple.
6494  *
6495  * As with all tuple visibility routines, it's critical to test
6496  * TransactionIdIsInProgress before TransactionIdDidCommit,
6497  * because of race conditions explained in detail in tqual.c.
6498  */
6499  if (TransactionIdIsCurrentTransactionId(xid) ||
6500  TransactionIdIsInProgress(xid))
6501  {
6502  Assert(!TransactionIdIsValid(update_xid));
6503  update_xid = xid;
6504  }
6505  else if (TransactionIdDidCommit(xid))
6506  {
6507  /*
6508  * The transaction committed, so we can tell caller to set
6509  * HEAP_XMAX_COMMITTED. (We can only do this because we know
6510  * the transaction is not running.)
6511  */
6512  Assert(!TransactionIdIsValid(update_xid));
6513  update_committed = true;
6514  update_xid = xid;
6515  }
6516 
6517  /*
6518  * Not in progress, not committed -- must be aborted or crashed;
6519  * we can ignore it.
6520  */
6521 
6522  /*
6523  * Since the tuple wasn't marked HEAPTUPLE_DEAD by vacuum, the
6524  * update Xid cannot possibly be older than the xid cutoff.
6525  */
6526  Assert(!TransactionIdIsValid(update_xid) ||
6527  !TransactionIdPrecedes(update_xid, cutoff_xid));
6528 
6529  /*
6530  * If we determined that it's an Xid corresponding to an update
6531  * that must be retained, additionally add it to the list of
6532  * members of the new Multi, in case we end up using that. (We
6533  * might still decide to use only an update Xid and not a multi,
6534  * but it's easier to maintain the list as we walk the old members
6535  * list.)
6536  */
6537  if (TransactionIdIsValid(update_xid))
6538  newmembers[nnewmembers++] = members[i];
6539  }
6540  else
6541  {
6542  /* We only keep lockers if they are still running */
6543  if (TransactionIdIsCurrentTransactionId(members[i].xid) ||
6544  TransactionIdIsInProgress(members[i].xid))
6545  {
6546  /* running locker cannot possibly be older than the cutoff */
6547  Assert(!TransactionIdPrecedes(members[i].xid, cutoff_xid));
6548  newmembers[nnewmembers++] = members[i];
6549  has_lockers = true;
6550  }
6551  }
6552  }
6553 
6554  pfree(members);
6555 
6556  if (nnewmembers == 0)
6557  {
6558  /* nothing worth keeping!? Tell caller to remove the whole thing */
6559  *flags |= FRM_INVALIDATE_XMAX;
6560  xid = InvalidTransactionId;
6561  }
6562  else if (TransactionIdIsValid(update_xid) && !has_lockers)
6563  {
6564  /*
6565  * If there's a single member and it's an update, pass it back alone
6566  * without creating a new Multi. (XXX we could do this when there's a
6567  * single remaining locker, too, but that would complicate the API too
6568  * much; moreover, the case with the single updater is more
6569  * interesting, because those are longer-lived.)
6570  */
6571  Assert(nnewmembers == 1);
6572  *flags |= FRM_RETURN_IS_XID;
6573  if (update_committed)
6574  *flags |= FRM_MARK_COMMITTED;
6575  xid = update_xid;
6576  }
6577  else
6578  {
6579  /*
6580  * Create a new multixact with the surviving members of the previous
6581  * one, to set as new Xmax in the tuple.
6582  */
6583  xid = MultiXactIdCreateFromMembers(nnewmembers, newmembers);
6584  *flags |= FRM_RETURN_IS_MULTI;
6585  }
6586 
6587  pfree(newmembers);
6588 
6589  return xid;
6590 }
#define FRM_RETURN_IS_XID
Definition: heapam.c:6334
#define FRM_MARK_COMMITTED
Definition: heapam.c:6336
uint32 TransactionId
Definition: c.h:397
bool TransactionIdIsCurrentTransactionId(TransactionId xid)
Definition: xact.c:773
bool TransactionIdIsInProgress(TransactionId xid)
Definition: procarray.c:995
MultiXactId MultiXactIdCreateFromMembers(int nmembers, MultiXactMember *members)
Definition: multixact.c:746
#define HEAP_LOCKED_UPGRADED(infomask)
Definition: htup_details.h:238
bool TransactionIdDidCommit(TransactionId transactionId)
Definition: transam.c:125
static TransactionId MultiXactIdGetUpdateXid(TransactionId xmax, uint16 t_infomask)
Definition: heapam.c:6932
void pfree(void *pointer)
Definition: mcxt.c:950
TransactionId xid
Definition: multixact.h:61
#define FRM_INVALIDATE_XMAX
Definition: heapam.c:6333
#define InvalidTransactionId
Definition: transam.h:31
#define ISUPDATE_from_mxstatus(status)
Definition: multixact.h:55
#define MultiXactIdIsValid(multi)
Definition: multixact.h:27
bool TransactionIdPrecedes(TransactionId id1, TransactionId id2)
Definition: transam.c:300
#define FRM_RETURN_IS_MULTI
Definition: heapam.c:6335
#define HEAP_XMAX_IS_LOCKED_ONLY(infomask)
Definition: htup_details.h:216
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define Assert(condition)
Definition: c.h:675
#define FRM_NOOP
Definition: heapam.c:6332
bool MultiXactIdPrecedes(MultiXactId multi1, MultiXactId multi2)
Definition: multixact.c:3140
void * palloc(Size size)
Definition: mcxt.c:849
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
#define TransactionIdIsValid(xid)
Definition: transam.h:41
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
bool MultiXactIdIsRunning(MultiXactId multi, bool isLockOnly)
Definition: multixact.c:549
static MultiXactStatus get_mxact_status_for_lock ( LockTupleMode  mode,
bool  is_update 
)
static

Definition at line 4541 of file heapam.c.

References elog, ERROR, and tupleLockExtraInfo.

Referenced by compute_new_xmax_infomask(), heap_lock_tuple(), and test_lockmode_for_conflict().

4542 {
4543  int retval;
4544 
4545  if (is_update)
4546  retval = tupleLockExtraInfo[mode].updstatus;
4547  else
4548  retval = tupleLockExtraInfo[mode].lockstatus;
4549 
4550  if (retval == -1)
4551  elog(ERROR, "invalid lock tuple mode %d/%s", mode,
4552  is_update ? "true" : "false");
4553 
4554  return (MultiXactStatus) retval;
4555 }
MultiXactStatus
Definition: multixact.h:40
#define ERROR
Definition: elog.h:43
static const struct @20 tupleLockExtraInfo[MaxLockTupleMode+1]
#define elog
Definition: elog.h:219
BulkInsertState GetBulkInsertState ( void  )

Definition at line 2322 of file heapam.c.

References BAS_BULKWRITE, BulkInsertStateData::current_buf, GetAccessStrategy(), InvalidBuffer, palloc(), and BulkInsertStateData::strategy.

Referenced by ATRewriteTable(), CopyFrom(), intorel_startup(), and transientrel_startup().

2323 {
2324  BulkInsertState bistate;
2325 
2326  bistate = (BulkInsertState) palloc(sizeof(BulkInsertStateData));
2327  bistate->strategy = GetAccessStrategy(BAS_BULKWRITE);
2328  bistate->current_buf = InvalidBuffer;
2329  return bistate;
2330 }
BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
Definition: freelist.c:525
#define InvalidBuffer
Definition: buf.h:25
struct BulkInsertStateData * BulkInsertState
Definition: heapam.h:33
BufferAccessStrategy strategy
Definition: hio.h:33
void * palloc(Size size)
Definition: mcxt.c:849
Buffer current_buf
Definition: hio.h:34
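
A BulkInsertState from GetBulkInsertState() is meant to be threaded through repeated heap_insert() (or heap_multi_insert()) calls and then released with FreeBulkInsertState(), as callers such as CopyFrom() do. The sketch below shows that lifecycle; rel and next_tuple_to_load() are hypothetical placeholders for the caller's open relation and data source, and the use of HEAP_INSERT_SKIP_FSM is just one plausible option.

/* Hedged sketch of the bulk-insert lifecycle; rel and next_tuple_to_load()
 * are hypothetical placeholders supplied by the caller. */
BulkInsertState bistate = GetBulkInsertState();
HeapTuple	tup;

while ((tup = next_tuple_to_load()) != NULL)
{
	/* HEAP_INSERT_SKIP_FSM keeps the loader filling its current page
	 * instead of consulting the free space map for every row. */
	heap_insert(rel, tup, GetCurrentCommandId(true),
				HEAP_INSERT_SKIP_FSM, bistate);
}

FreeBulkInsertState(bistate);	/* drops the pinned buffer and the strategy */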
static void GetMultiXactIdHintBits ( MultiXactId  multi,
uint16 new_infomask,
uint16 new_infomask2 
)
static

Definition at line 6851 of file heapam.c.

References GetMultiXactIdMembers(), HEAP_KEYS_UPDATED, HEAP_XMAX_EXCL_LOCK, HEAP_XMAX_IS_MULTI, HEAP_XMAX_KEYSHR_LOCK, HEAP_XMAX_LOCK_ONLY, HEAP_XMAX_SHR_LOCK, i, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, MultiXactStatusForKeyShare, MultiXactStatusForNoKeyUpdate, MultiXactStatusForShare, MultiXactStatusForUpdate, MultiXactStatusNoKeyUpdate, MultiXactStatusUpdate, pfree(), status(), and TUPLOCK_from_mxstatus.

Referenced by compute_new_xmax_infomask(), heap_prepare_freeze_tuple(), and heap_update().

6853 {
6854  int nmembers;
6855  MultiXactMember *members;
6856  int i;
6857  uint16 bits = HEAP_XMAX_IS_MULTI;
6858  uint16 bits2 = 0;
6859  bool has_update = false;
6860  LockTupleMode strongest = LockTupleKeyShare;
6861 
6862  /*
6863  * We only use this in multis we just created, so they cannot be values
6864  * pre-pg_upgrade.
6865  */
6866  nmembers = GetMultiXactIdMembers(multi, &members, false, false);
6867 
6868  for (i = 0; i < nmembers; i++)
6869  {
6870  LockTupleMode mode;
6871 
6872  /*
6873  * Remember the strongest lock mode held by any member of the
6874  * multixact.
6875  */
6876  mode = TUPLOCK_from_mxstatus(members[i].status);
6877  if (mode > strongest)
6878  strongest = mode;
6879 
6880  /* See what other bits we need */
6881  switch (members[i].status)
6882  {
6883  case MultiXactStatusForKeyShare:
6884  case MultiXactStatusForShare:
6885  case MultiXactStatusForNoKeyUpdate:
6886  break;
6887 
6888  case MultiXactStatusForUpdate:
6889  bits2 |= HEAP_KEYS_UPDATED;
6890  break;
6891 
6892  case MultiXactStatusNoKeyUpdate:
6893  has_update = true;
6894  break;
6895 
6896  case MultiXactStatusUpdate:
6897  bits2 |= HEAP_KEYS_UPDATED;
6898  has_update = true;
6899  break;
6900  }
6901  }
6902 
6903  if (strongest == LockTupleExclusive ||
6904  strongest == LockTupleNoKeyExclusive)
6905  bits |= HEAP_XMAX_EXCL_LOCK;
6906  else if (strongest == LockTupleShare)
6907  bits |= HEAP_XMAX_SHR_LOCK;
6908  else if (strongest == LockTupleKeyShare)
6909  bits |= HEAP_XMAX_KEYSHR_LOCK;
6910 
6911  if (!has_update)
6912  bits |= HEAP_XMAX_LOCK_ONLY;
6913 
6914  if (nmembers > 0)
6915  pfree(members);
6916 
6917  *new_infomask = bits;
6918  *new_infomask2 = bits2;
6919 }
#define HEAP_XMAX_KEYSHR_LOCK
Definition: htup_details.h:179
#define HEAP_XMAX_LOCK_ONLY
Definition: htup_details.h:182
#define HEAP_XMAX_SHR_LOCK
Definition: htup_details.h:185
LockTupleMode
Definition: heapam.h:38
unsigned short uint16
Definition: c.h:267
void pfree(void *pointer)
Definition: mcxt.c:950
#define HEAP_XMAX_EXCL_LOCK
Definition: htup_details.h:181
#define HEAP_KEYS_UPDATED
Definition: htup_details.h:264
#define HEAP_XMAX_IS_MULTI
Definition: htup_details.h:194
#define TUPLOCK_from_mxstatus(status)
Definition: heapam.c:204
int i
int GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members, bool from_pgupgrade, bool onlyLock)
Definition: multixact.c:1202
static void static void status(const char *fmt,...) pg_attribute_printf(1
Definition: pg_regress.c:224
void heap2_redo ( XLogReaderState record)

Definition at line 9084 of file heapam.c.

References elog, heap_xlog_clean(), heap_xlog_cleanup_info(), heap_xlog_freeze_page(), heap_xlog_lock_updated(), heap_xlog_logical_rewrite(), heap_xlog_multi_insert(), heap_xlog_visible(), PANIC, XLOG_HEAP2_CLEAN, XLOG_HEAP2_CLEANUP_INFO, XLOG_HEAP2_FREEZE_PAGE, XLOG_HEAP2_LOCK_UPDATED, XLOG_HEAP2_MULTI_INSERT, XLOG_HEAP2_NEW_CID, XLOG_HEAP2_REWRITE, XLOG_HEAP2_VISIBLE, XLOG_HEAP_OPMASK, XLogRecGetInfo, and XLR_INFO_MASK.

9085 {
9086  uint8 info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
9087 
9088  switch (info & XLOG_HEAP_OPMASK)
9089  {
9090  case XLOG_HEAP2_CLEAN:
9091  heap_xlog_clean(record);
9092  break;
9093  case XLOG_HEAP2_FREEZE_PAGE:
9094  heap_xlog_freeze_page(record);
9095  break;
9096  case XLOG_HEAP2_CLEANUP_INFO:
9097  heap_xlog_cleanup_info(record);
9098  break;
9099  case XLOG_HEAP2_VISIBLE:
9100  heap_xlog_visible(record);
9101  break;
9102  case XLOG_HEAP2_MULTI_INSERT:
9103  heap_xlog_multi_insert(record);
9104  break;
9105  case XLOG_HEAP2_LOCK_UPDATED:
9106  heap_xlog_lock_updated(record);
9107  break;
9108  case XLOG_HEAP2_NEW_CID:
9109 
9110  /*
9111  * Nothing to do on a real replay, only used during logical
9112  * decoding.
9113  */
9114  break;
9115  case XLOG_HEAP2_REWRITE:
9116  heap_xlog_logical_rewrite(record);
9117  break;
9118  default:
9119  elog(PANIC, "heap2_redo: unknown op code %u", info);
9120  }
9121 }
void heap_abort_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6108 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, compute_infobits(), elog, END_CRIT_SECTION, ERROR, xl_heap_delete::flags, GetCurrentTransactionId(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HeapTupleHasExternal, HeapTupleHeaderIsHeapOnly, HeapTupleHeaderIsSpeculative, HeapTupleHeaderSetXmin, xl_heap_delete::infobits_set, InvalidTransactionId, IsToastRelation(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), MarkBufferDirty(), xl_heap_delete::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), ReadBuffer(), RecentGlobalXmin, REGBUF_STANDARD, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), SizeOfHeapDelete, START_CRIT_SECTION, HeapTupleHeaderData::t_choice, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_heap, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, HeapTupleFields::t_xmin, toast_delete(), TransactionIdIsValid, XLH_DELETE_IS_SUPER, XLOG_HEAP_DELETE, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and xl_heap_delete::xmax.

Referenced by ExecInsert(), and toast_delete_datum().

6109 {
6110  TransactionId xid = GetCurrentTransactionId();
6111  ItemPointer tid = &(tuple->t_self);
6112  ItemId lp;
6113  HeapTupleData tp;
6114  Page page;
6115  BlockNumber block;
6116  Buffer buffer;
6117 
6118  Assert(ItemPointerIsValid(tid));
6119 
6120  block = ItemPointerGetBlockNumber(tid);
6121  buffer = ReadBuffer(relation, block);
6122  page = BufferGetPage(buffer);
6123 
6124  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6125 
6126  /*
6127  * Page can't be all visible, we just inserted into it, and are still
6128  * running.
6129  */
6130  Assert(!PageIsAllVisible(page));
6131 
6132  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
6133  Assert(ItemIdIsNormal(lp));
6134 
6135  tp.t_tableOid = RelationGetRelid(relation);
6136  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
6137  tp.t_len = ItemIdGetLength(lp);
6138  tp.t_self = *tid;
6139 
6140  /*
6141  * Sanity check that the tuple really is a speculatively inserted tuple,
6142  * inserted by us.
6143  */
6144  if (tp.t_data->t_choice.t_heap.t_xmin != xid)
6145  elog(ERROR, "attempted to kill a tuple inserted by another transaction");
6146  if (!(IsToastRelation(relation) || HeapTupleHeaderIsSpeculative(tp.t_data)))
6147  elog(ERROR, "attempted to kill a non-speculative tuple");
6148  Assert(!HeapTupleHeaderIsHeapOnly(tp.t_data));
6149 
6150  /*
6151  * No need to check for serializable conflicts here. There is never a
6152  * need for a combocid, either. No need to extract replica identity, or
6153  * do anything special with infomask bits.
6154  */
6155 
6156  START_CRIT_SECTION();
6157 
6158  /*
6159  * The tuple will become DEAD immediately. Flag that this page
6160  * immediately is a candidate for pruning by setting xmin to
6161  * RecentGlobalXmin. That's not pretty, but it doesn't seem worth
6162  * inventing a nicer API for this.
6163  */
6164  Assert(TransactionIdIsValid(RecentGlobalXmin));
6165  PageSetPrunable(page, RecentGlobalXmin);
6166 
6167  /* store transaction information of xact deleting the tuple */
6168  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
6169  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
6170 
6171  /*
6172  * Set the tuple header xmin to InvalidTransactionId. This makes the
6173  * tuple immediately invisible everyone. (In particular, to any
6174  * transactions waiting on the speculative token, woken up later.)
6175  */
6176  HeapTupleHeaderSetXmin(tp.t_data, InvalidTransactionId);
6177 
6178  /* Clear the speculative insertion token too */
6179  tp.t_data->t_ctid = tp.t_self;
6180 
6181  MarkBufferDirty(buffer);
6182 
6183  /*
6184  * XLOG stuff
6185  *
6186  * The WAL records generated here match heap_delete(). The same recovery
6187  * routines are used.
6188  */
6189  if (RelationNeedsWAL(relation))
6190  {
6191  xl_heap_delete xlrec;
6192  XLogRecPtr recptr;
6193 
6194  xlrec.flags = XLH_DELETE_IS_SUPER;
6195  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
6196  tp.t_data->t_infomask2);
6197  xlrec.offnum = ItemPointerGetOffsetNumber(tid);
6198  xlrec.xmax = xid;
6199 
6200  XLogBeginInsert();
6201  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
6202  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6203 
6204  /* No replica identity & replication origin logged */
6205 
6206  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
6207 
6208  PageSetLSN(page, recptr);
6209  }
6210 
6211  END_CRIT_SECTION();
6212 
6213  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
6214 
6215  if (HeapTupleHasExternal(&tp))
6216  {
6217  Assert(!IsToastRelation(relation));
6218  toast_delete(relation, &tp, true);
6219  }
6220 
6221  /*
6222  * Never need to mark tuple for invalidation, since catalogs don't support
6223  * speculative insertion
6224  */
6225 
6226  /* Now we can release the buffer */
6227  ReleaseBuffer(buffer);
6228 
6229  /* count deletion, as we counted the insertion too */
6230  pgstat_count_heap_delete(relation);
6231 }
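
A hedged sketch of the speculative-insertion path that can end in heap_abort_speculative(), loosely modeled on ExecInsert(); acquiring the speculative-insertion lock and stamping the token into the tuple header are omitted, and specConflict is an assumed flag, not something defined in this file:

    /* insert with a speculative token in t_ctid instead of a real TID */
    heap_insert(rel, tuple, GetCurrentCommandId(true),
                HEAP_INSERT_SPECULATIVE, NULL);

    if (specConflict)
        heap_abort_speculative(rel, tuple);     /* tuple becomes DEAD at once */
    else
        heap_finish_speculative(rel, tuple);    /* token replaced by a real t_ctid */
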
static bool heap_acquire_tuplock ( Relation  relation,
ItemPointer  tid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool have_tuple_lock 
)
static

Definition at line 5260 of file heapam.c.

References ConditionalLockTupleTuplock, ereport, errcode(), errmsg(), ERROR, LockTupleTuplock, LockWaitBlock, LockWaitError, LockWaitSkip, and RelationGetRelationName.

Referenced by heap_delete(), heap_lock_tuple(), and heap_update().

5262 {
5263  if (*have_tuple_lock)
5264  return true;
5265 
5266  switch (wait_policy)
5267  {
5268  case LockWaitBlock:
5269  LockTupleTuplock(relation, tid, mode);
5270  break;
5271 
5272  case LockWaitSkip:
5273  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5274  return false;
5275  break;
5276 
5277  case LockWaitError:
5278  if (!ConditionalLockTupleTuplock(relation, tid, mode))
5279  ereport(ERROR,
5280  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5281  errmsg("could not obtain lock on row in relation \"%s\"",
5282  RelationGetRelationName(relation))));
5283  break;
5284  }
5285  *have_tuple_lock = true;
5286 
5287  return true;
5288 }
HeapScanDesc heap_beginscan ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1394 of file heapam.c.

References heap_beginscan_internal().

Referenced by AlterDomainNotNull(), ATRewriteTable(), copy_heap_data(), CopyTo(), DefineQueryRewrite(), pgrowlocks(), pgstat_collect_oids(), RelationFindReplTupleSeq(), SeqNext(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1396 {
1397  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1398  true, true, true, false, false, false);
1399 }
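
A minimal sequential-scan sketch (rel is an assumed, already-opened relation; heap_getnext() and heap_endscan() are declared in heapam.h):

    HeapScanDesc scan;
    HeapTuple   tuple;

    scan = heap_beginscan(rel, GetTransactionSnapshot(), 0, NULL);
    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* process tuple; it is only valid until the next heap_getnext() call */
    }
    heap_endscan(scan);
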
HeapScanDesc heap_beginscan_bm ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key 
)

Definition at line 1422 of file heapam.c.

References heap_beginscan_internal().

Referenced by ExecInitBitmapHeapScan().

1424 {
1425  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1426  false, false, true, true, false, false);
1427 }
HeapScanDesc heap_beginscan_catalog ( Relation  relation,
int  nkeys,
ScanKey  key 
)

Definition at line 1402 of file heapam.c.

References GetCatalogSnapshot(), heap_beginscan_internal(), RegisterSnapshot(), and RelationGetRelid.

Referenced by AlterTableMoveAll(), AlterTableSpaceOptions(), boot_openrel(), check_db_file_conflict(), createdb(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), objectsInSchemaToOids(), ReindexMultipleTables(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), ThereIsAtLeastOneRole(), and vac_truncate_clog().

1403 {
1404  Oid relid = RelationGetRelid(relation);
1405  Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
1406 
1407  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1408  true, true, true, false, false, true);
1409 }
static HeapScanDesc heap_beginscan_internal ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
ParallelHeapScanDesc  parallel_scan,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode,
bool  is_bitmapscan,
bool  is_samplescan,
bool  temp_snap 
)
static

Definition at line 1440 of file heapam.c.

References initscan(), IsMVCCSnapshot, NULL, palloc(), PredicateLockRelation(), RelationGetRelid, RelationIncrementReferenceCount(), HeapScanDescData::rs_allow_strat, HeapScanDescData::rs_allow_sync, HeapScanDescData::rs_bitmapscan, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_parallel, HeapScanDescData::rs_rd, HeapScanDescData::rs_samplescan, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and HeapTupleData::t_tableOid.

Referenced by heap_beginscan(), heap_beginscan_bm(), heap_beginscan_catalog(), heap_beginscan_parallel(), heap_beginscan_sampling(), and heap_beginscan_strat().

1449 {
1450  HeapScanDesc scan;
1451 
1452  /*
1453  * increment relation ref count while scanning relation
1454  *
1455  * This is just to make really sure the relcache entry won't go away while
1456  * the scan has a pointer to it. Caller should be holding the rel open
1457  * anyway, so this is redundant in all normal scenarios...
1458  */
1459  RelationIncrementReferenceCount(relation);
1460 
1461  /*
1462  * allocate and initialize scan descriptor
1463  */
1464  scan = (HeapScanDesc) palloc(sizeof(HeapScanDescData));
1465 
1466  scan->rs_rd = relation;
1467  scan->rs_snapshot = snapshot;
1468  scan->rs_nkeys = nkeys;
1469  scan->rs_bitmapscan = is_bitmapscan;
1470  scan->rs_samplescan = is_samplescan;
1471  scan->rs_strategy = NULL; /* set in initscan */
1472  scan->rs_allow_strat = allow_strat;
1473  scan->rs_allow_sync = allow_sync;
1474  scan->rs_temp_snap = temp_snap;
1475  scan->rs_parallel = parallel_scan;
1476 
1477  /*
1478  * we can use page-at-a-time mode if it's an MVCC-safe snapshot
1479  */
1480  scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(snapshot);
1481 
1482  /*
1483  * For a seqscan in a serializable transaction, acquire a predicate lock
1484  * on the entire relation. This is required not only to lock all the
1485  * matching tuples, but also to conflict with new insertions into the
1486  * table. In an indexscan, we take page locks on the index pages covering
1487  * the range specified in the scan qual, but in a heap scan there is
1488  * nothing more fine-grained to lock. A bitmap scan is a different story,
1489  * there we have already scanned the index and locked the index pages
1490  * covering the predicate. But in that case we still have to lock any
1491  * matching heap tuples.
1492  */
1493  if (!is_bitmapscan)
1494  PredicateLockRelation(relation, snapshot);
1495 
1496  /* we only need to set this up once */
1497  scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
1498 
1499  /*
1500  * we do this here instead of in initscan() because heap_rescan also calls
1501  * initscan() and we don't want to allocate memory again
1502  */
1503  if (nkeys > 0)
1504  scan->rs_key = (ScanKey) palloc(sizeof(ScanKeyData) * nkeys);
1505  else
1506  scan->rs_key = NULL;
1507 
1508  initscan(scan, key, false);
1509 
1510  return scan;
1511 }
HeapScanDesc heap_beginscan_parallel ( Relation  relation,
ParallelHeapScanDesc  parallel_scan 
)

Definition at line 1653 of file heapam.c.

References Assert, heap_beginscan_internal(), ParallelHeapScanDescData::phs_relid, ParallelHeapScanDescData::phs_snapshot_data, RegisterSnapshot(), RelationGetRelid, and RestoreSnapshot().

Referenced by ExecSeqScanInitializeDSM(), and ExecSeqScanInitializeWorker().

1654 {
1655  Snapshot snapshot;
1656 
1657  Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
1658  snapshot = RestoreSnapshot(parallel_scan->phs_snapshot_data);
1659  RegisterSnapshot(snapshot);
1660 
1661  return heap_beginscan_internal(relation, snapshot, 0, NULL, parallel_scan,
1662  true, true, true, false, false, true);
1663 }
HeapScanDesc heap_beginscan_sampling ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync,
bool  allow_pagemode 
)

Definition at line 1430 of file heapam.c.

References heap_beginscan_internal().

Referenced by tablesample_init().

1433 {
1434  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1435  allow_strat, allow_sync, allow_pagemode,
1436  false, true, false);
1437 }
HeapScanDesc heap_beginscan_strat ( Relation  relation,
Snapshot  snapshot,
int  nkeys,
ScanKey  key,
bool  allow_strat,
bool  allow_sync 
)

Definition at line 1412 of file heapam.c.

References heap_beginscan_internal().

Referenced by IndexBuildHeapRangeScan(), IndexCheckExclusion(), pgstat_heap(), systable_beginscan(), and validate_index_heapscan().

1415 {
1416  return heap_beginscan_internal(relation, snapshot, nkeys, key, NULL,
1417  allow_strat, allow_sync, true,
1418  false, false, false);
1419 }
HTSU_Result heap_delete ( Relation  relation,
ItemPointer  tid,
CommandId  cid,
Snapshot  crosscheck,
bool  wait,
HeapUpdateFailureData hufd 
)

Definition at line 3014 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, ExtractReplicaIdentity(), xl_heap_delete::flags, GetCurrentTransactionId(), heap_acquire_tuplock(), heap_freetuple(), HEAP_KEYS_UPDATED, HEAP_MOVED, HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HeapTupleBeingUpdated, HeapTupleHasExternal, HeapTupleHeaderAdjustCmax(), HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetCmax, HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSatisfiesVisibility, HeapTupleSelfUpdated, HeapTupleUpdated, xl_heap_delete::infobits_set, InvalidBuffer, InvalidCommandId, InvalidSnapshot, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), LockTupleExclusive, LockWaitBlock, log_heap_new_cid(), MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusUpdate, NULL, xl_heap_delete::offnum, PageClearAllVisible, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, PageSetPrunable, pgstat_count_heap_delete(), RelationData::rd_rel, ReadBuffer(), REGBUF_STANDARD, RelationGetRelid, RelationIsAccessibleInLogicalDecoding, RelationNeedsWAL, ReleaseBuffer(), RELKIND_MATVIEW, RELKIND_RELATION, REPLICA_IDENTITY_FULL, result, SizeOfHeapDelete, SizeOfHeapHeader, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, toast_delete(), TransactionIdEquals, TransactionIdIsCurrentTransactionId(), UnlockReleaseBuffer(), UnlockTupleTuplock, UpdateXmaxHintBits(), visibilitymap_clear(), visibilitymap_pin(), VISIBILITYMAP_VALID_BITS, XactLockTableWait(), XLH_DELETE_ALL_VISIBLE_CLEARED, XLH_DELETE_CONTAINS_OLD_KEY, XLH_DELETE_CONTAINS_OLD_TUPLE, XLOG_HEAP_DELETE, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLogSetRecordFlags(), XLTW_Delete, HeapUpdateFailureData::xmax, xl_heap_delete::xmax, and xmax_infomask_changed().

Referenced by ExecDelete(), and simple_heap_delete().

3017 {
3018  HTSU_Result result;
3019  TransactionId xid = GetCurrentTransactionId();
3020  ItemId lp;
3021  HeapTupleData tp;
3022  Page page;
3023  BlockNumber block;
3024  Buffer buffer;
3025  Buffer vmbuffer = InvalidBuffer;
3026  TransactionId new_xmax;
3027  uint16 new_infomask,
3028  new_infomask2;
3029  bool have_tuple_lock = false;
3030  bool iscombo;
3031  bool all_visible_cleared = false;
3032  HeapTuple old_key_tuple = NULL; /* replica identity of the tuple */
3033  bool old_key_copied = false;
3034 
3035  Assert(ItemPointerIsValid(tid));
3036 
3037  /*
3038  * Forbid this during a parallel operation, lest it allocate a combocid.
3039  * Other workers might need that combocid for visibility checks, and we
3040  * have no provision for broadcasting it to them.
3041  */
3042  if (IsInParallelMode())
3043  ereport(ERROR,
3044  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
3045  errmsg("cannot delete tuples during a parallel operation")));
3046 
3047  block = ItemPointerGetBlockNumber(tid);
3048  buffer = ReadBuffer(relation, block);
3049  page = BufferGetPage(buffer);
3050 
3051  /*
3052  * Before locking the buffer, pin the visibility map page if it appears to
3053  * be necessary. Since we haven't got the lock yet, someone else might be
3054  * in the middle of changing this, so we'll need to recheck after we have
3055  * the lock.
3056  */
3057  if (PageIsAllVisible(page))
3058  visibilitymap_pin(relation, block, &vmbuffer);
3059 
3060  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3061 
3062  /*
3063  * If we didn't pin the visibility map page and the page has become all
3064  * visible while we were busy locking the buffer, we'll have to unlock and
3065  * re-lock, to avoid holding the buffer lock across an I/O. That's a bit
3066  * unfortunate, but hopefully shouldn't happen often.
3067  */
3068  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
3069  {
3070  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3071  visibilitymap_pin(relation, block, &vmbuffer);
3072  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3073  }
3074 
3075  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
3076  Assert(ItemIdIsNormal(lp));
3077 
3078  tp.t_tableOid = RelationGetRelid(relation);
3079  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
3080  tp.t_len = ItemIdGetLength(lp);
3081  tp.t_self = *tid;
3082 
3083 l1:
3084  result = HeapTupleSatisfiesUpdate(&tp, cid, buffer);
3085 
3086  if (result == HeapTupleInvisible)
3087  {
3088  UnlockReleaseBuffer(buffer);
3089  ereport(ERROR,
3090  (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
3091  errmsg("attempted to delete invisible tuple")));
3092  }
3093  else if (result == HeapTupleBeingUpdated && wait)
3094  {
3095  TransactionId xwait;
3096  uint16 infomask;
3097 
3098  /* must copy state data before unlocking buffer */
3099  xwait = HeapTupleHeaderGetRawXmax(tp.t_data);
3100  infomask = tp.t_data->t_infomask;
3101 
3102  /*
3103  * Sleep until concurrent transaction ends -- except when there's a
3104  * single locker and it's our own transaction. Note we don't care
3105  * which lock mode the locker has, because we need the strongest one.
3106  *
3107  * Before sleeping, we need to acquire tuple lock to establish our
3108  * priority for the tuple (see heap_lock_tuple). LockTuple will
3109  * release us when we are next-in-line for the tuple.
3110  *
3111  * If we are forced to "start over" below, we keep the tuple lock;
3112  * this arranges that we stay at the head of the line while rechecking
3113  * tuple state.
3114  */
3115  if (infomask & HEAP_XMAX_IS_MULTI)
3116  {
3117  /* wait for multixact */
3118  if (DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
3119  LockTupleExclusive))
3120  {
3121  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3122 
3123  /* acquire tuple lock, if necessary */
3124  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3125  LockWaitBlock, &have_tuple_lock);
3126 
3127  /* wait for multixact */
3128  MultiXactIdWait((MultiXactId) xwait, MultiXactStatusUpdate, infomask,
3129  relation, &(tp.t_self), XLTW_Delete,
3130  NULL);
3131  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3132 
3133  /*
3134  * If xwait had just locked the tuple then some other xact
3135  * could update this tuple before we get to this point. Check
3136  * for xmax change, and start over if so.
3137  */
3138  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3139  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3140  xwait))
3141  goto l1;
3142  }
3143 
3144  /*
3145  * You might think the multixact is necessarily done here, but not
3146  * so: it could have surviving members, namely our own xact or
3147  * other subxacts of this backend. It is legal for us to delete
3148  * the tuple in either case, however (the latter case is
3149  * essentially a situation of upgrading our former shared lock to
3150  * exclusive). We don't bother changing the on-disk hint bits
3151  * since we are about to overwrite the xmax altogether.
3152  */
3153  }
3154  else if (!TransactionIdIsCurrentTransactionId(xwait))
3155  {
3156  /*
3157  * Wait for regular transaction to end; but first, acquire tuple
3158  * lock.
3159  */
3160  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3161  heap_acquire_tuplock(relation, &(tp.t_self), LockTupleExclusive,
3162  LockWaitBlock, &have_tuple_lock);
3163  XactLockTableWait(xwait, relation, &(tp.t_self), XLTW_Delete);
3164  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
3165 
3166  /*
3167  * xwait is done, but if xwait had just locked the tuple then some
3168  * other xact could update this tuple before we get to this point.
3169  * Check for xmax change, and start over if so.
3170  */
3171  if (xmax_infomask_changed(tp.t_data->t_infomask, infomask) ||
3172  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tp.t_data),
3173  xwait))
3174  goto l1;
3175 
3176  /* Otherwise check if it committed or aborted */
3177  UpdateXmaxHintBits(tp.t_data, buffer, xwait);
3178  }
3179 
3180  /*
3181  * We may overwrite if previous xmax aborted, or if it committed but
3182  * only locked the tuple without updating it.
3183  */
3184  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
3185  HEAP_XMAX_IS_LOCKED_ONLY(tp.t_data->t_infomask) ||
3186  HeapTupleHeaderIsOnlyLocked(tp.t_data))
3187  result = HeapTupleMayBeUpdated;
3188  else
3189  result = HeapTupleUpdated;
3190  }
3191 
3192  if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated)
3193  {
3194  /* Perform additional check for transaction-snapshot mode RI updates */
3195  if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer))
3196  result = HeapTupleUpdated;
3197  }
3198 
3199  if (result != HeapTupleMayBeUpdated)
3200  {
3201  Assert(result == HeapTupleSelfUpdated ||
3202  result == HeapTupleUpdated ||
3203  result == HeapTupleBeingUpdated);
3204  Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID));
3205  hufd->ctid = tp.t_data->t_ctid;
3206  hufd->xmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
3207  if (result == HeapTupleSelfUpdated)
3208  hufd->cmax = HeapTupleHeaderGetCmax(tp.t_data);
3209  else
3210  hufd->cmax = InvalidCommandId;
3211  UnlockReleaseBuffer(buffer);
3212  if (have_tuple_lock)
3213  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3214  if (vmbuffer != InvalidBuffer)
3215  ReleaseBuffer(vmbuffer);
3216  return result;
3217  }
3218 
3219  /*
3220  * We're about to do the actual delete -- check for conflict first, to
3221  * avoid possibly having to roll back work we've just done.
3222  *
3223  * This is safe without a recheck as long as there is no possibility of
3224  * another process scanning the page between this check and the delete
3225  * being visible to the scan (i.e., an exclusive buffer content lock is
3226  * continuously held from this point until the tuple delete is visible).
3227  */
3228  CheckForSerializableConflictIn(relation, &tp, buffer);
3229 
3230  /* replace cid with a combo cid if necessary */
3231  HeapTupleHeaderAdjustCmax(tp.t_data, &cid, &iscombo);
3232 
3233  /*
3234  * Compute replica identity tuple before entering the critical section so
3235  * we don't PANIC upon a memory allocation failure.
3236  */
3237  old_key_tuple = ExtractReplicaIdentity(relation, &tp, true, &old_key_copied);
3238 
3239  /*
3240  * If this is the first possibly-multixact-able operation in the current
3241  * transaction, set my per-backend OldestMemberMXactId setting. We can be
3242  * certain that the transaction will never become a member of any older
3243  * MultiXactIds than that. (We have to do this even if we end up just
3244  * using our own TransactionId below, since some other backend could
3245  * incorporate our XID into a MultiXact immediately afterwards.)
3246  */
3247  MultiXactIdSetOldestMember();
3248 
3249  compute_new_xmax_infomask(HeapTupleHeaderGetRawXmax(tp.t_data),
3250  tp.t_data->t_infomask, tp.t_data->t_infomask2,
3251  xid, LockTupleExclusive, true,
3252  &new_xmax, &new_infomask, &new_infomask2);
3253 
3254  START_CRIT_SECTION();
3255 
3256  /*
3257  * If this transaction commits, the tuple will become DEAD sooner or
3258  * later. Set flag that this page is a candidate for pruning once our xid
3259  * falls below the OldestXmin horizon. If the transaction finally aborts,
3260  * the subsequent page pruning will be a no-op and the hint will be
3261  * cleared.
3262  */
3263  PageSetPrunable(page, xid);
3264 
3265  if (PageIsAllVisible(page))
3266  {
3267  all_visible_cleared = true;
3268  PageClearAllVisible(page);
3269  visibilitymap_clear(relation, BufferGetBlockNumber(buffer),
3270  vmbuffer, VISIBILITYMAP_VALID_BITS);
3271  }
3272 
3273  /* store transaction information of xact deleting the tuple */
3274  tp.t_data->t_infomask &= ~(HEAP_XMAX_BITS | HEAP_MOVED);
3275  tp.t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;
3276  tp.t_data->t_infomask |= new_infomask;
3277  tp.t_data->t_infomask2 |= new_infomask2;
3278  HeapTupleHeaderClearHotUpdated(tp.t_data);
3279  HeapTupleHeaderSetXmax(tp.t_data, new_xmax);
3280  HeapTupleHeaderSetCmax(tp.t_data, cid, iscombo);
3281  /* Make sure there is no forward chain link in t_ctid */
3282  tp.t_data->t_ctid = tp.t_self;
3283 
3284  MarkBufferDirty(buffer);
3285 
3286  /*
3287  * XLOG stuff
3288  *
3289  * NB: heap_abort_speculative() uses the same xlog record and replay
3290  * routines.
3291  */
3292  if (RelationNeedsWAL(relation))
3293  {
3294  xl_heap_delete xlrec;
3295  XLogRecPtr recptr;
3296 
3297  /* For logical decode we need combocids to properly decode the catalog */
3298  if (RelationIsAccessibleInLogicalDecoding(relation))
3299  log_heap_new_cid(relation, &tp);
3300 
3301  xlrec.flags = all_visible_cleared ? XLH_DELETE_ALL_VISIBLE_CLEARED : 0;
3302  xlrec.infobits_set = compute_infobits(tp.t_data->t_infomask,
3303  tp.t_data->t_infomask2);
3304  xlrec.offnum = ItemPointerGetOffsetNumber(&tp.t_self);
3305  xlrec.xmax = new_xmax;
3306 
3307  if (old_key_tuple != NULL)
3308  {
3309  if (relation->rd_rel->relreplident == REPLICA_IDENTITY_FULL)
3310  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_TUPLE;
3311  else
3312  xlrec.flags |= XLH_DELETE_CONTAINS_OLD_KEY;
3313  }
3314 
3315  XLogBeginInsert();
3316  XLogRegisterData((char *) &xlrec, SizeOfHeapDelete);
3317 
3318  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
3319 
3320  /*
3321  * Log replica identity of the deleted tuple if there is one
3322  */
3323  if (old_key_tuple != NULL)
3324  {
3325  xl_heap_header xlhdr;
3326 
3327  xlhdr.t_infomask2 = old_key_tuple->t_data->t_infomask2;
3328  xlhdr.t_infomask = old_key_tuple->t_data->t_infomask;
3329  xlhdr.t_hoff = old_key_tuple->t_data->t_hoff;
3330 
3331  XLogRegisterData((char *) &xlhdr, SizeOfHeapHeader);
3332  XLogRegisterData((char *) old_key_tuple->t_data
3333  + SizeofHeapTupleHeader,
3334  old_key_tuple->t_len
3335  - SizeofHeapTupleHeader);
3336  }
3337 
3338  /* filtering by origin on a row level is much more efficient */
3339  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
3340 
3341  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_DELETE);
3342 
3343  PageSetLSN(page, recptr);
3344  }
3345 
3346  END_CRIT_SECTION();
3347 
3348  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
3349 
3350  if (vmbuffer != InvalidBuffer)
3351  ReleaseBuffer(vmbuffer);
3352 
3353  /*
3354  * If the tuple has toasted out-of-line attributes, we need to delete
3355  * those items too. We have to do this before releasing the buffer
3356  * because we need to look at the contents of the tuple, but it's OK to
3357  * release the content lock on the buffer first.
3358  */
3359  if (relation->rd_rel->relkind != RELKIND_RELATION &&
3360  relation->rd_rel->relkind != RELKIND_MATVIEW)
3361  {
3362  /* toast table entries should never be recursively toasted */
3363  Assert(!HeapTupleHasExternal(&tp));
3364  }
3365  else if (HeapTupleHasExternal(&tp))
3366  toast_delete(relation, &tp, false);
3367 
3368  /*
3369  * Mark tuple for invalidation from system caches at next command
3370  * boundary. We have to do this before releasing the buffer because we
3371  * need to look at the contents of the tuple.
3372  */
3373  CacheInvalidateHeapTuple(relation, &tp, NULL);
3374 
3375  /* Now we can release the buffer */
3376  ReleaseBuffer(buffer);
3377 
3378  /*
3379  * Release the lmgr tuple lock, if we had it.
3380  */
3381  if (have_tuple_lock)
3382  UnlockTupleTuplock(relation, &(tp.t_self), LockTupleExclusive);
3383 
3384  pgstat_count_heap_delete(relation);
3385 
3386  if (old_key_tuple != NULL && old_key_copied)
3387  heap_freetuple(old_key_tuple);
3388 
3389  return HeapTupleMayBeUpdated;
3390 }
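
A hedged sketch of a direct caller handling the HTSU_Result codes above (rel and tuple are assumed inputs; simple_heap_delete() in this file wraps the same call and simply raises an error for anything other than HeapTupleMayBeUpdated):

    HeapUpdateFailureData hufd;
    HTSU_Result result;

    result = heap_delete(rel, &tuple->t_self,
                         GetCurrentCommandId(true), InvalidSnapshot,
                         true /* wait */ , &hufd);
    switch (result)
    {
        case HeapTupleMayBeUpdated:
            break;              /* deleted */
        case HeapTupleSelfUpdated:
        case HeapTupleUpdated:
            /* concurrent update or delete; hufd.ctid and hufd.xmax describe it */
            break;
        default:
            elog(ERROR, "unexpected heap_delete status: %u", result);
    }
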
void heap_endscan ( HeapScanDesc  scan)

Definition at line 1581 of file heapam.c.

References BufferIsValid, FreeAccessStrategy(), pfree(), RelationDecrementReferenceCount(), ReleaseBuffer(), HeapScanDescData::rs_cbuf, HeapScanDescData::rs_key, HeapScanDescData::rs_rd, HeapScanDescData::rs_snapshot, HeapScanDescData::rs_strategy, HeapScanDescData::rs_temp_snap, and UnregisterSnapshot().

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), ExecEndBitmapHeapScan(), ExecEndSampleScan(), ExecEndSeqScan(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), systable_endscan(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1582 {
1583  /* Note: no locking manipulations needed */
1584 
1585  /*
1586  * unpin scan buffers
1587  */
1588  if (BufferIsValid(scan->rs_cbuf))
1589  ReleaseBuffer(scan->rs_cbuf);
1590 
1591  /*
1592  * decrement relation reference count and free scan descriptor storage
1593  */
1594  RelationDecrementReferenceCount(scan->rs_rd);
1595 
1596  if (scan->rs_key)
1597  pfree(scan->rs_key);
1598 
1599  if (scan->rs_strategy != NULL)
1600  FreeAccessStrategy(scan->rs_strategy);
1601 
1602  if (scan->rs_temp_snap)
1603  UnregisterSnapshot(scan->rs_snapshot);
1604 
1605  pfree(scan);
1606 }
void heap_execute_freeze_tuple ( HeapTupleHeader  tuple,
xl_heap_freeze_tuple frz 
)

Definition at line 6802 of file heapam.c.

References FrozenTransactionId, xl_heap_freeze_tuple::frzflags, HeapTupleHeaderSetXmax, HeapTupleHeaderSetXvac, InvalidTransactionId, HeapTupleHeaderData::t_infomask, xl_heap_freeze_tuple::t_infomask, HeapTupleHeaderData::t_infomask2, xl_heap_freeze_tuple::t_infomask2, XLH_FREEZE_XVAC, XLH_INVALID_XVAC, and xl_heap_freeze_tuple::xmax.

Referenced by heap_freeze_tuple(), heap_xlog_freeze_page(), and lazy_scan_heap().

6803 {
6804  HeapTupleHeaderSetXmax(tuple, frz->xmax);
6805 
6806  if (frz->frzflags & XLH_FREEZE_XVAC)
6807  HeapTupleHeaderSetXvac(tuple, FrozenTransactionId);
6808 
6809  if (frz->frzflags & XLH_INVALID_XVAC)
6810  HeapTupleHeaderSetXvac(tuple, InvalidTransactionId);
6811 
6812  tuple->t_infomask = frz->t_infomask;
6813  tuple->t_infomask2 = frz->t_infomask2;
6814 }
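
heap_execute_freeze_tuple() only applies a previously prepared xl_heap_freeze_tuple; a caller that wants the combined prepare-and-apply behaviour uses heap_freeze_tuple() (documented below). A hedged sketch, with htup, cutoff_xid and cutoff_multi as assumed inputs:

    if (heap_freeze_tuple(htup, cutoff_xid, cutoff_multi))
    {
        /*
         * xmax/xvac and the infomask bits were rewritten in place; nothing is
         * WAL-logged here, so a caller operating on a shared buffer must mark
         * the page dirty and log the change itself.
         */
    }
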
bool heap_fetch ( Relation  relation,
Snapshot  snapshot,
HeapTuple  tuple,
Buffer userbuf,
bool  keep_buf,
Relation  stats_relation 
)

Definition at line 1865 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, BufferGetPage, CheckForSerializableConflictOut(), HeapTupleSatisfiesVisibility, InvalidBuffer, ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), NULL, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, pgstat_count_heap_fetch, PredicateLockTuple(), ReadBuffer(), RelationGetRelid, ReleaseBuffer(), HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, and TestForOldSnapshot().

Referenced by AfterTriggerExecute(), EvalPlanQualFetch(), EvalPlanQualFetchRowMarks(), ExecCheckTIDVisible(), ExecDelete(), ExecLockRows(), heap_lock_updated_tuple_rec(), and TidNext().

1871 {
1872  ItemPointer tid = &(tuple->t_self);
1873  ItemId lp;
1874  Buffer buffer;
1875  Page page;
1876  OffsetNumber offnum;
1877  bool valid;
1878 
1879  /*
1880  * Fetch and pin the appropriate page of the relation.
1881  */
1882  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
1883 
1884  /*
1885  * Need share lock on buffer to examine tuple commit status.
1886  */
1887  LockBuffer(buffer, BUFFER_LOCK_SHARE);
1888  page = BufferGetPage(buffer);
1889  TestForOldSnapshot(snapshot, relation, page);
1890 
1891  /*
1892  * We'd better check for out-of-range offnum in case of VACUUM since the
1893  * TID was obtained.
1894  */
1895  offnum = ItemPointerGetOffsetNumber(tid);
1896  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
1897  {
1898  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1899  if (keep_buf)
1900  *userbuf = buffer;
1901  else
1902  {
1903  ReleaseBuffer(buffer);
1904  *userbuf = InvalidBuffer;
1905  }
1906  tuple->t_data = NULL;
1907  return false;
1908  }
1909 
1910  /*
1911  * get the item line pointer corresponding to the requested tid
1912  */
1913  lp = PageGetItemId(page, offnum);
1914 
1915  /*
1916  * Must check for deleted tuple.
1917  */
1918  if (!ItemIdIsNormal(lp))
1919  {
1920  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1921  if (keep_buf)
1922  *userbuf = buffer;
1923  else
1924  {
1925  ReleaseBuffer(buffer);
1926  *userbuf = InvalidBuffer;
1927  }
1928  tuple->t_data = NULL;
1929  return false;
1930  }
1931 
1932  /*
1933  * fill in *tuple fields
1934  */
1935  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
1936  tuple->t_len = ItemIdGetLength(lp);
1937  tuple->t_tableOid = RelationGetRelid(relation);
1938 
1939  /*
1940  * check time qualification of tuple, then release lock
1941  */
1942  valid = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
1943 
1944  if (valid)
1945  PredicateLockTuple(relation, tuple, snapshot);
1946 
1947  CheckForSerializableConflictOut(valid, relation, tuple, buffer, snapshot);
1948 
1949  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
1950 
1951  if (valid)
1952  {
1953  /*
1954  * All checks passed, so return the tuple as valid. Caller is now
1955  * responsible for releasing the buffer.
1956  */
1957  *userbuf = buffer;
1958 
1959  /* Count the successful fetch against appropriate rel, if any */
1960  if (stats_relation != NULL)
1961  pgstat_count_heap_fetch(stats_relation);
1962 
1963  return true;
1964  }
1965 
1966  /* Tuple failed time qual, but maybe caller wants to see it anyway. */
1967  if (keep_buf)
1968  *userbuf = buffer;
1969  else
1970  {
1971  ReleaseBuffer(buffer);
1972  *userbuf = InvalidBuffer;
1973  }
1974 
1975  return false;
1976 }
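
A minimal point-lookup sketch (rel and tid are assumed inputs; the snapshot choice is the caller's). On success the caller owns the buffer pin returned in buf:

    HeapTupleData tuple;
    Buffer      buf;

    tuple.t_self = *tid;
    if (heap_fetch(rel, GetActiveSnapshot(), &tuple, &buf, false, NULL))
    {
        /* tuple.t_data points into the pinned buffer */
        ReleaseBuffer(buf);
    }
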
void heap_finish_speculative ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6017 of file heapam.c.

References Assert, buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, elog, END_CRIT_SECTION, ERROR, HeapTupleHeaderIsSpeculative, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), MaxOffsetNumber, NULL, xl_heap_confirm::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapConfirm, SpecTokenOffsetNumber, START_CRIT_SECTION, StaticAssertStmt, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_CONFIRM, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ExecInsert().

6018 {
6019  Buffer buffer;
6020  Page page;
6021  OffsetNumber offnum;
6022  ItemId lp = NULL;
6023  HeapTupleHeader htup;
6024 
6025  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6026  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6027  page = (Page) BufferGetPage(buffer);
6028 
6029  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6030  if (PageGetMaxOffsetNumber(page) >= offnum)
6031  lp = PageGetItemId(page, offnum);
6032 
6033  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6034  elog(ERROR, "invalid lp");
6035 
6036  htup = (HeapTupleHeader) PageGetItem(page, lp);
6037 
6038  /* SpecTokenOffsetNumber should be distinguishable from any real offset */
6039  StaticAssertStmt(MaxOffsetNumber < SpecTokenOffsetNumber,
6040  "invalid speculative token constant");
6041 
6042  /* NO EREPORT(ERROR) from here till changes are logged */
6043  START_CRIT_SECTION();
6044 
6045  Assert(HeapTupleHeaderIsSpeculative(htup));
6046 
6047  MarkBufferDirty(buffer);
6048 
6049  /*
6050  * Replace the speculative insertion token with a real t_ctid, pointing to
6051  * itself like it does on regular tuples.
6052  */
6053  htup->t_ctid = tuple->t_self;
6054 
6055  /* XLOG stuff */
6056  if (RelationNeedsWAL(relation))
6057  {
6058  xl_heap_confirm xlrec;
6059  XLogRecPtr recptr;
6060 
6061  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6062 
6063  XLogBeginInsert();
6064 
6065  /* We want the same filtering on this as on a plain insert */
6066  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
6067 
6068  XLogRegisterData((char *) &xlrec, SizeOfHeapConfirm);
6069  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6070 
6071  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_CONFIRM);
6072 
6073  PageSetLSN(page, recptr);
6074  }
6075 
6076  END_CRIT_SECTION();
6077 
6078  UnlockReleaseBuffer(buffer);
6079 }
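A hedged usage sketch (not part of heapam.c): roughly how an ExecInsert-style caller drives the speculative-insertion protocol around this function. The variables rel and tup, and the surrounding conflict handling, are assumptions of this example.

    uint32      specToken;

    /* take the speculative-insertion lock and stamp the token into the tuple */
    specToken = SpeculativeInsertionLockAcquire(GetCurrentTransactionId());
    HeapTupleHeaderSetSpeculativeToken(tup->t_data, specToken);

    heap_insert(rel, tup, GetCurrentCommandId(true), HEAP_INSERT_SPECULATIVE, NULL);

    /* ... insert index entries; assuming no conflict was detected ... */
    heap_finish_speculative(rel, tup);   /* replaces the token with a real t_ctid */
    SpeculativeInsertionLockRelease(GetCurrentTransactionId());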
bool heap_freeze_tuple ( HeapTupleHeader  tuple,
TransactionId  cutoff_xid,
TransactionId  cutoff_multi 
)

Definition at line 6823 of file heapam.c.

References heap_execute_freeze_tuple(), and heap_prepare_freeze_tuple().

Referenced by rewrite_heap_tuple().

6825 {
6826  xl_heap_freeze_tuple frz;
6827  bool do_freeze;
6828  bool tuple_totally_frozen;
6829 
6830  do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
6831  &frz, &tuple_totally_frozen);
6832 
6833  /*
6834  * Note that because this is not a WAL-logged operation, we don't need to
6835  * fill in the offset in the freeze record.
6836  */
6837 
6838  if (do_freeze)
6839  heap_execute_freeze_tuple(tuple, &frz);
6840  return do_freeze;
6841 }
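A hedged sketch of the intended call pattern, in the spirit of rewrite_heap_tuple(); new_tuple, FreezeXid and MultiXactCutoff are assumptions of this example, not names defined here.

    /* freeze the copied tuple in place if its xmin/xmax predate the cutoffs */
    if (heap_freeze_tuple(new_tuple->t_data, FreezeXid, MultiXactCutoff))
    {
        /* header fields were overwritten; no WAL is emitted here, so the
         * caller (e.g. the table-rewrite machinery) must provide durability */
    }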
void heap_get_latest_tid ( Relation  relation,
Snapshot  snapshot,
ItemPointer  tid 
)

Definition at line 2170 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BufferGetPage, CheckForSerializableConflictOut(), elog, ERROR, HEAP_XMAX_INVALID, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleHeaderIsOnlyLocked(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdIsNormal, ItemPointerEquals(), ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerIsValid, LockBuffer(), PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, ReadBuffer(), RelationGetNumberOfBlocks, RelationGetRelationName, RelationGetRelid, HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TestForOldSnapshot(), TransactionIdEquals, TransactionIdIsValid, and UnlockReleaseBuffer().

Referenced by currtid_byrelname(), currtid_byreloid(), and TidNext().

2173 {
2174  BlockNumber blk;
2175  ItemPointerData ctid;
2176  TransactionId priorXmax;
2177 
2178  /* this is to avoid Assert failures on bad input */
2179  if (!ItemPointerIsValid(tid))
2180  return;
2181 
2182  /*
2183  * Since this can be called with user-supplied TID, don't trust the input
2184  * too much. (RelationGetNumberOfBlocks is an expensive check, so we
2185  * don't check t_ctid links again this way. Note that it would not do to
2186  * call it just once and save the result, either.)
2187  */
2188  blk = ItemPointerGetBlockNumber(tid);
2189  if (blk >= RelationGetNumberOfBlocks(relation))
2190  elog(ERROR, "block number %u is out of range for relation \"%s\"",
2191  blk, RelationGetRelationName(relation));
2192 
2193  /*
2194  * Loop to chase down t_ctid links. At top of loop, ctid is the tuple we
2195  * need to examine, and *tid is the TID we will return if ctid turns out
2196  * to be bogus.
2197  *
2198  * Note that we will loop until we reach the end of the t_ctid chain.
2199  * Depending on the snapshot passed, there might be at most one visible
2200  * version of the row, but we don't try to optimize for that.
2201  */
2202  ctid = *tid;
2203  priorXmax = InvalidTransactionId; /* cannot check first XMIN */
2204  for (;;)
2205  {
2206  Buffer buffer;
2207  Page page;
2208  OffsetNumber offnum;
2209  ItemId lp;
2210  HeapTupleData tp;
2211  bool valid;
2212 
2213  /*
2214  * Read, pin, and lock the page.
2215  */
2216  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&ctid));
2217  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2218  page = BufferGetPage(buffer);
2219  TestForOldSnapshot(snapshot, relation, page);
2220 
2221  /*
2222  * Check for bogus item number. This is not treated as an error
2223  * condition because it can happen while following a t_ctid link. We
2224  * just assume that the prior tid is OK and return it unchanged.
2225  */
2226  offnum = ItemPointerGetOffsetNumber(&ctid);
2227  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(page))
2228  {
2229  UnlockReleaseBuffer(buffer);
2230  break;
2231  }
2232  lp = PageGetItemId(page, offnum);
2233  if (!ItemIdIsNormal(lp))
2234  {
2235  UnlockReleaseBuffer(buffer);
2236  break;
2237  }
2238 
2239  /* OK to access the tuple */
2240  tp.t_self = ctid;
2241  tp.t_data = (HeapTupleHeader) PageGetItem(page, lp);
2242  tp.t_len = ItemIdGetLength(lp);
2243  tp.t_tableOid = RelationGetRelid(relation);
2244 
2245  /*
2246  * After following a t_ctid link, we might arrive at an unrelated
2247  * tuple. Check for XMIN match.
2248  */
2249  if (TransactionIdIsValid(priorXmax) &&
2250  !TransactionIdEquals(priorXmax, HeapTupleHeaderGetXmin(tp.t_data)))
2251  {
2252  UnlockReleaseBuffer(buffer);
2253  break;
2254  }
2255 
2256  /*
2257  * Check time qualification of tuple; if visible, set it as the new
2258  * result candidate.
2259  */
2260  valid = HeapTupleSatisfiesVisibility(&tp, snapshot, buffer);
2261  CheckForSerializableConflictOut(valid, relation, &tp, buffer, snapshot);
2262  if (valid)
2263  *tid = ctid;
2264 
2265  /*
2266  * If there's a valid t_ctid link, follow it, else we're done.
2267  */
2268  if ((tp.t_data->t_infomask & HEAP_XMAX_INVALID) ||
2269  HeapTupleHeaderIsOnlyLocked(tp.t_data) ||
2270  ItemPointerEquals(&tp.t_self, &tp.t_data->t_ctid))
2271  {
2272  UnlockReleaseBuffer(buffer);
2273  break;
2274  }
2275 
2276  ctid = tp.t_data->t_ctid;
2277  priorXmax = HeapTupleHeaderGetUpdateXid(tp.t_data);
2278  UnlockReleaseBuffer(buffer);
2279  } /* end of loop */
2280 }
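A hedged usage sketch, similar to what currtid_byreloid() does with a user-supplied ctid; rel and tid are assumed to come from the caller, and an active snapshot is assumed to be set.

    ItemPointerData latest;

    latest = *tid;                      /* start from the caller-supplied TID */
    heap_get_latest_tid(rel, GetActiveSnapshot(), &latest);
    /* latest now names the newest visible version reachable through the
     * t_ctid chain, or is unchanged if the chain could not be followed */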
HeapTuple heap_getnext ( HeapScanDesc  scan,
ScanDirection  direction 
)

Definition at line 1797 of file heapam.c.

References HEAPDEBUG_1, HEAPDEBUG_2, HEAPDEBUG_3, heapgettup(), heapgettup_pagemode(), NULL, pgstat_count_heap_getnext, HeapScanDescData::rs_ctup, HeapScanDescData::rs_key, HeapScanDescData::rs_nkeys, HeapScanDescData::rs_pageatatime, HeapScanDescData::rs_rd, and HeapTupleData::t_data.

Referenced by AlterDomainNotNull(), AlterTableMoveAll(), AlterTableSpaceOptions(), ATRewriteTable(), boot_openrel(), check_db_file_conflict(), copy_heap_data(), CopyTo(), createdb(), DefineQueryRewrite(), do_autovacuum(), DropSetting(), DropTableSpace(), find_typed_table_dependencies(), get_database_list(), get_rel_oids(), get_subscription_list(), get_tables_to_cluster(), get_tablespace_name(), get_tablespace_oid(), GetAllTablesPublicationRelations(), getRelationsInNamespace(), gettype(), index_update_stats(), IndexBuildHeapRangeScan(), IndexCheckExclusion(), objectsInSchemaToOids(), pgrowlocks(), pgstat_collect_oids(), pgstat_heap(), ReindexMultipleTables(), RelationFindReplTupleSeq(), remove_dbtablespaces(), RemoveConversionById(), RemoveSubscriptionRel(), RenameTableSpace(), SeqNext(), systable_getnext(), ThereIsAtLeastOneRole(), vac_truncate_clog(), validate_index_heapscan(), validateCheckConstraint(), validateDomainConstraint(), and validateForeignKeyConstraint().

1798 {
1799  /* Note: no locking manipulations needed */
1800 
1801  HEAPDEBUG_1; /* heap_getnext( info ) */
1802 
1803  if (scan->rs_pageatatime)
1804  heapgettup_pagemode(scan, direction,
1805  scan->rs_nkeys, scan->rs_key);
1806  else
1807  heapgettup(scan, direction, scan->rs_nkeys, scan->rs_key);
1808 
1809  if (scan->rs_ctup.t_data == NULL)
1810  {
1811  HEAPDEBUG_2; /* heap_getnext returning EOS */
1812  return NULL;
1813  }
1814 
1815  /*
1816  * if we get here it means we have a new current scan tuple, so point to
1817  * the proper return buffer and return the tuple.
1818  */
1819  HEAPDEBUG_3; /* heap_getnext returning tuple */
1820 
1821  pgstat_count_heap_getnext(scan->rs_rd);
1822 
1823  return &(scan->rs_ctup);
1824 }
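A hedged sketch of the canonical sequential-scan loop built on heap_beginscan()/heap_getnext()/heap_endscan(); relid is an assumed caller-supplied relation OID, an active snapshot is assumed, and error handling is omitted.

    Relation     rel = heap_open(relid, AccessShareLock);
    HeapScanDesc scan = heap_beginscan(rel, GetActiveSnapshot(), 0, NULL);
    HeapTuple    tuple;

    while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
    {
        /* tuple points into the scan's current buffer; copy it with
         * heap_copytuple() if it must survive the next heap_getnext() call */
    }

    heap_endscan(scan);
    heap_close(rel, AccessShareLock);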
bool heap_hot_search ( ItemPointer  tid,
Relation  relation,
Snapshot  snapshot,
bool *  all_dead 
)

Definition at line 2142 of file heapam.c.

References buffer, BUFFER_LOCK_SHARE, BUFFER_LOCK_UNLOCK, heap_hot_search_buffer(), ItemPointerGetBlockNumber, LockBuffer(), ReadBuffer(), ReleaseBuffer(), and result.

Referenced by _bt_check_unique(), and unique_key_recheck().

2144 {
2145  bool result;
2146  Buffer buffer;
2147  HeapTupleData heapTuple;
2148 
2149  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
2150  LockBuffer(buffer, BUFFER_LOCK_SHARE);
2151  result = heap_hot_search_buffer(tid, relation, buffer, snapshot,
2152  &heapTuple, all_dead, true);
2153  LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
2154  ReleaseBuffer(buffer);
2155  return result;
2156 }
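A hedged sketch of how a unique-check style caller (compare _bt_check_unique()) might use the result; rel, tid and snapshot are assumptions of this example.

    bool        all_dead;

    if (heap_hot_search(tid, rel, snapshot, &all_dead))
    {
        /* some member of the HOT chain is visible: treat as a live match */
    }
    else if (all_dead)
    {
        /* every chain member is dead to all transactions: the index entry
         * pointing at this chain may be marked killed */
    }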
bool heap_hot_search_buffer ( ItemPointer  tid,
Relation  relation,
Buffer  buffer,
Snapshot  snapshot,
HeapTuple  heapTuple,
bool *  all_dead,
bool  first_call 
)

Definition at line 2000 of file heapam.c.

References Assert, BufferGetBlockNumber(), BufferGetPage, CheckForSerializableConflictOut(), HeapTupleHeaderGetUpdateXid, HeapTupleHeaderGetXmin, HeapTupleIsHeapOnly, HeapTupleIsHotUpdated, HeapTupleIsSurelyDead(), HeapTupleSatisfiesVisibility, InvalidTransactionId, ItemIdGetLength, ItemIdGetRedirect, ItemIdIsNormal, ItemIdIsRedirected, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, ItemPointerSet, ItemPointerSetOffsetNumber, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PredicateLockTuple(), RecentGlobalXmin, RelationGetRelid, skip(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, and TransactionIdIsValid.

Referenced by bitgetpage(), heap_hot_search(), and index_fetch_heap().

2003 {
2004  Page dp = (Page) BufferGetPage(buffer);
2005  TransactionId prev_xmax = InvalidTransactionId;
2006  OffsetNumber offnum;
2007  bool at_chain_start;
2008  bool valid;
2009  bool skip;
2010 
2011  /* If this is not the first call, previous call returned a (live!) tuple */
2012  if (all_dead)
2013  *all_dead = first_call;
2014 
2015  Assert(TransactionIdIsValid(RecentGlobalXmin));
2016 
2017  Assert(ItemPointerGetBlockNumber(tid) == BufferGetBlockNumber(buffer));
2018  offnum = ItemPointerGetOffsetNumber(tid);
2019  at_chain_start = first_call;
2020  skip = !first_call;
2021 
2022  heapTuple->t_self = *tid;
2023 
2024  /* Scan through possible multiple members of HOT-chain */
2025  for (;;)
2026  {
2027  ItemId lp;
2028 
2029  /* check for bogus TID */
2030  if (offnum < FirstOffsetNumber || offnum > PageGetMaxOffsetNumber(dp))
2031  break;
2032 
2033  lp = PageGetItemId(dp, offnum);
2034 
2035  /* check for unused, dead, or redirected items */
2036  if (!ItemIdIsNormal(lp))
2037  {
2038  /* We should only see a redirect at start of chain */
2039  if (ItemIdIsRedirected(lp) && at_chain_start)
2040  {
2041  /* Follow the redirect */
2042  offnum = ItemIdGetRedirect(lp);
2043  at_chain_start = false;
2044  continue;
2045  }
2046  /* else must be end of chain */
2047  break;
2048  }
2049 
2050  heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
2051  heapTuple->t_len = ItemIdGetLength(lp);
2052  heapTuple->t_tableOid = RelationGetRelid(relation);
2053  ItemPointerSetOffsetNumber(&heapTuple->t_self, offnum);
2054 
2055  /*
2056  * Shouldn't see a HEAP_ONLY tuple at chain start.
2057  */
2058  if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
2059  break;
2060 
2061  /*
2062  * The xmin should match the previous xmax value, else chain is
2063  * broken.
2064  */
2065  if (TransactionIdIsValid(prev_xmax) &&
2066  !TransactionIdEquals(prev_xmax,
2067  HeapTupleHeaderGetXmin(heapTuple->t_data)))
2068  break;
2069 
2070  /*
2071  * When first_call is true (and thus, skip is initially false) we'll
2072  * return the first tuple we find. But on later passes, heapTuple
2073  * will initially be pointing to the tuple we returned last time.
2074  * Returning it again would be incorrect (and would loop forever), so
2075  * we skip it and return the next match we find.
2076  */
2077  if (!skip)
2078  {
2079  /*
2080  * For the benefit of logical decoding, have t_self point at the
2081  * element of the HOT chain we're currently investigating instead
2082  * of the root tuple of the HOT chain. This is important because
2083  * the *Satisfies routine for historical mvcc snapshots needs the
2084  * correct tid to decide about the visibility in some cases.
2085  */
2086  ItemPointerSet(&(heapTuple->t_self), BufferGetBlockNumber(buffer), offnum);
2087 
2088  /* If it's visible per the snapshot, we must return it */
2089  valid = HeapTupleSatisfiesVisibility(heapTuple, snapshot, buffer);
2090  CheckForSerializableConflictOut(valid, relation, heapTuple,
2091  buffer, snapshot);
2092  /* reset to original, non-redirected, tid */
2093  heapTuple->t_self = *tid;
2094 
2095  if (valid)
2096  {
2097  ItemPointerSetOffsetNumber(tid, offnum);
2098  PredicateLockTuple(relation, heapTuple, snapshot);
2099  if (all_dead)
2100  *all_dead = false;
2101  return true;
2102  }
2103  }
2104  skip = false;
2105 
2106  /*
2107  * If we can't see it, maybe no one else can either. At caller
2108  * request, check whether all chain members are dead to all
2109  * transactions.
2110  */
2111  if (all_dead && *all_dead &&
2112  !HeapTupleIsSurelyDead(heapTuple, RecentGlobalXmin))
2113  *all_dead = false;
2114 
2115  /*
2116  * Check to see if HOT chain continues past this tuple; if so fetch
2117  * the next offnum and loop around.
2118  */
2119  if (HeapTupleIsHotUpdated(heapTuple))
2120  {
2121  Assert(ItemPointerGetBlockNumber(&heapTuple->t_data->t_ctid) ==
2122  ItemPointerGetBlockNumber(tid));
2123  offnum = ItemPointerGetOffsetNumber(&heapTuple->t_data->t_ctid);
2124  at_chain_start = false;
2125  prev_xmax = HeapTupleHeaderGetUpdateXid(heapTuple->t_data);
2126  }
2127  else
2128  break; /* end of chain */
2129  }
2130 
2131  return false;
2132 }
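A hedged sketch of the buffer-level variant, roughly the index_fetch_heap() pattern: the caller already holds a pin and share lock on buf, and tid initially names the root of the HOT chain (rel, buf and snapshot are assumptions of this example).

    HeapTupleData heapTuple;
    bool          all_dead;

    if (heap_hot_search_buffer(tid, rel, buf, snapshot,
                               &heapTuple, &all_dead, true /* first_call */))
    {
        /* heapTuple is filled in and *tid was updated to the visible chain
         * member; pass first_call = false later to continue down the chain */
    }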
void heap_inplace_update ( Relation  relation,
HeapTuple  tuple 
)

Definition at line 6249 of file heapam.c.

References buffer, BUFFER_LOCK_EXCLUSIVE, BufferGetPage, CacheInvalidateHeapTuple(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, IsBootstrapProcessingMode, IsInParallelMode(), ItemIdGetLength, ItemIdIsNormal, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), MarkBufferDirty(), NULL, xl_heap_inplace::offnum, PageGetItem, PageGetItemId, PageGetMaxOffsetNumber, PageSetLSN, ReadBuffer(), REGBUF_STANDARD, RelationNeedsWAL, SizeOfHeapInplace, START_CRIT_SECTION, HeapTupleData::t_data, HeapTupleHeaderData::t_hoff, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), XLOG_HEAP_INPLACE, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), and XLogRegisterData().

Referenced by create_toast_table(), index_set_state_flags(), index_update_stats(), vac_update_datfrozenxid(), and vac_update_relstats().

6250 {
6251  Buffer buffer;
6252  Page page;
6253  OffsetNumber offnum;
6254  ItemId lp = NULL;
6255  HeapTupleHeader htup;
6256  uint32 oldlen;
6257  uint32 newlen;
6258 
6259  /*
6260  * For now, parallel operations are required to be strictly read-only.
6261  * Unlike a regular update, this should never create a combo CID, so it
6262  * might be possible to relax this restriction, but not without more
6263  * thought and testing. It's not clear that it would be useful, anyway.
6264  */
6265  if (IsInParallelMode())
6266  ereport(ERROR,
6267  (errcode(ERRCODE_INVALID_TRANSACTION_STATE),
6268  errmsg("cannot update tuples during a parallel operation")));
6269 
6270  buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(&(tuple->t_self)));
6271  LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
6272  page = (Page) BufferGetPage(buffer);
6273 
6274  offnum = ItemPointerGetOffsetNumber(&(tuple->t_self));
6275  if (PageGetMaxOffsetNumber(page) >= offnum)
6276  lp = PageGetItemId(page, offnum);
6277 
6278  if (PageGetMaxOffsetNumber(page) < offnum || !ItemIdIsNormal(lp))
6279  elog(ERROR, "invalid lp");
6280 
6281  htup = (HeapTupleHeader) PageGetItem(page, lp);
6282 
6283  oldlen = ItemIdGetLength(lp) - htup->t_hoff;
6284  newlen = tuple->t_len - tuple->t_data->t_hoff;
6285  if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
6286  elog(ERROR, "wrong tuple length");
6287 
6288  /* NO EREPORT(ERROR) from here till changes are logged */
6289  START_CRIT_SECTION();
6290 
6291  memcpy((char *) htup + htup->t_hoff,
6292  (char *) tuple->t_data + tuple->t_data->t_hoff,
6293  newlen);
6294 
6295  MarkBufferDirty(buffer);
6296 
6297  /* XLOG stuff */
6298  if (RelationNeedsWAL(relation))
6299  {
6300  xl_heap_inplace xlrec;
6301  XLogRecPtr recptr;
6302 
6303  xlrec.offnum = ItemPointerGetOffsetNumber(&tuple->t_self);
6304 
6305  XLogBeginInsert();
6306  XLogRegisterData((char *) &xlrec, SizeOfHeapInplace);
6307 
6308  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD);
6309  XLogRegisterBufData(0, (char *) htup + htup->t_hoff, newlen);
6310 
6311  /* inplace updates aren't decoded atm, don't log the origin */
6312 
6313  recptr = XLogInsert(RM_HEAP_ID, XLOG_HEAP_INPLACE);
6314 
6315  PageSetLSN(page, recptr);
6316  }
6317 
6318  END_CRIT_SECTION();
6319 
6320  UnlockReleaseBuffer(buffer);
6321 
6322  /*
6323  * Send out shared cache inval if necessary. Note that because we only
6324  * pass the new version of the tuple, this mustn't be used for any
6325  * operations that could change catcache lookup keys. But we aren't
6326  * bothering with index updates either, so that's true a fortiori.
6327  */
6328  if (!IsBootstrapProcessingMode())
6329  CacheInvalidateHeapTuple(relation, tuple, NULL);
6330 }
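A hedged sketch patterned on vac_update_relstats(): overwrite fixed-width fields of a catalog tuple without changing its length. pg_class_rel, ctup (a copy of the pg_class row, e.g. from SearchSysCacheCopy1), num_pages and num_tuples are assumptions of this example.

    Form_pg_class pgcform = (Form_pg_class) GETSTRUCT(ctup);

    pgcform->relpages = (int32) num_pages;
    pgcform->reltuples = (float4) num_tuples;

    /* same-length, non-transactional overwrite: use only where that is safe */
    heap_inplace_update(pg_class_rel, ctup);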
Oid heap_insert ( Relation  relation,
HeapTuple  tup,
CommandId  cid,
int  options,
BulkInsertState  bistate 
)

Definition at line 2399 of file heapam.c.

References Assert, buffer, BufferGetBlockNumber(), BufferGetPage, CacheInvalidateHeapTuple(), CheckForSerializableConflictIn(), END_CRIT_SECTION, FirstOffsetNumber, xl_heap_insert::flags, GetCurrentTransactionId(), heap_freetuple(), HEAP_INSERT_SKIP_WAL, HEAP_INSERT_SPECULATIVE, heap_prepare_insert(), HeapTupleGetOid, InvalidBuffer, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, log_heap_new_cid(), MarkBufferDirty(), xl_heap_insert::offnum, PageClearAllVisible, PageGetMaxOffsetNumber, PageIsAllVisible, PageSetLSN, pgstat_count_heap_insert(), REGBUF_KEEP_DATA, REGBUF_STANDARD, REGBUF_WILL_INIT, RelationGetBufferForTuple(), RelationIsAccessibleInLogicalDecoding, RelationIsLogicallyLogged, RelationNeedsWAL, RelationPutHeapTuple(), ReleaseBuffer(), SizeOfHeapHeader, SizeOfHeapInsert, SizeofHeapTupleHeader, START_CRIT_SECTION, HeapTupleData::t_data, xl_heap_header::t_hoff, HeapTupleHeaderData::t_hoff, xl_heap_header::t_infomask, HeapTupleHeaderData::t_infomask, xl_heap_header::t_infomask2, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, UnlockReleaseBuffer(), visibilitymap_clear(), VISIBILITYMAP_VALID_BITS, XLH_INSERT_ALL_VISIBLE_CLEARED, XLH_INSERT_CONTAINS_NEW_TUPLE, XLH_INSERT_IS_SPECULATIVE, XLOG_HEAP_INIT_PAGE, XLOG_HEAP_INSERT, XLOG_INCLUDE_ORIGIN, XLogBeginInsert(), XLogInsert(), XLogRegisterBufData(), XLogRegisterBuffer(), XLogRegisterData(), and XLogSetRecordFlags().

Referenced by ATRewriteTable(), CopyFrom(), ExecInsert(), intorel_receive(), simple_heap_insert(), toast_save_datum(), and transientrel_receive().

2401 {
2402  TransactionId xid = GetCurrentTransactionId();
2403  HeapTuple heaptup;
2404  Buffer buffer;
2405  Buffer vmbuffer = InvalidBuffer;
2406  bool all_visible_cleared = false;
2407 
2408  /*
2409  * Fill in tuple header fields, assign an OID, and toast the tuple if
2410  * necessary.
2411  *
2412  * Note: below this point, heaptup is the data we actually intend to store
2413  * into the relation; tup is the caller's original untoasted data.
2414  */
2415  heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
2416 
2417  /*
2418  * Find buffer to insert this tuple into. If the page is all visible,
2419  * this will also pin the requisite visibility map page.
2420  */
2421  buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
2422  InvalidBuffer, options, bistate,
2423  &vmbuffer, NULL);
2424 
2425  /*
2426  * We're about to do the actual insert -- but check for conflict first, to
2427  * avoid possibly having to roll back work we've just done.
2428  *
2429  * This is safe without a recheck as long as there is no possibility of
2430  * another process scanning the page between this check and the insert
2431  * being visible to the scan (i.e., an exclusive buffer content lock is
2432  * continuously held from this point until the tuple insert is visible).
2433  *
2434  * For a heap insert, we only need to check for table-level SSI locks. Our
2435  * new tuple can't possibly conflict with existing tuple locks, and heap
2436  * page locks are only consolidated versions of tuple locks; they do not
2437  * lock "gaps" as index page locks do. So we don't need to specify a
2438  * buffer when making the call, which makes for a faster check.
2439  */
2440  CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
2441 
2442  /* NO EREPORT(ERROR) from here till changes are logged */
2443  START_CRIT_SECTION();
2444 
2445  RelationPutHeapTuple(relation, buffer, heaptup,
2446  (options & HEAP_INSERT_SPECULATIVE) != 0);
2447 
2448  if (PageIsAllVisible(BufferGetPage(buffer)))
2449  {
2450  all_visible_cleared = true;
2451  PageClearAllVisible(BufferGetPage(buffer));
2452  visibilitymap_clear(relation,
2453  ItemPointerGetBlockNumber(&(heaptup->t_self)),
2454  vmbuffer, VISIBILITYMAP_VALID_BITS);
2455  }
2456 
2457  /*
2458  * XXX Should we set PageSetPrunable on this page ?
2459  *
2460  * The inserting transaction may eventually abort thus making this tuple
2461  * DEAD and hence available for pruning. Though we don't want to optimize
2462  * for aborts, if no other tuple in this page is UPDATEd/DELETEd, the
2463  * aborted tuple will never be pruned until next vacuum is triggered.
2464  *
2465  * If you do add PageSetPrunable here, add it in heap_xlog_insert too.
2466  */
2467 
2468  MarkBufferDirty(buffer);
2469 
2470  /* XLOG stuff */
2471  if (!(options & HEAP_INSERT_SKIP_WAL) && RelationNeedsWAL(relation))
2472  {
2473  xl_heap_insert xlrec;
2474  xl_heap_header xlhdr;
2475  XLogRecPtr recptr;
2476  Page page = BufferGetPage(buffer);
2477  uint8 info = XLOG_HEAP_INSERT;
2478  int bufflags = 0;
2479 
2480  /*
2481  * If this is a catalog, we need to transmit combocids to properly
2482  * decode, so log that as well.
2483  */
2484  if (RelationIsAccessibleInLogicalDecoding(relation))
2485  log_heap_new_cid(relation, heaptup);
2486 
2487  /*
2488  * If this is the single and first tuple on page, we can reinit the
2489  * page instead of restoring the whole thing. Set flag, and hide
2490  * buffer references from XLogInsert.
2491  */
2492  if (ItemPointerGetOffsetNumber(&(heaptup->t_self)) == FirstOffsetNumber &&
2493  PageGetMaxOffsetNumber(page) == FirstOffsetNumber)
2494  {
2495  info |= XLOG_HEAP_INIT_PAGE;
2496  bufflags |= REGBUF_WILL_INIT;
2497  }
2498 
2499  xlrec.offnum = ItemPointerGetOffsetNumber(&heaptup->t_self);
2500  xlrec.flags = 0;
2501  if (all_visible_cleared)
2502  xlrec.flags |= XLH_INSERT_ALL_VISIBLE_CLEARED;
2503  if (options & HEAP_INSERT_SPECULATIVE)
2504  xlrec.flags |= XLH_INSERT_IS_SPECULATIVE;
2505  Assert(ItemPointerGetBlockNumber(&heaptup->t_self) == BufferGetBlockNumber(buffer));
2506 
2507  /*
2508  * For logical decoding, we need the tuple even if we're doing a full
2509  * page write, so make sure it's included even if we take a full-page
2510  * image. (XXX We could alternatively store a pointer into the FPW).
2511  */
2512  if (RelationIsLogicallyLogged(relation))
2513  {
2514  xlrec.flags |= XLH_INSERT_CONTAINS_NEW_TUPLE;
2515  bufflags |= REGBUF_KEEP_DATA;
2516  }
2517 
2518  XLogBeginInsert();
2519  XLogRegisterData((char *) &xlrec, SizeOfHeapInsert);
2520 
2521  xlhdr.t_infomask2 = heaptup->t_data->t_infomask2;
2522  xlhdr.t_infomask = heaptup->t_data->t_infomask;
2523  xlhdr.t_hoff = heaptup->t_data->t_hoff;
2524 
2525  /*
2526  * note we mark xlhdr as belonging to buffer; if XLogInsert decides to
2527  * write the whole page to the xlog, we don't need to store
2528  * xl_heap_header in the xlog.
2529  */
2530  XLogRegisterBuffer(0, buffer, REGBUF_STANDARD | bufflags);
2531  XLogRegisterBufData(0, (char *) &xlhdr, SizeOfHeapHeader);
2532  /* PG73FORMAT: write bitmap [+ padding] [+ oid] + data */
2534  (char *) heaptup->t_data + SizeofHeapTupleHeader,
2535  heaptup->t_len - SizeofHeapTupleHeader);
2536 
2537  /* filtering by origin on a row level is much more efficient */
2538  XLogSetRecordFlags(XLOG_INCLUDE_ORIGIN);
2539 
2540  recptr = XLogInsert(RM_HEAP_ID, info);
2541 
2542  PageSetLSN(page, recptr);
2543  }
2544 
2545  END_CRIT_SECTION();
2546 
2547  UnlockReleaseBuffer(buffer);
2548  if (vmbuffer != InvalidBuffer)
2549  ReleaseBuffer(vmbuffer);
2550 
2551  /*
2552  * If tuple is cachable, mark it for invalidation from the caches in case
2553  * we abort. Note it is OK to do this after releasing the buffer, because
2554  * the heaptup data structure is all in local memory, not in the shared
2555  * buffer.
2556  */
2557  CacheInvalidateHeapTuple(relation, heaptup, NULL);
2558 
2559  /* Note: speculative insertions are counted too, even if aborted later */
2560  pgstat_count_heap_insert(relation, 1);
2561 
2562  /*
2563  * If heaptup is a private copy, release it. Don't forget to copy t_self
2564  * back to the caller's image, too.
2565  */
2566  if (heaptup != tup)
2567  {
2568  tup->t_self = heaptup->t_self;
2569  heap_freetuple(heaptup);
2570  }
2571 
2572  return HeapTupleGetOid(tup);
2573 }
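A hedged sketch of a plain single-row insert, essentially what simple_heap_insert() does; rel is assumed to be open with RowExclusiveLock and values/nulls to match its tuple descriptor. Index maintenance remains the caller's job.

    HeapTuple   tup = heap_form_tuple(RelationGetDescr(rel), values, nulls);
    Oid         oid;

    oid = heap_insert(rel, tup, GetCurrentCommandId(true), 0 /* options */, NULL);
    /* tup->t_self now holds the assigned TID; insert index entries as needed */
    heap_freetuple(tup);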
HTSU_Result heap_lock_tuple ( Relation  relation,
HeapTuple  tuple,
CommandId  cid,
LockTupleMode  mode,
LockWaitPolicy  wait_policy,
bool  follow_updates,
Buffer *  buffer,
HeapUpdateFailureData *  hufd 
)

Definition at line 4594 of file heapam.c.

References Assert, BUFFER_LOCK_EXCLUSIVE, BUFFER_LOCK_UNLOCK, BufferGetPage, BufferIsValid, HeapUpdateFailureData::cmax, compute_infobits(), compute_new_xmax_infomask(), ConditionalMultiXactIdWait(), ConditionalXactLockTableWait(), HeapUpdateFailureData::ctid, DoesMultiXactIdConflict(), elog, END_CRIT_SECTION, ereport, errcode(), errmsg(), ERROR, xl_heap_lock::flags, get_mxact_status_for_lock(), GetCurrentTransactionId(), GetMultiXactIdMembers(), heap_acquire_tuplock(), HEAP_KEYS_UPDATED, heap_lock_updated_tuple(), HEAP_XMAX_BITS, HEAP_XMAX_INVALID, HEAP_XMAX_IS_EXCL_LOCKED, HEAP_XMAX_IS_KEYSHR_LOCKED, HEAP_XMAX_IS_LOCKED_ONLY, HEAP_XMAX_IS_MULTI, HEAP_XMAX_IS_SHR_LOCKED, HeapTupleBeingUpdated, HeapTupleHeaderClearHotUpdated, HeapTupleHeaderGetCmax(), HeapTupleHeaderGetRawXmax, HeapTupleHeaderGetUpdateXid, HeapTupleHeaderIsOnlyLocked(), HeapTupleHeaderSetXmax, HeapTupleInvisible, HeapTupleMayBeUpdated, HeapTupleSatisfiesUpdate(), HeapTupleSelfUpdated, HeapTupleUpdated, HeapTupleWouldBlock, i, xl_heap_lock::infobits_set, InvalidBuffer, InvalidCommandId, ItemIdGetLength, ItemIdIsNormal, ItemPointerCopy, ItemPointerGetBlockNumber, ItemPointerGetOffsetNumber, LockBuffer(), xl_heap_lock::locking_xid, LockTupleExclusive, LockTupleKeyShare, LockTupleNoKeyExclusive, LockTupleShare, LockWaitBlock, LockWaitError, LockWaitSkip, MarkBufferDirty(), MultiXactIdSetOldestMember(), MultiXactIdWait(), MultiXactStatusNoKeyUpdate, xl_heap_lock::offnum, PageGetItem, PageGetItemId, PageIsAllVisible, PageSetLSN, pfree(), ReadBuffer(), REGBUF_STANDARD, RelationGetRelationName, RelationGetRelid, RelationNeedsWAL, ReleaseBuffer(), result, SizeOfHeapLock, START_CRIT_SECTION, status(), HeapTupleHeaderData::t_ctid, HeapTupleData::t_data, HeapTupleHeaderData::t_infomask, HeapTupleHeaderData::t_infomask2, HeapTupleData::t_len, HeapTupleData::t_self, HeapTupleData::t_tableOid, TransactionIdEquals, TransactionIdIsCurrentTransactionId(), TUPLOCK_from_mxstatus, UnlockTupleTuplock, UpdateXmaxHintBits(), VISIBILITYMAP_ALL_FROZEN, visibilitymap_clear(), visibilitymap_pin(), XactLockTableWait(), XLH_LOCK_ALL_FROZEN_CLEARED, XLOG_HEAP_LOCK, XLogBeginInsert(), XLogInsert(), XLogRegisterBuffer(), XLogRegisterData(), XLTW_Lock, HeapUpdateFailureData::xmax, and xmax_infomask_changed().

Referenced by EvalPlanQualFetch(), ExecLockRows(), ExecOnConflictUpdate(), GetTupleForTrigger(), RelationFindReplTupleByIndex(), and RelationFindReplTupleSeq().
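A hedged calling sketch, loosely following ExecLockRows(); rel, tid and the surrounding retry/error handling are assumed to exist in the caller. The full definition follows.

    HeapTupleData         tuple;
    Buffer                buffer;
    HeapUpdateFailureData hufd;
    HTSU_Result           test;

    tuple.t_self = *tid;
    test = heap_lock_tuple(rel, &tuple, GetCurrentCommandId(true),
                           LockTupleExclusive, LockWaitBlock,
                           true /* follow_updates */, &buffer, &hufd);
    ReleaseBuffer(buffer);              /* the returned pin must be released */

    if (test == HeapTupleUpdated)
    {
        /* concurrently updated: hufd.ctid names the successor version to chase */
    }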

4598 {
4599  HTSU_Result result;
4600  ItemPointer tid = &(tuple->t_self);
4601  ItemId lp;
4602  Page page;
4603  Buffer vmbuffer = InvalidBuffer;
4604  BlockNumber block;
4605  TransactionId xid,
4606  xmax;
4607  uint16 old_infomask,
4608  new_infomask,
4609  new_infomask2;
4610  bool first_time = true;
4611  bool have_tuple_lock = false;
4612  bool cleared_all_frozen = false;
4613 
4614  *buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
4615  block = ItemPointerGetBlockNumber(tid);
4616 
4617  /*
4618  * Before locking the buffer, pin the visibility map page if it appears to
4619  * be necessary. Since we haven't got the lock yet, someone else might be
4620  * in the middle of changing this, so we'll need to recheck after we have
4621  * the lock.
4622  */
4623  if (PageIsAllVisible(BufferGetPage(*buffer)))
4624  visibilitymap_pin(relation, block, &vmbuffer);
4625 
4626  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
4627 
4628  page = BufferGetPage(*buffer);
4629  lp = PageGetItemId(page, ItemPointerGetOffsetNumber(tid));
4630  Assert(ItemIdIsNormal(lp));
4631 
4632  tuple->t_data = (HeapTupleHeader) PageGetItem(page, lp);
4633  tuple->t_len = ItemIdGetLength(lp);
4634  tuple->t_tableOid = RelationGetRelid(relation);
4635 
4636 l3:
4637  result = HeapTupleSatisfiesUpdate(tuple, cid, *buffer);
4638 
4639  if (result == HeapTupleInvisible)
4640  {
4641  /*
4642  * This is possible, but only when locking a tuple for ON CONFLICT
4643  * UPDATE. We return this value here rather than throwing an error in
4644  * order to give that case the opportunity to throw a more specific
4645  * error.
4646  */
4647  result = HeapTupleInvisible;
4648  goto out_locked;
4649  }
4650  else if (result == HeapTupleBeingUpdated || result == HeapTupleUpdated)
4651  {
4652  TransactionId xwait;
4653  uint16 infomask;
4654  uint16 infomask2;
4655  bool require_sleep;
4656  ItemPointerData t_ctid;
4657 
4658  /* must copy state data before unlocking buffer */
4659  xwait = HeapTupleHeaderGetRawXmax(tuple->t_data);
4660  infomask = tuple->t_data->t_infomask;
4661  infomask2 = tuple->t_data->t_infomask2;
4662  ItemPointerCopy(&tuple->t_data->t_ctid, &t_ctid);
4663 
4665 
4666  /*
4667  * If any subtransaction of the current top transaction already holds
4668  * a lock as strong as or stronger than what we're requesting, we
4669  * effectively hold the desired lock already. We *must* succeed
4670  * without trying to take the tuple lock, else we will deadlock
4671  * against anyone wanting to acquire a stronger lock.
4672  *
4673  * Note we only do this the first time we loop on the HTSU result;
4674  * there is no point in testing in subsequent passes, because
4675  * evidently our own transaction cannot have acquired a new lock after
4676  * the first time we checked.
4677  */
4678  if (first_time)
4679  {
4680  first_time = false;
4681 
4682  if (infomask & HEAP_XMAX_IS_MULTI)
4683  {
4684  int i;
4685  int nmembers;
4686  MultiXactMember *members;
4687 
4688  /*
4689  * We don't need to allow old multixacts here; if that had
4690  * been the case, HeapTupleSatisfiesUpdate would have returned
4691  * MayBeUpdated and we wouldn't be here.
4692  */
4693  nmembers =
4694  GetMultiXactIdMembers(xwait, &members, false,
4695  HEAP_XMAX_IS_LOCKED_ONLY(infomask));
4696 
4697  for (i = 0; i < nmembers; i++)
4698  {
4699  /* only consider members of our own transaction */
4700  if (!TransactionIdIsCurrentTransactionId(members[i].xid))
4701  continue;
4702 
4703  if (TUPLOCK_from_mxstatus(members[i].status) >= mode)
4704  {
4705  pfree(members);
4706  result = HeapTupleMayBeUpdated;
4707  goto out_unlocked;
4708  }
4709  }
4710 
4711  if (members)
4712  pfree(members);
4713  }
4714  else if (TransactionIdIsCurrentTransactionId(xwait))
4715  {
4716  switch (mode)
4717  {
4718  case LockTupleKeyShare:
4719  Assert(HEAP_XMAX_IS_KEYSHR_LOCKED(infomask) ||
4720  HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4721  HEAP_XMAX_IS_EXCL_LOCKED(infomask));
4722  result = HeapTupleMayBeUpdated;
4723  goto out_unlocked;
4724  case LockTupleShare:
4725  if (HEAP_XMAX_IS_SHR_LOCKED(infomask) ||
4726  HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4727  {
4728  result = HeapTupleMayBeUpdated;
4729  goto out_unlocked;
4730  }
4731  break;
4732  case LockTupleNoKeyExclusive:
4733  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4734  {
4735  result = HeapTupleMayBeUpdated;
4736  goto out_unlocked;
4737  }
4738  break;
4739  case LockTupleExclusive:
4740  if (HEAP_XMAX_IS_EXCL_LOCKED(infomask) &&
4741  infomask2 & HEAP_KEYS_UPDATED)
4742  {
4743  result = HeapTupleMayBeUpdated;
4744  goto out_unlocked;
4745  }
4746  break;
4747  }
4748  }
4749  }
4750 
4751  /*
4752  * Initially assume that we will have to wait for the locking
4753  * transaction(s) to finish. We check various cases below in which
4754  * this can be turned off.
4755  */
4756  require_sleep = true;
4757  if (mode == LockTupleKeyShare)
4758  {
4759  /*
4760  * If we're requesting KeyShare, and there's no update present, we
4761  * don't need to wait. Even if there is an update, we can still
4762  * continue if the key hasn't been modified.
4763  *
4764  * However, if there are updates, we need to walk the update chain
4765  * to mark future versions of the row as locked, too. That way,
4766  * if somebody deletes that future version, we're protected
4767  * against the key going away. This locking of future versions
4768  * could block momentarily, if a concurrent transaction is
4769  * deleting a key; or it could return a value to the effect that
4770  * the transaction deleting the key has already committed. So we
4771  * do this before re-locking the buffer; otherwise this would be
4772  * prone to deadlocks.
4773  *
4774  * Note that the TID we're locking was grabbed before we unlocked
4775  * the buffer. For it to change while we're not looking, the
4776  * other properties we're testing for below after re-locking the
4777  * buffer would also change, in which case we would restart this
4778  * loop above.
4779  */
4780  if (!(infomask2 & HEAP_KEYS_UPDATED))
4781  {
4782  bool updated;
4783 
4784  updated = !HEAP_XMAX_IS_LOCKED_ONLY(infomask);
4785 
4786  /*
4787  * If there are updates, follow the update chain; bail out if
4788  * that cannot be done.
4789  */
4790  if (follow_updates && updated)
4791  {
4792  HTSU_Result res;
4793 
4794  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
4796  mode);
4797  if (res != HeapTupleMayBeUpdated)
4798  {
4799  result = res;
4800  /* recovery code expects to have buffer lock held */
4802  goto failed;
4803  }
4804  }
4805 
4807 
4808  /*
4809  * Make sure it's still an appropriate lock, else start over.
4810  * Also, if it wasn't updated before we released the lock, but
4811  * is updated now, we start over too; the reason is that we
4812  * now need to follow the update chain to lock the new
4813  * versions.
4814  */
4815  if (!HeapTupleHeaderIsOnlyLocked(tuple->t_data) &&
4816  ((tuple->t_data->t_infomask2 & HEAP_KEYS_UPDATED) ||
4817  !updated))
4818  goto l3;
4819 
4820  /* Things look okay, so we can skip sleeping */
4821  require_sleep = false;
4822 
4823  /*
4824  * Note we allow Xmax to change here; other updaters/lockers
4825  * could have modified it before we grabbed the buffer lock.
4826  * However, this is not a problem, because with the recheck we
4827  * just did we ensure that they still don't conflict with the
4828  * lock we want.
4829  */
4830  }
4831  }
4832  else if (mode == LockTupleShare)
4833  {
4834  /*
4835  * If we're requesting Share, we can similarly avoid sleeping if
4836  * there's no update and no exclusive lock present.
4837  */
4838  if (HEAP_XMAX_IS_LOCKED_ONLY(infomask) &&
4839  !HEAP_XMAX_IS_EXCL_LOCKED(infomask))
4840  {
4842 
4843  /*
4844  * Make sure it's still an appropriate lock, else start over.
4845  * See above about allowing xmax to change.
4846  */
4847  if (!HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
4849  goto l3;
4850  require_sleep = false;
4851  }
4852  }
4853  else if (mode == LockTupleNoKeyExclusive)
4854  {
4855  /*
4856  * If we're requesting NoKeyExclusive, we might also be able to
4857  * avoid sleeping; just ensure that there no conflicting lock
4858  * already acquired.
4859  */
4860  if (infomask & HEAP_XMAX_IS_MULTI)
4861  {
4862  if (!DoesMultiXactIdConflict((MultiXactId) xwait, infomask,
4863  mode))
4864  {
4865  /*
4866  * No conflict, but if the xmax changed under us in the
4867  * meantime, start over.
4868  */
4870  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4872  xwait))
4873  goto l3;
4874 
4875  /* otherwise, we're good */
4876  require_sleep = false;
4877  }
4878  }
4879  else if (HEAP_XMAX_IS_KEYSHR_LOCKED(infomask))
4880  {
4882 
4883  /* if the xmax changed in the meantime, start over */
4884  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4887  xwait))
4888  goto l3;
4889  /* otherwise, we're good */
4890  require_sleep = false;
4891  }
4892  }
4893 
4894  /*
4895  * As a check independent from those above, we can also avoid sleeping
4896  * if the current transaction is the sole locker of the tuple. Note
4897  * that the strength of the lock already held is irrelevant; this is
4898  * not about recording the lock in Xmax (which will be done regardless
4899  * of this optimization, below). Also, note that the cases where we
4900  * hold a lock stronger than we are requesting are already handled
4901  * above by not doing anything.
4902  *
4903  * Note we only deal with the non-multixact case here; MultiXactIdWait
4904  * is well equipped to deal with this situation on its own.
4905  */
4906  if (require_sleep && !(infomask & HEAP_XMAX_IS_MULTI) &&
4908  {
4909  /* ... but if the xmax changed in the meantime, start over */
4911  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
4913  xwait))
4914  goto l3;
4916  require_sleep = false;
4917  }
4918 
4919  /*
4920  * Time to sleep on the other transaction/multixact, if necessary.
4921  *
4922  * If the other transaction is an update that's already committed,
4923  * then sleeping cannot possibly do any good: if we're required to
4924  * sleep, get out to raise an error instead.
4925  *
4926  * By here, we either have already acquired the buffer exclusive lock,
4927  * or we must wait for the locking transaction or multixact; so below
4928  * we ensure that we grab buffer lock after the sleep.
4929  */
4930  if (require_sleep && result == HeapTupleUpdated)
4931  {
4933  goto failed;
4934  }
4935  else if (require_sleep)
4936  {
4937  /*
4938  * Acquire tuple lock to establish our priority for the tuple, or
4939  * die trying. LockTuple will release us when we are next-in-line
4940  * for the tuple. We must do this even if we are share-locking.
4941  *
4942  * If we are forced to "start over" below, we keep the tuple lock;
4943  * this arranges that we stay at the head of the line while
4944  * rechecking tuple state.
4945  */
4946  if (!heap_acquire_tuplock(relation, tid, mode, wait_policy,
4947  &have_tuple_lock))
4948  {
4949  /*
4950  * This can only happen if wait_policy is Skip and the lock
4951  * couldn't be obtained.
4952  */
4953  result = HeapTupleWouldBlock;
4954  /* recovery code expects to have buffer lock held */
4956  goto failed;
4957  }
4958 
4959  if (infomask & HEAP_XMAX_IS_MULTI)
4960  {
4962 
4963  /* We only ever lock tuples, never update them */
4964  if (status >= MultiXactStatusNoKeyUpdate)
4965  elog(ERROR, "invalid lock mode in heap_lock_tuple");
4966 
4967  /* wait for multixact to end, or die trying */
4968  switch (wait_policy)
4969  {
4970  case LockWaitBlock:
4971  MultiXactIdWait((MultiXactId) xwait, status, infomask,
4972  relation, &tuple->t_self, XLTW_Lock, NULL);
4973  break;
4974  case LockWaitSkip:
4976  status, infomask, relation,
4977  NULL))
4978  {
4979  result = HeapTupleWouldBlock;
4980  /* recovery code expects to have buffer lock held */
4982  goto failed;
4983  }
4984  break;
4985  case LockWaitError:
4987  status, infomask, relation,
4988  NULL))
4989  ereport(ERROR,
4990  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
4991  errmsg("could not obtain lock on row in relation \"%s\"",
4992  RelationGetRelationName(relation))));
4993 
4994  break;
4995  }
4996 
4997  /*
4998  * Of course, the multixact might not be done here: if we're
4999  * requesting a light lock mode, other transactions with light
5000  * locks could still be alive, as well as locks owned by our
5001  * own xact or other subxacts of this backend. We need to
5002  * preserve the surviving MultiXact members. Note that it
5003  * isn't absolutely necessary in the latter case, but doing so
5004  * is simpler.
5005  */
5006  }
5007  else
5008  {
5009  /* wait for regular transaction to end, or die trying */
5010  switch (wait_policy)
5011  {
5012  case LockWaitBlock:
5013  XactLockTableWait(xwait, relation, &tuple->t_self,
5014  XLTW_Lock);
5015  break;
5016  case LockWaitSkip:
5017  if (!ConditionalXactLockTableWait(xwait))
5018  {
5019  result = HeapTupleWouldBlock;
5020  /* recovery code expects to have buffer lock held */
5022  goto failed;
5023  }
5024  break;
5025  case LockWaitError:
5026  if (!ConditionalXactLockTableWait(xwait))
5027  ereport(ERROR,
5028  (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
5029  errmsg("could not obtain lock on row in relation \"%s\"",
5030  RelationGetRelationName(relation))));
5031  break;
5032  }
5033  }
5034 
5035  /* if there are updates, follow the update chain */
5036  if (follow_updates && !HEAP_XMAX_IS_LOCKED_ONLY(infomask))
5037  {
5038  HTSU_Result res;
5039 
5040  res = heap_lock_updated_tuple(relation, tuple, &t_ctid,
5042  mode);
5043  if (res != HeapTupleMayBeUpdated)
5044  {
5045  result = res;
5046  /* recovery code expects to have buffer lock held */
5047  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5048  goto failed;
5049  }
5050  }
5051 
5052  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5053 
5054  /*
5055  * xwait is done, but if xwait had just locked the tuple then some
5056  * other xact could update this tuple before we get to this point.
5057  * Check for xmax change, and start over if so.
5058  */
5059  if (xmax_infomask_changed(tuple->t_data->t_infomask, infomask) ||
5060  !TransactionIdEquals(HeapTupleHeaderGetRawXmax(tuple->t_data),
5061  xwait))
5062  goto l3;
5063 
5064  if (!(infomask & HEAP_XMAX_IS_MULTI))
5065  {
5066  /*
5067  * Otherwise check if it committed or aborted. Note we cannot
5068  * be here if the tuple was only locked by somebody who didn't
5069  * conflict with us; that would have been handled above. So
5070  * that transaction must necessarily be gone by now. But
5071  * don't check for this in the multixact case, because some
5072  * locker transactions might still be running.
5073  */
5074  UpdateXmaxHintBits(tuple->t_data, *buffer, xwait);
5075  }
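/*
 * UpdateXmaxHintBits above records the outcome of xwait on the tuple,
 * setting HEAP_XMAX_COMMITTED if it committed or HEAP_XMAX_INVALID if
 * it aborted, so later visibility checks need not consult the clog again.
 */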
5076  }
5077 
5078  /* By here, we're certain that we hold buffer exclusive lock again */
5079 
5080  /*
5081  * We may lock if previous xmax aborted, or if it committed but only
5082  * locked the tuple without updating it; or if we didn't have to wait
5083  * at all for whatever reason.
5084  */
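/*
 * (Of the two locked-only checks below, HEAP_XMAX_IS_LOCKED_ONLY tests
 * the infomask bits alone, while HeapTupleHeaderIsOnlyLocked also treats
 * a multixact whose updating member aborted as locked-only.)
 */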
5085  if (!require_sleep ||
5086  (tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
5087  HEAP_XMAX_IS_LOCKED_ONLY(tuple->t_data->t_infomask) ||
5088  HeapTupleHeaderIsOnlyLocked(tuple->t_data))
5089  result = HeapTupleMayBeUpdated;
5090  else
5091  result = HeapTupleUpdated;
5092  }
5093 
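/*
 * Failure exit: fill the HeapUpdateFailureData so the caller can see
 * which tuple version and xmax it lost to (used, for example, by
 * EvalPlanQual rechecking in READ COMMITTED mode).
 */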
5094 failed:
5095  if (result != HeapTupleMayBeUpdated)
5096  {
5097  Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated ||
5098  result == HeapTupleWouldBlock);
5099  Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID));
5100  hufd->ctid = tuple->t_data->t_ctid;
5101  hufd->xmax = HeapTupleHeaderGetUpdateXid(tuple->t_data);
5102  if (result == HeapTupleSelfUpdated)
5103  hufd->cmax = HeapTupleHeaderGetCmax(tuple->t_data);
5104  else
5105  hufd->cmax = InvalidCommandId;
5106  goto out_locked;
5107  }
5108 
5109  /*
5110  * If we didn't pin the visibility map page and the page has become all
5111  * visible while we were busy locking the buffer, or during some
5112  * subsequent window during which we had it unlocked, we'll have to unlock
5113  * and re-lock, to avoid holding the buffer lock across I/O. That's a bit
5114  * unfortunate, especially since we'll now have to recheck whether the
5115  * tuple has been locked or updated under us, but hopefully it won't
5116  * happen very often.
5117  */
5118  if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
5119  {
5120  LockBuffer(*buffer, BUFFER_LOCK_UNLOCK);
5121  visibilitymap_pin(relation, block, &vmbuffer);
5122  LockBuffer(*buffer, BUFFER_LOCK_EXCLUSIVE);
5123  goto l3;
5124  }
5125 
5126  xmax = HeapTupleHeaderGetRawXmax(tuple->t_data);
5127  old_infomask = tuple->t_data->t_infomask;
5128 
5129  /*
5130  * If this is the first possibly-multixact-able operation in the current
5131  * transaction, set my per-backend OldestMemberMXactId setting. We can be
5132  * certain that the transaction will never become a member of any older
5133  * MultiXactIds than that. (We have to do this even if we end up just
5134  * using our own TransactionId below, since some other backend could
5135  * incorporate our XID into a MultiXact immediately afterwards.)
5136  */
5137  MultiXactIdSetOldestMember();
5138 
5139  /*
5140  * Compute the new xmax and infomask to store into the tuple. Note we do
5141  * not modify the tuple just yet, because that would leave it in the wrong
5142  * state if multixact.c elogs.
5143  */
5144  compute_new_xmax_infomask(xmax, old_infomask, tuple->t_data->t_infomask2,
5145  GetCurrentTransactionId(), mode, false,
5146  &xid, &new_infomask, &new_infomask2);
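/*
 * compute_new_xmax_infomask may have created or extended a MultiXact so
 * that surviving lockers are preserved alongside our own transaction;
 * xid now holds the value to store as the tuple's new xmax.
 */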
5147 
5148  START_CRIT_SECTION();
5149 
5150  /*
5151  * Store transaction information of xact locking the tuple.
5152  *
5153  * Note: Cmax is meaningless in this context, so don't set it; this avoids
5154  * possibly generating a useless combo CID. Moreover, if we're locking a
5155  * previously updated tuple, it's important to preserve the Cmax.
5156  *
5157  * Also reset the HOT UPDATE bit, but only if there's no update; otherwise
5158  * we would break the HOT chain.
5159  */
5160  tuple->t_data->t_infomask &= ~HEAP_XMAX_BITS;
5161  tuple->t_data->t_infomask2 &= ~HEAP_KEYS_UPDATED;